1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVISelDAGToDAG.h"
14 #include "MCTargetDesc/RISCVMCTargetDesc.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCVISelLowering.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/IR/IntrinsicsRISCV.h"
20 #include "llvm/Support/Alignment.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/KnownBits.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
25 
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "riscv-isel"
29 
30 namespace llvm {
31 namespace RISCV {
32 #define GET_RISCVVSSEGTable_IMPL
33 #define GET_RISCVVLSEGTable_IMPL
34 #define GET_RISCVVLXSEGTable_IMPL
35 #define GET_RISCVVSXSEGTable_IMPL
36 #define GET_RISCVVLETable_IMPL
37 #define GET_RISCVVSETable_IMPL
38 #define GET_RISCVVLXTable_IMPL
39 #define GET_RISCVVSXTable_IMPL
40 #define GET_RISCVMaskedPseudosTable_IMPL
41 #include "RISCVGenSearchableTables.inc"
42 } // namespace RISCV
43 } // namespace llvm
44 
45 void RISCVDAGToDAGISel::PreprocessISelDAG() {
46   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
47 
48   bool MadeChange = false;
49   while (Position != CurDAG->allnodes_begin()) {
50     SDNode *N = &*--Position;
51     if (N->use_empty())
52       continue;
53 
54     SDValue Result;
55     switch (N->getOpcode()) {
56     case ISD::SPLAT_VECTOR: {
57       // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
58       // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
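      // For example, a SPLAT_VECTOR of scalar X with an nxv4i32 result becomes
      // (VMV_V_X_VL undef, X, X0), where the X0 VL operand stands for VLMAX.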
59       MVT VT = N->getSimpleValueType(0);
60       unsigned Opc =
61           VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
62       SDLoc DL(N);
63       SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
64       Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
65                                N->getOperand(0), VL);
66       break;
67     }
68     case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
      // Lower SPLAT_VECTOR_SPLIT_I64_VL to two scalar stores and a stride 0
      // vector load. This is done after lowering and combining so that we have
      // a chance to optimize it to VMV_V_X_VL when the upper bits aren't needed.
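      // The sequence built below stores Lo and Hi to adjacent words of a stack
      // slot and reloads the pair with a stride-0 vlse, which broadcasts the
      // reconstructed 64-bit value to every element.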
72       assert(N->getNumOperands() == 4 && "Unexpected number of operands");
73       MVT VT = N->getSimpleValueType(0);
74       SDValue Passthru = N->getOperand(0);
75       SDValue Lo = N->getOperand(1);
76       SDValue Hi = N->getOperand(2);
77       SDValue VL = N->getOperand(3);
78       assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
79              Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
80              "Unexpected VTs!");
81       MachineFunction &MF = CurDAG->getMachineFunction();
82       RISCVMachineFunctionInfo *FuncInfo =
83           MF.getInfo<RISCVMachineFunctionInfo>();
84       SDLoc DL(N);
85 
      // We reuse the frame index used for moving two i32s into a 64-bit FPR,
      // since this is an analogous operation.
88       int FI = FuncInfo->getMoveF64FrameIndex(MF);
89       MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
90       const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
91       SDValue StackSlot =
92           CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));
93 
94       SDValue Chain = CurDAG->getEntryNode();
95       Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
96 
97       SDValue OffsetSlot =
98           CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
99       Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
100                             Align(8));
101 
102       Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
103 
104       SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
105       SDValue IntID =
106           CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
107       SDValue Ops[] = {Chain,
108                        IntID,
109                        Passthru,
110                        StackSlot,
111                        CurDAG->getRegister(RISCV::X0, MVT::i64),
112                        VL};
113 
114       Result = CurDAG->getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
115                                            MVT::i64, MPI, Align(8),
116                                            MachineMemOperand::MOLoad);
117       break;
118     }
119     }
120 
121     if (Result) {
122       LLVM_DEBUG(dbgs() << "RISCV DAG preprocessing replacing:\nOld:    ");
123       LLVM_DEBUG(N->dump(CurDAG));
124       LLVM_DEBUG(dbgs() << "\nNew: ");
125       LLVM_DEBUG(Result->dump(CurDAG));
126       LLVM_DEBUG(dbgs() << "\n");
127 
128       CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
129       MadeChange = true;
130     }
131   }
132 
133   if (MadeChange)
134     CurDAG->RemoveDeadNodes();
135 }
136 
137 void RISCVDAGToDAGISel::PostprocessISelDAG() {
138   HandleSDNode Dummy(CurDAG->getRoot());
139   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
140 
141   bool MadeChange = false;
142   while (Position != CurDAG->allnodes_begin()) {
143     SDNode *N = &*--Position;
144     // Skip dead nodes and any non-machine opcodes.
145     if (N->use_empty() || !N->isMachineOpcode())
146       continue;
147 
148     MadeChange |= doPeepholeSExtW(N);
149     MadeChange |= doPeepholeMaskedRVV(N);
150   }
151 
152   CurDAG->setRoot(Dummy.getValue());
153 
154   if (MadeChange)
155     CurDAG->RemoveDeadNodes();
156 }
157 
158 static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
159                             RISCVMatInt::InstSeq &Seq) {
160   SDNode *Result = nullptr;
161   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
162   for (RISCVMatInt::Inst &Inst : Seq) {
163     SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, VT);
164     switch (Inst.getOpndKind()) {
165     case RISCVMatInt::Imm:
166       Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SDImm);
167       break;
168     case RISCVMatInt::RegX0:
169       Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg,
170                                       CurDAG->getRegister(RISCV::X0, VT));
171       break;
172     case RISCVMatInt::RegReg:
173       Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SrcReg);
174       break;
175     case RISCVMatInt::RegImm:
176       Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SDImm);
177       break;
178     }
179 
180     // Only the first instruction has X0 as its source.
181     SrcReg = SDValue(Result, 0);
182   }
183 
184   return Result;
185 }
186 
187 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
188                          int64_t Imm, const RISCVSubtarget &Subtarget) {
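  // RISCVMatInt picks the materialization sequence; for example, on RV64 an
  // immediate like 0x12345678 is typically LUI 0x12345 followed by ADDIW
  // 0x678, while wider constants may need extra SLLI/ADDI steps.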
189   RISCVMatInt::InstSeq Seq =
190       RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
191 
192   return selectImmSeq(CurDAG, DL, VT, Seq);
193 }
194 
195 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
196                            unsigned NF, RISCVII::VLMUL LMUL) {
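  // Wrap NF individual vector registers into one tuple register via
  // REG_SEQUENCE so the segment pseudos can take it as a single operand, e.g.
  // NF=2 at LMUL=1 uses the VRN2M1 class with sub_vrm1_0/sub_vrm1_1 indices.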
197   static const unsigned M1TupleRegClassIDs[] = {
198       RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
199       RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
200       RISCV::VRN8M1RegClassID};
201   static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
202                                                 RISCV::VRN3M2RegClassID,
203                                                 RISCV::VRN4M2RegClassID};
204 
205   assert(Regs.size() >= 2 && Regs.size() <= 8);
206 
207   unsigned RegClassID;
208   unsigned SubReg0;
209   switch (LMUL) {
210   default:
211     llvm_unreachable("Invalid LMUL.");
212   case RISCVII::VLMUL::LMUL_F8:
213   case RISCVII::VLMUL::LMUL_F4:
214   case RISCVII::VLMUL::LMUL_F2:
215   case RISCVII::VLMUL::LMUL_1:
216     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
217                   "Unexpected subreg numbering");
218     SubReg0 = RISCV::sub_vrm1_0;
219     RegClassID = M1TupleRegClassIDs[NF - 2];
220     break;
221   case RISCVII::VLMUL::LMUL_2:
222     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
223                   "Unexpected subreg numbering");
224     SubReg0 = RISCV::sub_vrm2_0;
225     RegClassID = M2TupleRegClassIDs[NF - 2];
226     break;
227   case RISCVII::VLMUL::LMUL_4:
228     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
229                   "Unexpected subreg numbering");
230     SubReg0 = RISCV::sub_vrm4_0;
231     RegClassID = RISCV::VRN2M4RegClassID;
232     break;
233   }
234 
235   SDLoc DL(Regs[0]);
236   SmallVector<SDValue, 8> Ops;
237 
238   Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
239 
240   for (unsigned I = 0; I < Regs.size(); ++I) {
241     Ops.push_back(Regs[I]);
242     Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
243   }
244   SDNode *N =
245       CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
246   return SDValue(N, 0);
247 }
248 
249 void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
250     SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
251     bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
252     bool IsLoad, MVT *IndexVT) {
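  // Operands are appended in the order the pseudos expect: base pointer,
  // optional stride/index, optional mask (copied to V0 and glued), VL, SEW,
  // an optional policy immediate for masked loads, then chain and glue.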
253   SDValue Chain = Node->getOperand(0);
254   SDValue Glue;
255 
256   Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
257 
258   if (IsStridedOrIndexed) {
259     Operands.push_back(Node->getOperand(CurOp++)); // Index.
260     if (IndexVT)
261       *IndexVT = Operands.back()->getSimpleValueType(0);
262   }
263 
264   if (IsMasked) {
265     // Mask needs to be copied to V0.
266     SDValue Mask = Node->getOperand(CurOp++);
267     Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
268     Glue = Chain.getValue(1);
269     Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
270   }
271   SDValue VL;
272   selectVLOp(Node->getOperand(CurOp++), VL);
273   Operands.push_back(VL);
274 
275   MVT XLenVT = Subtarget->getXLenVT();
276   SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
277   Operands.push_back(SEWOp);
278 
279   // Masked load has the tail policy argument.
280   if (IsMasked && IsLoad) {
281     // Policy must be a constant.
282     uint64_t Policy = Node->getConstantOperandVal(CurOp++);
283     SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
284     Operands.push_back(PolicyOp);
285   }
286 
287   Operands.push_back(Chain); // Chain.
288   if (Glue)
289     Operands.push_back(Glue);
290 }
291 
292 static bool isAllUndef(ArrayRef<SDValue> Values) {
293   return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
294 }
295 
296 void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
297                                     bool IsStrided) {
298   SDLoc DL(Node);
299   unsigned NF = Node->getNumValues() - 1;
300   MVT VT = Node->getSimpleValueType(0);
301   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
302   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
303 
304   unsigned CurOp = 2;
305   SmallVector<SDValue, 8> Operands;
306 
307   SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
308                                Node->op_begin() + CurOp + NF);
309   bool IsTU = IsMasked || !isAllUndef(Regs);
310   if (IsTU) {
311     SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
312     Operands.push_back(Merge);
313   }
314   CurOp += NF;
315 
316   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
317                              Operands, /*IsLoad=*/true);
318 
319   const RISCV::VLSEGPseudo *P =
320       RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
321                             static_cast<unsigned>(LMUL));
322   MachineSDNode *Load =
323       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
324 
325   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
326     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
327 
328   SDValue SuperReg = SDValue(Load, 0);
329   for (unsigned I = 0; I < NF; ++I) {
330     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
331     ReplaceUses(SDValue(Node, I),
332                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
333   }
334 
335   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
336   CurDAG->RemoveDeadNode(Node);
337 }
338 
339 void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
340   SDLoc DL(Node);
341   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
342   MVT VT = Node->getSimpleValueType(0);
343   MVT XLenVT = Subtarget->getXLenVT();
344   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
345   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
346 
347   unsigned CurOp = 2;
348   SmallVector<SDValue, 7> Operands;
349 
350   SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
351                                Node->op_begin() + CurOp + NF);
352   bool IsTU = IsMasked || !isAllUndef(Regs);
353   if (IsTU) {
354     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
355     Operands.push_back(MaskedOff);
356   }
357   CurOp += NF;
358 
359   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
360                              /*IsStridedOrIndexed*/ false, Operands,
361                              /*IsLoad=*/true);
362 
363   const RISCV::VLSEGPseudo *P =
364       RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
365                             Log2SEW, static_cast<unsigned>(LMUL));
366   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
367                                                XLenVT, MVT::Other, Operands);
368 
369   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
370     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
371 
372   SDValue SuperReg = SDValue(Load, 0);
373   for (unsigned I = 0; I < NF; ++I) {
374     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
375     ReplaceUses(SDValue(Node, I),
376                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
377   }
378 
379   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
380   ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
381   CurDAG->RemoveDeadNode(Node);
382 }
383 
384 void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
385                                      bool IsOrdered) {
386   SDLoc DL(Node);
387   unsigned NF = Node->getNumValues() - 1;
388   MVT VT = Node->getSimpleValueType(0);
389   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
390   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
391 
392   unsigned CurOp = 2;
393   SmallVector<SDValue, 8> Operands;
394 
395   SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
396                                Node->op_begin() + CurOp + NF);
397   bool IsTU = IsMasked || !isAllUndef(Regs);
398   if (IsTU) {
399     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
400     Operands.push_back(MaskedOff);
401   }
402   CurOp += NF;
403 
404   MVT IndexVT;
405   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
406                              /*IsStridedOrIndexed*/ true, Operands,
407                              /*IsLoad=*/true, &IndexVT);
408 
409   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
410          "Element count mismatch");
411 
412   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
413   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
414   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
415     report_fatal_error("The V extension does not support EEW=64 for index "
416                        "values when XLEN=32");
417   }
418   const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
419       NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
420       static_cast<unsigned>(IndexLMUL));
421   MachineSDNode *Load =
422       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
423 
424   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
425     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
426 
427   SDValue SuperReg = SDValue(Load, 0);
428   for (unsigned I = 0; I < NF; ++I) {
429     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
430     ReplaceUses(SDValue(Node, I),
431                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
432   }
433 
434   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
435   CurDAG->RemoveDeadNode(Node);
436 }
437 
438 void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
439                                     bool IsStrided) {
440   SDLoc DL(Node);
441   unsigned NF = Node->getNumOperands() - 4;
442   if (IsStrided)
443     NF--;
444   if (IsMasked)
445     NF--;
446   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
447   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
448   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
449   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
450   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
451 
452   SmallVector<SDValue, 8> Operands;
453   Operands.push_back(StoreVal);
454   unsigned CurOp = 2 + NF;
455 
456   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
457                              Operands);
458 
459   const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
460       NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
461   MachineSDNode *Store =
462       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
463 
464   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
465     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
466 
467   ReplaceNode(Node, Store);
468 }
469 
470 void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
471                                      bool IsOrdered) {
472   SDLoc DL(Node);
473   unsigned NF = Node->getNumOperands() - 5;
474   if (IsMasked)
475     --NF;
476   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
477   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
478   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
479   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
480   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
481 
482   SmallVector<SDValue, 8> Operands;
483   Operands.push_back(StoreVal);
484   unsigned CurOp = 2 + NF;
485 
486   MVT IndexVT;
487   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
488                              /*IsStridedOrIndexed*/ true, Operands,
489                              /*IsLoad=*/false, &IndexVT);
490 
491   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
492          "Element count mismatch");
493 
494   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
495   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
496   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
497     report_fatal_error("The V extension does not support EEW=64 for index "
498                        "values when XLEN=32");
499   }
500   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
501       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
502       static_cast<unsigned>(IndexLMUL));
503   MachineSDNode *Store =
504       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
505 
506   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
507     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
508 
509   ReplaceNode(Node, Store);
510 }
511 
512 void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
513   if (!Subtarget->hasVInstructions())
514     return;
515 
516   assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
517           Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
518          "Unexpected opcode");
519 
520   SDLoc DL(Node);
521   MVT XLenVT = Subtarget->getXLenVT();
522 
523   bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
524   unsigned IntNoOffset = HasChain ? 1 : 0;
525   unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
526 
527   assert((IntNo == Intrinsic::riscv_vsetvli ||
528           IntNo == Intrinsic::riscv_vsetvlimax ||
529           IntNo == Intrinsic::riscv_vsetvli_opt ||
530           IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
531          "Unexpected vsetvli intrinsic");
532 
533   bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
534                IntNo == Intrinsic::riscv_vsetvlimax_opt;
535   unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
536 
537   assert(Node->getNumOperands() == Offset + 2 &&
538          "Unexpected number of operands");
539 
540   unsigned SEW =
541       RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
542   RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
543       Node->getConstantOperandVal(Offset + 1) & 0x7);
544 
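  // Encode SEW and LMUL into a single vtype immediate; the selection below
  // always uses a tail-agnostic, mask-undisturbed policy.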
545   unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
546                                             /*MaskAgnostic*/ false);
547   SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
548 
549   SmallVector<EVT, 2> VTs = {XLenVT};
550   if (HasChain)
551     VTs.push_back(MVT::Other);
552 
553   SDValue VLOperand;
554   unsigned Opcode = RISCV::PseudoVSETVLI;
555   if (VLMax) {
556     VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
557     Opcode = RISCV::PseudoVSETVLIX0;
558   } else {
559     VLOperand = Node->getOperand(IntNoOffset + 1);
560 
561     if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
562       uint64_t AVL = C->getZExtValue();
563       if (isUInt<5>(AVL)) {
564         SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
565         SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
566         if (HasChain)
567           Ops.push_back(Node->getOperand(0));
568         ReplaceNode(
569             Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
570         return;
571       }
572     }
573   }
574 
575   SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
576   if (HasChain)
577     Ops.push_back(Node->getOperand(0));
578 
579   ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
580 }
581 
582 void RISCVDAGToDAGISel::Select(SDNode *Node) {
583   // If we have a custom node, we have already selected.
584   if (Node->isMachineOpcode()) {
585     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
586     Node->setNodeId(-1);
587     return;
588   }
589 
590   // Instruction Selection not handled by the auto-generated tablegen selection
591   // should be handled here.
592   unsigned Opcode = Node->getOpcode();
593   MVT XLenVT = Subtarget->getXLenVT();
594   SDLoc DL(Node);
595   MVT VT = Node->getSimpleValueType(0);
596 
597   switch (Opcode) {
598   case ISD::Constant: {
599     auto *ConstNode = cast<ConstantSDNode>(Node);
600     if (VT == XLenVT && ConstNode->isZero()) {
601       SDValue New =
602           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
603       ReplaceNode(Node, New.getNode());
604       return;
605     }
606     int64_t Imm = ConstNode->getSExtValue();
607     // If the upper XLen-16 bits are not used, try to convert this to a simm12
608     // by sign extending bit 15.
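    // For example, 0xFFF8 whose users only read the low 16 bits can be treated
    // as -8 and materialized with a single ADDI instead of LUI+ADDI.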
609     if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
610         hasAllHUsers(Node))
611       Imm = SignExtend64<16>(Imm);
612     // If the upper 32-bits are not used try to convert this into a simm32 by
613     // sign extending bit 32.
614     if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
615       Imm = SignExtend64<32>(Imm);
616 
617     ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
618     return;
619   }
620   case ISD::SHL: {
621     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
622     if (!N1C)
623       break;
624     SDValue N0 = Node->getOperand(0);
625     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
626         !isa<ConstantSDNode>(N0.getOperand(1)))
627       break;
628     unsigned ShAmt = N1C->getZExtValue();
629     uint64_t Mask = N0.getConstantOperandVal(1);
630 
631     // Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
632     // 32 leading zeros and C3 trailing zeros.
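    // For example (XLen=64): (shl (and X, 0xFFFFFFF0), 4) becomes
    // (slli (srliw X, 4), 8); the srliw discards both the upper 32 bits and the
    // low 4 bits that the mask would have cleared.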
633     if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
634       unsigned XLen = Subtarget->getXLen();
635       unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
636       unsigned TrailingZeros = countTrailingZeros(Mask);
637       if (TrailingZeros > 0 && LeadingZeros == 32) {
638         SDNode *SRLIW = CurDAG->getMachineNode(
639             RISCV::SRLIW, DL, VT, N0->getOperand(0),
640             CurDAG->getTargetConstant(TrailingZeros, DL, VT));
641         SDNode *SLLI = CurDAG->getMachineNode(
642             RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
643             CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
644         ReplaceNode(Node, SLLI);
645         return;
646       }
647     }
648     break;
649   }
650   case ISD::SRL: {
651     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
652     if (!N1C)
653       break;
654     SDValue N0 = Node->getOperand(0);
655     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
656         !isa<ConstantSDNode>(N0.getOperand(1)))
657       break;
658     unsigned ShAmt = N1C->getZExtValue();
659     uint64_t Mask = N0.getConstantOperandVal(1);
660 
661     // Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
662     // 32 leading zeros and C3 trailing zeros.
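    // For example (XLen=64): (srl (and X, 0xFFFFFFF0), 2) becomes
    // (slli (srliw X, 4), 2), avoiding materialization of the mask constant.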
663     if (isShiftedMask_64(Mask)) {
664       unsigned XLen = Subtarget->getXLen();
665       unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
666       unsigned TrailingZeros = countTrailingZeros(Mask);
667       if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
668         SDNode *SRLIW = CurDAG->getMachineNode(
669             RISCV::SRLIW, DL, VT, N0->getOperand(0),
670             CurDAG->getTargetConstant(TrailingZeros, DL, VT));
671         SDNode *SLLI = CurDAG->getMachineNode(
672             RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
673             CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
674         ReplaceNode(Node, SLLI);
675         return;
676       }
677     }
678 
679     // Optimize (srl (and X, C2), C) ->
    //          (srli (slli X, XLen-C3), XLen-C3 + C)
681     // Where C2 is a mask with C3 trailing ones.
    // This accounts for C2 possibly having had its lower bits cleared by
    // SimplifyDemandedBits, and it avoids materializing the C2 immediate.
684     // This pattern occurs when type legalizing right shifts for types with
685     // less than XLen bits.
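    // For example (XLen=64): (srl (and X, 0xFFFF), 8) becomes
    // (srli (slli X, 48), 56); no LUI/ADDI pair is needed for the 0xFFFF mask.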
686     Mask |= maskTrailingOnes<uint64_t>(ShAmt);
687     if (!isMask_64(Mask))
688       break;
689     unsigned TrailingOnes = countTrailingOnes(Mask);
690     // 32 trailing ones should use srliw via tablegen pattern.
691     if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
692       break;
693     unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
694     SDNode *SLLI =
695         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
696                                CurDAG->getTargetConstant(LShAmt, DL, VT));
697     SDNode *SRLI = CurDAG->getMachineNode(
698         RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
699         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
700     ReplaceNode(Node, SRLI);
701     return;
702   }
703   case ISD::SRA: {
704     // Optimize (sra (sext_inreg X, i16), C) ->
    //          (srai (slli X, XLen-16), XLen-16 + C)
    // And      (sra (sext_inreg X, i8), C) ->
    //          (srai (slli X, XLen-8), XLen-8 + C)
708     // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
709     // This transform matches the code we get without Zbb. The shifts are more
710     // compressible, and this can help expose CSE opportunities in the sdiv by
711     // constant optimization.
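    // For example (XLen=64): (sra (sext_inreg X, i16), 3) becomes
    // (srai (slli X, 48), 51).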
712     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
713     if (!N1C)
714       break;
715     SDValue N0 = Node->getOperand(0);
716     if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
717       break;
718     unsigned ShAmt = N1C->getZExtValue();
719     unsigned ExtSize =
720         cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
721     // ExtSize of 32 should use sraiw via tablegen pattern.
722     if (ExtSize >= 32 || ShAmt >= ExtSize)
723       break;
724     unsigned LShAmt = Subtarget->getXLen() - ExtSize;
725     SDNode *SLLI =
726         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
727                                CurDAG->getTargetConstant(LShAmt, DL, VT));
728     SDNode *SRAI = CurDAG->getMachineNode(
729         RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
730         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
731     ReplaceNode(Node, SRAI);
732     return;
733   }
734   case ISD::AND: {
735     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
736     if (!N1C)
737       break;
738 
739     SDValue N0 = Node->getOperand(0);
740 
741     bool LeftShift = N0.getOpcode() == ISD::SHL;
742     if (!LeftShift && N0.getOpcode() != ISD::SRL)
743       break;
744 
745     auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
746     if (!C)
747       break;
748     unsigned C2 = C->getZExtValue();
749     unsigned XLen = Subtarget->getXLen();
750     assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
751 
752     uint64_t C1 = N1C->getZExtValue();
753 
754     // Keep track of whether this is a c.andi. If we can't use c.andi, the
755     // shift pair might offer more compression opportunities.
756     // TODO: We could check for C extension here, but we don't have many lit
757     // tests with the C extension enabled so not checking gets better coverage.
    // TODO: What if ANDI is faster than a shift?
759     bool IsCANDI = isInt<6>(N1C->getSExtValue());
760 
761     // Clear irrelevant bits in the mask.
762     if (LeftShift)
763       C1 &= maskTrailingZeros<uint64_t>(C2);
764     else
765       C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
766 
767     // Some transforms should only be done if the shift has a single use or
    // the AND would become (srli (slli X, 32), 32).
769     bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
770 
771     SDValue X = N0.getOperand(0);
772 
773     // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
774     // with c3 leading zeros.
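    // For example (XLen=64): (and (srl X, 4), 0xFFFFFF) becomes
    // (srli (slli X, 36), 40) when the srl has a single use and none of the
    // special cases below apply.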
775     if (!LeftShift && isMask_64(C1)) {
776       unsigned Leading = XLen - (64 - countLeadingZeros(C1));
777       if (C2 < Leading) {
778         // If the number of leading zeros is C2+32 this can be SRLIW.
779         if (C2 + 32 == Leading) {
780           SDNode *SRLIW = CurDAG->getMachineNode(
781               RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
782           ReplaceNode(Node, SRLIW);
783           return;
784         }
785 
786         // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
787         // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
788         //
        // This pattern occurs when (i32 (srl (sra Y, 31), c3 - 32)) is type
790         // legalized and goes through DAG combine.
791         if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
792             X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
793             cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
794           SDNode *SRAIW =
795               CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
796                                      CurDAG->getTargetConstant(31, DL, VT));
797           SDNode *SRLIW = CurDAG->getMachineNode(
798               RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
799               CurDAG->getTargetConstant(Leading - 32, DL, VT));
800           ReplaceNode(Node, SRLIW);
801           return;
802         }
803 
804         // (srli (slli x, c3-c2), c3).
805         // Skip if we could use (zext.w (sraiw X, C2)).
806         bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
807                     X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
808                     cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
809         // Also Skip if we can use bexti.
810         Skip |= Subtarget->hasStdExtZbs() && Leading == XLen - 1;
811         if (OneUseOrZExtW && !Skip) {
812           SDNode *SLLI = CurDAG->getMachineNode(
813               RISCV::SLLI, DL, VT, X,
814               CurDAG->getTargetConstant(Leading - C2, DL, VT));
815           SDNode *SRLI = CurDAG->getMachineNode(
816               RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
817               CurDAG->getTargetConstant(Leading, DL, VT));
818           ReplaceNode(Node, SRLI);
819           return;
820         }
821       }
822     }
823 
    // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
825     // shifted by c2 bits with c3 leading zeros.
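    // For example (XLen=64): (and (shl X, 8), 0xFFFFFF00) becomes
    // (srli (slli X, 40), 32) when the shl has a single use.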
826     if (LeftShift && isShiftedMask_64(C1)) {
827       unsigned Leading = XLen - (64 - countLeadingZeros(C1));
828 
829       if (C2 + Leading < XLen &&
830           C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
831         // Use slli.uw when possible.
832         if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
833           SDNode *SLLI_UW = CurDAG->getMachineNode(
834               RISCV::SLLI_UW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
835           ReplaceNode(Node, SLLI_UW);
836           return;
837         }
838 
        // (srli (slli x, c2+c3), c3)
840         if (OneUseOrZExtW && !IsCANDI) {
841           SDNode *SLLI = CurDAG->getMachineNode(
842               RISCV::SLLI, DL, VT, X,
843               CurDAG->getTargetConstant(C2 + Leading, DL, VT));
844           SDNode *SRLI = CurDAG->getMachineNode(
845               RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
846               CurDAG->getTargetConstant(Leading, DL, VT));
847           ReplaceNode(Node, SRLI);
848           return;
849         }
850       }
851     }
852 
    // Turn (and (srl x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
854     // shifted mask with c2 leading zeros and c3 trailing zeros.
855     if (!LeftShift && isShiftedMask_64(C1)) {
856       unsigned Leading = XLen - (64 - countLeadingZeros(C1));
857       unsigned Trailing = countTrailingZeros(C1);
858       if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW && !IsCANDI) {
859         unsigned SrliOpc = RISCV::SRLI;
860         // If the input is zexti32 we should use SRLIW.
861         if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
862             X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
863           SrliOpc = RISCV::SRLIW;
864           X = X.getOperand(0);
865         }
866         SDNode *SRLI = CurDAG->getMachineNode(
867             SrliOpc, DL, VT, X,
868             CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
869         SDNode *SLLI =
870             CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
871                                    CurDAG->getTargetConstant(Trailing, DL, VT));
872         ReplaceNode(Node, SLLI);
873         return;
874       }
875       // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
876       if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
877           OneUseOrZExtW && !IsCANDI) {
878         SDNode *SRLIW = CurDAG->getMachineNode(
879             RISCV::SRLIW, DL, VT, X,
880             CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
881         SDNode *SLLI =
882             CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
883                                    CurDAG->getTargetConstant(Trailing, DL, VT));
884         ReplaceNode(Node, SLLI);
885         return;
886       }
887     }
888 
889     // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
890     // shifted mask with no leading zeros and c3 trailing zeros.
891     if (LeftShift && isShiftedMask_64(C1)) {
892       unsigned Leading = XLen - (64 - countLeadingZeros(C1));
893       unsigned Trailing = countTrailingZeros(C1);
894       if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
895         SDNode *SRLI = CurDAG->getMachineNode(
896             RISCV::SRLI, DL, VT, X,
897             CurDAG->getTargetConstant(Trailing - C2, DL, VT));
898         SDNode *SLLI =
899             CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
900                                    CurDAG->getTargetConstant(Trailing, DL, VT));
901         ReplaceNode(Node, SLLI);
902         return;
903       }
904       // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
905       if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
906         SDNode *SRLIW = CurDAG->getMachineNode(
907             RISCV::SRLIW, DL, VT, X,
908             CurDAG->getTargetConstant(Trailing - C2, DL, VT));
909         SDNode *SLLI =
910             CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
911                                    CurDAG->getTargetConstant(Trailing, DL, VT));
912         ReplaceNode(Node, SLLI);
913         return;
914       }
915     }
916 
917     break;
918   }
919   case ISD::MUL: {
920     // Special case for calculating (mul (and X, C2), C1) where the full product
921     // fits in XLen bits. We can shift X left by the number of leading zeros in
922     // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
923     // product has XLen trailing zeros, putting it in the output of MULHU. This
924     // can avoid materializing a constant in a register for C2.
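    // For example (XLen=64): (mul (and X, 0xFFFFFF), C1) becomes
    // (mulhu (slli X, 40), C1 << 24) when the AND has a single use and C1 fits
    // in 40 bits, so the 24-bit mask never has to be materialized.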
925 
926     // RHS should be a constant.
927     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
928     if (!N1C || !N1C->hasOneUse())
929       break;
930 
931     // LHS should be an AND with constant.
932     SDValue N0 = Node->getOperand(0);
933     if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
934       break;
935 
936     uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
937 
938     // Constant should be a mask.
939     if (!isMask_64(C2))
940       break;
941 
942     // This should be the only use of the AND unless we will use
943     // (SRLI (SLLI X, 32), 32). We don't use a shift pair for other AND
944     // constants.
945     if (!N0.hasOneUse() && C2 != UINT64_C(0xFFFFFFFF))
946       break;
947 
    // If this can be an ANDI, ZEXT.H, or ZEXT.W, we don't need to do this
949     // optimization.
950     if (isInt<12>(C2) ||
951         (C2 == UINT64_C(0xFFFF) &&
952          (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
953         (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba()))
954       break;
955 
956     // We need to shift left the AND input and C1 by a total of XLen bits.
957 
958     // How far left do we need to shift the AND input?
959     unsigned XLen = Subtarget->getXLen();
960     unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));
961 
962     // The constant gets shifted by the remaining amount unless that would
963     // shift bits out.
964     uint64_t C1 = N1C->getZExtValue();
965     unsigned ConstantShift = XLen - LeadingZeros;
966     if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
967       break;
968 
969     uint64_t ShiftedC1 = C1 << ConstantShift;
    // If this is RV32, we need to sign extend the constant.
971     if (XLen == 32)
972       ShiftedC1 = SignExtend64<32>(ShiftedC1);
973 
974     // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
975     SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
976     SDNode *SLLI =
977         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
978                                CurDAG->getTargetConstant(LeadingZeros, DL, VT));
979     SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
980                                            SDValue(SLLI, 0), SDValue(Imm, 0));
981     ReplaceNode(Node, MULHU);
982     return;
983   }
984   case ISD::INTRINSIC_WO_CHAIN: {
985     unsigned IntNo = Node->getConstantOperandVal(0);
986     switch (IntNo) {
987       // By default we do not custom select any intrinsic.
988     default:
989       break;
990     case Intrinsic::riscv_vmsgeu:
991     case Intrinsic::riscv_vmsge: {
992       SDValue Src1 = Node->getOperand(1);
993       SDValue Src2 = Node->getOperand(2);
994       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
995       bool IsCmpUnsignedZero = false;
996       // Only custom select scalar second operand.
997       if (Src2.getValueType() != XLenVT)
998         break;
999       // Small constants are handled with patterns.
1000       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
1001         int64_t CVal = C->getSExtValue();
1002         if (CVal >= -15 && CVal <= 16) {
1003           if (!IsUnsigned || CVal != 0)
1004             break;
1005           IsCmpUnsignedZero = true;
1006         }
1007       }
1008       MVT Src1VT = Src1.getSimpleValueType();
1009       unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
1010       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
1011       default:
1012         llvm_unreachable("Unexpected LMUL!");
1013 #define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
1014   case RISCVII::VLMUL::lmulenum:                                               \
1015     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
1016                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
1017     VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
1018     VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
1019     break;
1020         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
1021         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
1022         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
1023         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
1024         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
1025         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
1026         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
1027 #undef CASE_VMSLT_VMNAND_VMSET_OPCODES
1028       }
1029       SDValue SEW = CurDAG->getTargetConstant(
1030           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
1031       SDValue VL;
1032       selectVLOp(Node->getOperand(3), VL);
1033 
1034       // If vmsgeu with 0 immediate, expand it to vmset.
1035       if (IsCmpUnsignedZero) {
1036         ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
1037         return;
1038       }
1039 
1040       // Expand to
1041       // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
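      // vmnand with both operands equal to the compare result simply
      // complements it, giving vd = !(va < x) = (va >= x).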
1042       SDValue Cmp = SDValue(
1043           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
1044           0);
1045       ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
1046                                                {Cmp, Cmp, VL, SEW}));
1047       return;
1048     }
1049     case Intrinsic::riscv_vmsgeu_mask:
1050     case Intrinsic::riscv_vmsge_mask: {
1051       SDValue Src1 = Node->getOperand(2);
1052       SDValue Src2 = Node->getOperand(3);
1053       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
1054       bool IsCmpUnsignedZero = false;
1055       // Only custom select scalar second operand.
1056       if (Src2.getValueType() != XLenVT)
1057         break;
1058       // Small constants are handled with patterns.
1059       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
1060         int64_t CVal = C->getSExtValue();
1061         if (CVal >= -15 && CVal <= 16) {
1062           if (!IsUnsigned || CVal != 0)
1063             break;
1064           IsCmpUnsignedZero = true;
1065         }
1066       }
1067       MVT Src1VT = Src1.getSimpleValueType();
1068       unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
1069           VMOROpcode;
1070       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
1071       default:
1072         llvm_unreachable("Unexpected LMUL!");
1073 #define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
1074   case RISCVII::VLMUL::lmulenum:                                               \
1075     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
1076                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
1077     VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
1078                                  : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
1079     break;
1080         CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
1081         CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
1082         CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
1083         CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
1084         CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
1085         CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
1086         CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
1087 #undef CASE_VMSLT_OPCODES
1088       }
1089       // Mask operations use the LMUL from the mask type.
1090       switch (RISCVTargetLowering::getLMUL(VT)) {
1091       default:
1092         llvm_unreachable("Unexpected LMUL!");
1093 #define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
1094   case RISCVII::VLMUL::lmulenum:                                               \
1095     VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
1096     VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
1097     VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
1098     break;
1099         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
1100         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
1101         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
1102         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
1103         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
1104         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
1105         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
1106 #undef CASE_VMXOR_VMANDN_VMOR_OPCODES
1107       }
1108       SDValue SEW = CurDAG->getTargetConstant(
1109           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
1110       SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
1111       SDValue VL;
1112       selectVLOp(Node->getOperand(5), VL);
1113       SDValue MaskedOff = Node->getOperand(1);
1114       SDValue Mask = Node->getOperand(4);
1115 
1116       // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
1117       if (IsCmpUnsignedZero) {
1118         // We don't need vmor if the MaskedOff and the Mask are the same
1119         // value.
1120         if (Mask == MaskedOff) {
1121           ReplaceUses(Node, Mask.getNode());
1122           return;
1123         }
1124         ReplaceNode(Node,
1125                     CurDAG->getMachineNode(VMOROpcode, DL, VT,
1126                                            {Mask, MaskedOff, VL, MaskSEW}));
1127         return;
1128       }
1129 
      // If the MaskedOff value and the Mask are the same value, use
1131       // vmslt{u}.vx vt, va, x;  vmandn.mm vd, vd, vt
1132       // This avoids needing to copy v0 to vd before starting the next sequence.
1133       if (Mask == MaskedOff) {
1134         SDValue Cmp = SDValue(
1135             CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
1136             0);
1137         ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
1138                                                  {Mask, Cmp, VL, MaskSEW}));
1139         return;
1140       }
1141 
1142       // Mask needs to be copied to V0.
1143       SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
1144                                            RISCV::V0, Mask, SDValue());
1145       SDValue Glue = Chain.getValue(1);
1146       SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
1147 
1148       // Otherwise use
1149       // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
1150       // The result is mask undisturbed.
1151       // We use the same instructions to emulate mask agnostic behavior, because
1152       // the agnostic result can be either undisturbed or all 1.
1153       SDValue Cmp = SDValue(
1154           CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
1155                                  {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
1156           0);
1157       // vmxor.mm vd, vd, v0 is used to update active value.
1158       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
1159                                                {Cmp, Mask, VL, MaskSEW}));
1160       return;
1161     }
1162     case Intrinsic::riscv_vsetvli_opt:
1163     case Intrinsic::riscv_vsetvlimax_opt:
1164       return selectVSETVLI(Node);
1165     }
1166     break;
1167   }
1168   case ISD::INTRINSIC_W_CHAIN: {
1169     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1170     switch (IntNo) {
1171       // By default we do not custom select any intrinsic.
1172     default:
1173       break;
1174     case Intrinsic::riscv_vsetvli:
1175     case Intrinsic::riscv_vsetvlimax:
1176       return selectVSETVLI(Node);
1177     case Intrinsic::riscv_vlseg2:
1178     case Intrinsic::riscv_vlseg3:
1179     case Intrinsic::riscv_vlseg4:
1180     case Intrinsic::riscv_vlseg5:
1181     case Intrinsic::riscv_vlseg6:
1182     case Intrinsic::riscv_vlseg7:
1183     case Intrinsic::riscv_vlseg8: {
1184       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1185       return;
1186     }
1187     case Intrinsic::riscv_vlseg2_mask:
1188     case Intrinsic::riscv_vlseg3_mask:
1189     case Intrinsic::riscv_vlseg4_mask:
1190     case Intrinsic::riscv_vlseg5_mask:
1191     case Intrinsic::riscv_vlseg6_mask:
1192     case Intrinsic::riscv_vlseg7_mask:
1193     case Intrinsic::riscv_vlseg8_mask: {
1194       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1195       return;
1196     }
1197     case Intrinsic::riscv_vlsseg2:
1198     case Intrinsic::riscv_vlsseg3:
1199     case Intrinsic::riscv_vlsseg4:
1200     case Intrinsic::riscv_vlsseg5:
1201     case Intrinsic::riscv_vlsseg6:
1202     case Intrinsic::riscv_vlsseg7:
1203     case Intrinsic::riscv_vlsseg8: {
1204       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1205       return;
1206     }
1207     case Intrinsic::riscv_vlsseg2_mask:
1208     case Intrinsic::riscv_vlsseg3_mask:
1209     case Intrinsic::riscv_vlsseg4_mask:
1210     case Intrinsic::riscv_vlsseg5_mask:
1211     case Intrinsic::riscv_vlsseg6_mask:
1212     case Intrinsic::riscv_vlsseg7_mask:
1213     case Intrinsic::riscv_vlsseg8_mask: {
1214       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1215       return;
1216     }
1217     case Intrinsic::riscv_vloxseg2:
1218     case Intrinsic::riscv_vloxseg3:
1219     case Intrinsic::riscv_vloxseg4:
1220     case Intrinsic::riscv_vloxseg5:
1221     case Intrinsic::riscv_vloxseg6:
1222     case Intrinsic::riscv_vloxseg7:
1223     case Intrinsic::riscv_vloxseg8:
1224       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1225       return;
1226     case Intrinsic::riscv_vluxseg2:
1227     case Intrinsic::riscv_vluxseg3:
1228     case Intrinsic::riscv_vluxseg4:
1229     case Intrinsic::riscv_vluxseg5:
1230     case Intrinsic::riscv_vluxseg6:
1231     case Intrinsic::riscv_vluxseg7:
1232     case Intrinsic::riscv_vluxseg8:
1233       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1234       return;
1235     case Intrinsic::riscv_vloxseg2_mask:
1236     case Intrinsic::riscv_vloxseg3_mask:
1237     case Intrinsic::riscv_vloxseg4_mask:
1238     case Intrinsic::riscv_vloxseg5_mask:
1239     case Intrinsic::riscv_vloxseg6_mask:
1240     case Intrinsic::riscv_vloxseg7_mask:
1241     case Intrinsic::riscv_vloxseg8_mask:
1242       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1243       return;
1244     case Intrinsic::riscv_vluxseg2_mask:
1245     case Intrinsic::riscv_vluxseg3_mask:
1246     case Intrinsic::riscv_vluxseg4_mask:
1247     case Intrinsic::riscv_vluxseg5_mask:
1248     case Intrinsic::riscv_vluxseg6_mask:
1249     case Intrinsic::riscv_vluxseg7_mask:
1250     case Intrinsic::riscv_vluxseg8_mask:
1251       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1252       return;
1253     case Intrinsic::riscv_vlseg8ff:
1254     case Intrinsic::riscv_vlseg7ff:
1255     case Intrinsic::riscv_vlseg6ff:
1256     case Intrinsic::riscv_vlseg5ff:
1257     case Intrinsic::riscv_vlseg4ff:
1258     case Intrinsic::riscv_vlseg3ff:
1259     case Intrinsic::riscv_vlseg2ff: {
1260       selectVLSEGFF(Node, /*IsMasked*/ false);
1261       return;
1262     }
1263     case Intrinsic::riscv_vlseg8ff_mask:
1264     case Intrinsic::riscv_vlseg7ff_mask:
1265     case Intrinsic::riscv_vlseg6ff_mask:
1266     case Intrinsic::riscv_vlseg5ff_mask:
1267     case Intrinsic::riscv_vlseg4ff_mask:
1268     case Intrinsic::riscv_vlseg3ff_mask:
1269     case Intrinsic::riscv_vlseg2ff_mask: {
1270       selectVLSEGFF(Node, /*IsMasked*/ true);
1271       return;
1272     }
1273     case Intrinsic::riscv_vloxei:
1274     case Intrinsic::riscv_vloxei_mask:
1275     case Intrinsic::riscv_vluxei:
1276     case Intrinsic::riscv_vluxei_mask: {
1277       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1278                       IntNo == Intrinsic::riscv_vluxei_mask;
1279       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1280                        IntNo == Intrinsic::riscv_vloxei_mask;
1281 
1282       MVT VT = Node->getSimpleValueType(0);
1283       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1284 
1285       unsigned CurOp = 2;
      // Masked intrinsics only have TU versions of the pseudo instructions.
1287       bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
1288       SmallVector<SDValue, 8> Operands;
1289       if (IsTU)
1290         Operands.push_back(Node->getOperand(CurOp++));
1291       else
1292         // Skip the undef passthru operand for nomask TA version pseudo
1293         CurOp++;
1294 
1295       MVT IndexVT;
1296       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1297                                  /*IsStridedOrIndexed*/ true, Operands,
1298                                  /*IsLoad=*/true, &IndexVT);
1299 
1300       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1301              "Element count mismatch");
1302 
1303       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1304       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1305       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1306       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
1307         report_fatal_error("The V extension does not support EEW=64 for index "
1308                            "values when XLEN=32");
1309       }
1310       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
1311           IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1312           static_cast<unsigned>(IndexLMUL));
1313       MachineSDNode *Load =
1314           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1315 
1316       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1317         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1318 
1319       ReplaceNode(Node, Load);
1320       return;
1321     }
1322     case Intrinsic::riscv_vlm:
1323     case Intrinsic::riscv_vle:
1324     case Intrinsic::riscv_vle_mask:
1325     case Intrinsic::riscv_vlse:
1326     case Intrinsic::riscv_vlse_mask: {
1327       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1328                       IntNo == Intrinsic::riscv_vlse_mask;
1329       bool IsStrided =
1330           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1331 
1332       MVT VT = Node->getSimpleValueType(0);
1333       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1334 
1335       unsigned CurOp = 2;
      // The riscv_vlm intrinsic is always tail agnostic and has no passthru
      // operand.
1337       bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
      // Masked intrinsics only have TU version pseudo instructions.
1339       bool IsTU = HasPassthruOperand &&
1340                   (IsMasked || !Node->getOperand(CurOp).isUndef());
1341       SmallVector<SDValue, 8> Operands;
1342       if (IsTU)
1343         Operands.push_back(Node->getOperand(CurOp++));
1344       else if (HasPassthruOperand)
        // Skip the undef passthru operand for the unmasked TA version pseudo.
1346         CurOp++;
1347 
1348       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1349                                  Operands, /*IsLoad=*/true);
1350 
1351       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1352       const RISCV::VLEPseudo *P =
1353           RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
1354                               static_cast<unsigned>(LMUL));
1355       MachineSDNode *Load =
1356           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1357 
1358       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1359         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1360 
1361       ReplaceNode(Node, Load);
1362       return;
1363     }
1364     case Intrinsic::riscv_vleff:
1365     case Intrinsic::riscv_vleff_mask: {
1366       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1367 
1368       MVT VT = Node->getSimpleValueType(0);
1369       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1370 
1371       unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
1373       bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
1374       SmallVector<SDValue, 7> Operands;
1375       if (IsTU)
1376         Operands.push_back(Node->getOperand(CurOp++));
1377       else
        // Skip the undef passthru operand for the unmasked TA version pseudo.
1379         CurOp++;
1380 
1381       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1382                                  /*IsStridedOrIndexed*/ false, Operands,
1383                                  /*IsLoad=*/true);
1384 
1385       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1386       const RISCV::VLEPseudo *P =
1387           RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
1388                               Log2SEW, static_cast<unsigned>(LMUL));
1389       MachineSDNode *Load = CurDAG->getMachineNode(
1390           P->Pseudo, DL, Node->getVTList(), Operands);
1391       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1392         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1393 
1394       ReplaceNode(Node, Load);
1395       return;
1396     }
1397     }
1398     break;
1399   }
1400   case ISD::INTRINSIC_VOID: {
1401     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1402     switch (IntNo) {
1403     case Intrinsic::riscv_vsseg2:
1404     case Intrinsic::riscv_vsseg3:
1405     case Intrinsic::riscv_vsseg4:
1406     case Intrinsic::riscv_vsseg5:
1407     case Intrinsic::riscv_vsseg6:
1408     case Intrinsic::riscv_vsseg7:
1409     case Intrinsic::riscv_vsseg8: {
1410       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1411       return;
1412     }
1413     case Intrinsic::riscv_vsseg2_mask:
1414     case Intrinsic::riscv_vsseg3_mask:
1415     case Intrinsic::riscv_vsseg4_mask:
1416     case Intrinsic::riscv_vsseg5_mask:
1417     case Intrinsic::riscv_vsseg6_mask:
1418     case Intrinsic::riscv_vsseg7_mask:
1419     case Intrinsic::riscv_vsseg8_mask: {
1420       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1421       return;
1422     }
1423     case Intrinsic::riscv_vssseg2:
1424     case Intrinsic::riscv_vssseg3:
1425     case Intrinsic::riscv_vssseg4:
1426     case Intrinsic::riscv_vssseg5:
1427     case Intrinsic::riscv_vssseg6:
1428     case Intrinsic::riscv_vssseg7:
1429     case Intrinsic::riscv_vssseg8: {
1430       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1431       return;
1432     }
1433     case Intrinsic::riscv_vssseg2_mask:
1434     case Intrinsic::riscv_vssseg3_mask:
1435     case Intrinsic::riscv_vssseg4_mask:
1436     case Intrinsic::riscv_vssseg5_mask:
1437     case Intrinsic::riscv_vssseg6_mask:
1438     case Intrinsic::riscv_vssseg7_mask:
1439     case Intrinsic::riscv_vssseg8_mask: {
1440       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1441       return;
1442     }
1443     case Intrinsic::riscv_vsoxseg2:
1444     case Intrinsic::riscv_vsoxseg3:
1445     case Intrinsic::riscv_vsoxseg4:
1446     case Intrinsic::riscv_vsoxseg5:
1447     case Intrinsic::riscv_vsoxseg6:
1448     case Intrinsic::riscv_vsoxseg7:
1449     case Intrinsic::riscv_vsoxseg8:
1450       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1451       return;
1452     case Intrinsic::riscv_vsuxseg2:
1453     case Intrinsic::riscv_vsuxseg3:
1454     case Intrinsic::riscv_vsuxseg4:
1455     case Intrinsic::riscv_vsuxseg5:
1456     case Intrinsic::riscv_vsuxseg6:
1457     case Intrinsic::riscv_vsuxseg7:
1458     case Intrinsic::riscv_vsuxseg8:
1459       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1460       return;
1461     case Intrinsic::riscv_vsoxseg2_mask:
1462     case Intrinsic::riscv_vsoxseg3_mask:
1463     case Intrinsic::riscv_vsoxseg4_mask:
1464     case Intrinsic::riscv_vsoxseg5_mask:
1465     case Intrinsic::riscv_vsoxseg6_mask:
1466     case Intrinsic::riscv_vsoxseg7_mask:
1467     case Intrinsic::riscv_vsoxseg8_mask:
1468       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1469       return;
1470     case Intrinsic::riscv_vsuxseg2_mask:
1471     case Intrinsic::riscv_vsuxseg3_mask:
1472     case Intrinsic::riscv_vsuxseg4_mask:
1473     case Intrinsic::riscv_vsuxseg5_mask:
1474     case Intrinsic::riscv_vsuxseg6_mask:
1475     case Intrinsic::riscv_vsuxseg7_mask:
1476     case Intrinsic::riscv_vsuxseg8_mask:
1477       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1478       return;
1479     case Intrinsic::riscv_vsoxei:
1480     case Intrinsic::riscv_vsoxei_mask:
1481     case Intrinsic::riscv_vsuxei:
1482     case Intrinsic::riscv_vsuxei_mask: {
1483       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1484                       IntNo == Intrinsic::riscv_vsuxei_mask;
1485       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1486                        IntNo == Intrinsic::riscv_vsoxei_mask;
1487 
1488       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1489       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1490 
1491       unsigned CurOp = 2;
1492       SmallVector<SDValue, 8> Operands;
1493       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1494 
1495       MVT IndexVT;
1496       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1497                                  /*IsStridedOrIndexed*/ true, Operands,
1498                                  /*IsLoad=*/false, &IndexVT);
1499 
1500       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1501              "Element count mismatch");
1502 
1503       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1504       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1505       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1506       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
1507         report_fatal_error("The V extension does not support EEW=64 for index "
1508                            "values when XLEN=32");
1509       }
1510       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1511           IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
1512           static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
1513       MachineSDNode *Store =
1514           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1515 
1516       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1517         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1518 
1519       ReplaceNode(Node, Store);
1520       return;
1521     }
1522     case Intrinsic::riscv_vsm:
1523     case Intrinsic::riscv_vse:
1524     case Intrinsic::riscv_vse_mask:
1525     case Intrinsic::riscv_vsse:
1526     case Intrinsic::riscv_vsse_mask: {
1527       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1528                       IntNo == Intrinsic::riscv_vsse_mask;
1529       bool IsStrided =
1530           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1531 
1532       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1533       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1534 
1535       unsigned CurOp = 2;
1536       SmallVector<SDValue, 8> Operands;
1537       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1538 
1539       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1540                                  Operands);
1541 
1542       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1543       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1544           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1545       MachineSDNode *Store =
1546           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1547       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1548         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1549 
1550       ReplaceNode(Node, Store);
1551       return;
1552     }
1553     }
1554     break;
1555   }
1556   case ISD::BITCAST: {
1557     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1558     // Just drop bitcasts between vectors if both are fixed or both are
1559     // scalable.
1560     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1561         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1562       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1563       CurDAG->RemoveDeadNode(Node);
1564       return;
1565     }
1566     break;
1567   }
1568   case ISD::INSERT_SUBVECTOR: {
1569     SDValue V = Node->getOperand(0);
1570     SDValue SubV = Node->getOperand(1);
1571     SDLoc DL(SubV);
1572     auto Idx = Node->getConstantOperandVal(2);
1573     MVT SubVecVT = SubV.getSimpleValueType();
1574 
1575     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1576     MVT SubVecContainerVT = SubVecVT;
1577     // Establish the correct scalable-vector types for any fixed-length type.
1578     if (SubVecVT.isFixedLengthVector())
1579       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1580     if (VT.isFixedLengthVector())
1581       VT = TLI.getContainerForFixedLengthVector(VT);
1582 
1583     const auto *TRI = Subtarget->getRegisterInfo();
1584     unsigned SubRegIdx;
1585     std::tie(SubRegIdx, Idx) =
1586         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1587             VT, SubVecContainerVT, Idx, TRI);
1588 
1589     // If the Idx hasn't been completely eliminated then this is a subvector
1590     // insert which doesn't naturally align to a vector register. These must
1591     // be handled using instructions to manipulate the vector registers.
1592     if (Idx != 0)
1593       break;
1594 
1595     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1596     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1597                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1598                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1599     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1600     assert((!IsSubVecPartReg || V.isUndef()) &&
1601            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1602            "the subvector is smaller than a full-sized register");
1603 
1604     // If we haven't set a SubRegIdx, then we must be going between
1605     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1606     if (SubRegIdx == RISCV::NoSubRegister) {
1607       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1608       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1609                  InRegClassID &&
1610              "Unexpected subvector extraction");
1611       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1612       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1613                                                DL, VT, SubV, RC);
1614       ReplaceNode(Node, NewNode);
1615       return;
1616     }
1617 
1618     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1619     ReplaceNode(Node, Insert.getNode());
1620     return;
1621   }
1622   case ISD::EXTRACT_SUBVECTOR: {
1623     SDValue V = Node->getOperand(0);
1624     auto Idx = Node->getConstantOperandVal(1);
1625     MVT InVT = V.getSimpleValueType();
1626     SDLoc DL(V);
1627 
1628     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1629     MVT SubVecContainerVT = VT;
1630     // Establish the correct scalable-vector types for any fixed-length type.
1631     if (VT.isFixedLengthVector())
1632       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1633     if (InVT.isFixedLengthVector())
1634       InVT = TLI.getContainerForFixedLengthVector(InVT);
1635 
1636     const auto *TRI = Subtarget->getRegisterInfo();
1637     unsigned SubRegIdx;
1638     std::tie(SubRegIdx, Idx) =
1639         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1640             InVT, SubVecContainerVT, Idx, TRI);
1641 
1642     // If the Idx hasn't been completely eliminated then this is a subvector
1643     // extract which doesn't naturally align to a vector register. These must
1644     // be handled using instructions to manipulate the vector registers.
1645     if (Idx != 0)
1646       break;
1647 
1648     // If we haven't set a SubRegIdx, then we must be going between
1649     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1650     if (SubRegIdx == RISCV::NoSubRegister) {
1651       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1652       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1653                  InRegClassID &&
1654              "Unexpected subvector extraction");
1655       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1656       SDNode *NewNode =
1657           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1658       ReplaceNode(Node, NewNode);
1659       return;
1660     }
1661 
1662     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1663     ReplaceNode(Node, Extract.getNode());
1664     return;
1665   }
1666   case ISD::SPLAT_VECTOR:
1667   case RISCVISD::VMV_S_X_VL:
1668   case RISCVISD::VFMV_S_F_VL:
1669   case RISCVISD::VMV_V_X_VL:
1670   case RISCVISD::VFMV_V_F_VL: {
1671     // Try to match splat of a scalar load to a strided load with stride of x0.
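    // Illustrative example (not the exact pseudo expansion): splatting a
    // loaded 32-bit scalar can become a zero-stride load such as
    //   vlse32.v v8, (a0), zero
    // which re-reads the same element for every active lane.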
1672     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
1673                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
1674     bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
1675     if (HasPassthruOperand && !Node->getOperand(0).isUndef())
1676       break;
1677     SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
1678     auto *Ld = dyn_cast<LoadSDNode>(Src);
1679     if (!Ld)
1680       break;
1681     EVT MemVT = Ld->getMemoryVT();
1682     // The memory VT should be the same size as the element type.
1683     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1684       break;
1685     if (!IsProfitableToFold(Src, Node, Node) ||
1686         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1687       break;
1688 
1689     SDValue VL;
1690     if (Node->getOpcode() == ISD::SPLAT_VECTOR)
1691       VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1692     else if (IsScalarMove) {
      // We could handle more VL values if we updated the VSETVLI insertion
      // pass to avoid introducing extra VSETVLIs.
1695       if (!isOneConstant(Node->getOperand(2)))
1696         break;
1697       selectVLOp(Node->getOperand(2), VL);
1698     } else
1699       selectVLOp(Node->getOperand(2), VL);
1700 
1701     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1702     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1703 
1704     SDValue Operands[] = {Ld->getBasePtr(),
1705                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1706                           Ld->getChain()};
1707 
1708     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1709     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1710         /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
1711         Log2SEW, static_cast<unsigned>(LMUL));
1712     MachineSDNode *Load =
1713         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1714 
1715     CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1716 
1717     ReplaceNode(Node, Load);
1718     return;
1719   }
1720   }
1721 
1722   // Select the default instruction.
1723   SelectCode(Node);
1724 }
1725 
1726 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1727     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1728   switch (ConstraintID) {
1729   case InlineAsm::Constraint_m:
1730     // We just support simple memory operands that have a single address
1731     // operand and need no special handling.
1732     OutOps.push_back(Op);
1733     return false;
1734   case InlineAsm::Constraint_A:
1735     OutOps.push_back(Op);
1736     return false;
1737   default:
1738     break;
1739   }
1740 
1741   return true;
1742 }
1743 
1744 bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
1745                                              SDValue &Offset) {
1746   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1747     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1748     Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
1749     return true;
1750   }
1751 
1752   return false;
1753 }
1754 
1755 // Select a frame index and an optional immediate offset from an ADD or OR.
1756 bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
1757                                               SDValue &Offset) {
1758   if (SelectAddrFrameIndex(Addr, Base, Offset))
1759     return true;
1760 
1761   if (!CurDAG->isBaseWithConstantOffset(Addr))
1762     return false;
1763 
1764   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
1765     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1766     if (isInt<12>(CVal)) {
1767       Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
1768                                          Subtarget->getXLenVT());
1769       Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
1770                                          Subtarget->getXLenVT());
1771       return true;
1772     }
1773   }
1774 
1775   return false;
1776 }
1777 
1778 // Fold constant addresses.
1779 static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
1780                                const MVT VT, const RISCVSubtarget *Subtarget,
1781                                SDValue Addr, SDValue &Base, SDValue &Offset) {
1782   if (!isa<ConstantSDNode>(Addr))
1783     return false;
1784 
1785   int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
1786 
1787   // If the constant is a simm12, we can fold the whole constant and use X0 as
1788   // the base. If the constant can be materialized with LUI+simm12, use LUI as
1789   // the base. We can't use generateInstSeq because it favors LUI+ADDIW.
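  // Illustrative example (register names are arbitrary): a constant address of
  // 0x1234 can be selected as
  //   lui  a0, 1            # a0 = 0x1000
  //   lw   a1, 0x234(a0)
  // and a simm12 constant can simply use x0 as the base register.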
1790   int64_t Lo12 = SignExtend64<12>(CVal);
1791   int64_t Hi = (uint64_t)CVal - (uint64_t)Lo12;
1792   if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
1793     if (Hi) {
1794       int64_t Hi20 = (Hi >> 12) & 0xfffff;
1795       Base = SDValue(
1796           CurDAG->getMachineNode(RISCV::LUI, DL, VT,
1797                                  CurDAG->getTargetConstant(Hi20, DL, VT)),
1798           0);
1799     } else {
1800       Base = CurDAG->getRegister(RISCV::X0, VT);
1801     }
1802     Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
1803     return true;
1804   }
1805 
1806   // Ask how constant materialization would handle this constant.
1807   RISCVMatInt::InstSeq Seq =
1808       RISCVMatInt::generateInstSeq(CVal, Subtarget->getFeatureBits());
1809 
1810   // If the last instruction would be an ADDI, we can fold its immediate and
1811   // emit the rest of the sequence as the base.
1812   if (Seq.back().Opc != RISCV::ADDI)
1813     return false;
1814   Lo12 = Seq.back().Imm;
1815 
1816   // Drop the last instruction.
1817   Seq.pop_back();
1818   assert(!Seq.empty() && "Expected more instructions in sequence");
1819 
1820   Base = SDValue(selectImmSeq(CurDAG, DL, VT, Seq), 0);
1821   Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
1822   return true;
1823 }
1824 
1825 // Is this ADD instruction only used as the base pointer of scalar loads and
1826 // stores?
1827 static bool isWorthFoldingAdd(SDValue Add) {
1828   for (auto Use : Add->uses()) {
1829     if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
1830         Use->getOpcode() != ISD::ATOMIC_LOAD &&
1831         Use->getOpcode() != ISD::ATOMIC_STORE)
1832       return false;
1833     EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
1834     if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 &&
1835         VT != MVT::f64)
1836       return false;
1837     // Don't allow stores of the value. It must be used as the address.
1838     if (Use->getOpcode() == ISD::STORE &&
1839         cast<StoreSDNode>(Use)->getValue() == Add)
1840       return false;
1841     if (Use->getOpcode() == ISD::ATOMIC_STORE &&
1842         cast<AtomicSDNode>(Use)->getVal() == Add)
1843       return false;
1844   }
1845 
1846   return true;
1847 }
1848 
1849 bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
1850                                          SDValue &Offset) {
1851   if (SelectAddrFrameIndex(Addr, Base, Offset))
1852     return true;
1853 
1854   SDLoc DL(Addr);
1855   MVT VT = Addr.getSimpleValueType();
1856 
1857   if (Addr.getOpcode() == RISCVISD::ADD_LO) {
1858     Base = Addr.getOperand(0);
1859     Offset = Addr.getOperand(1);
1860     return true;
1861   }
1862 
1863   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1864     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1865     if (isInt<12>(CVal)) {
1866       Base = Addr.getOperand(0);
1867       if (Base.getOpcode() == RISCVISD::ADD_LO) {
1868         SDValue LoOperand = Base.getOperand(1);
1869         if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
          // If the Lo in (ADD_LO hi, lo) is a global variable's address (its
          // low part, really), then we can rely on the alignment of that
          // variable to provide a margin of safety before the low part can
          // overflow the 12 bits of the load/store offset. Check if CVal falls
          // within that margin; if so, (low part + CVal) can't overflow.
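          // Illustrative example (assuming an 8-byte-aligned global g): for a
          // base of (ADD_LO (lui %hi(g)), %lo(g)) and CVal == 4, the alignment
          // guarantees %lo(g) + 4 still fits in a simm12, so the +4 can be
          // folded into the relocation as %lo(g+4).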
1875           const DataLayout &DL = CurDAG->getDataLayout();
1876           Align Alignment = commonAlignment(
1877               GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
1878           if (CVal == 0 || Alignment > CVal) {
1879             int64_t CombinedOffset = CVal + GA->getOffset();
1880             Base = Base.getOperand(0);
1881             Offset = CurDAG->getTargetGlobalAddress(
1882                 GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
1883                 CombinedOffset, GA->getTargetFlags());
1884             return true;
1885           }
1886         }
1887       }
1888 
1889       if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
1890         Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
1891       Offset = CurDAG->getTargetConstant(CVal, DL, VT);
1892       return true;
1893     }
1894   }
1895 
1896   // Handle ADD with large immediates.
1897   if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
1898     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1899     assert(!isInt<12>(CVal) && "simm12 not already handled?");
1900 
1901     // Handle immediates in the range [-4096,-2049] or [2048, 4094]. We can use
1902     // an ADDI for part of the offset and fold the rest into the load/store.
1903     // This mirrors the AddiPair PatFrag in RISCVInstrInfo.td.
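    // Illustrative example: for an offset of 3000 this emits
    //   addi t0, a0, 2047
    // and folds the remaining 953 into the load/store, e.g. lw a1, 953(t0).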
1904     if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
1905       int64_t Adj = CVal < 0 ? -2048 : 2047;
1906       Base = SDValue(
1907           CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),
1908                                  CurDAG->getTargetConstant(Adj, DL, VT)),
1909           0);
1910       Offset = CurDAG->getTargetConstant(CVal - Adj, DL, VT);
1911       return true;
1912     }
1913 
1914     // For larger immediates, we might be able to save one instruction from
1915     // constant materialization by folding the Lo12 bits of the immediate into
1916     // the address. We should only do this if the ADD is only used by loads and
    // stores that can fold the Lo12 bits. Otherwise, the ADD will get selected
    // separately with the fully materialized immediate, creating extra
    // instructions.
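    // Illustrative example: for base + 0x12345678 this aims for
    //   lui  t0, 0x12345
    //   add  t0, a0, t0
    //   lw   a1, 0x678(t0)
    // instead of materializing the whole constant and adding it separately.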
1920     if (isWorthFoldingAdd(Addr) &&
1921         selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr.getOperand(1), Base,
1922                            Offset)) {
1923       // Insert an ADD instruction with the materialized Hi52 bits.
1924       Base = SDValue(
1925           CurDAG->getMachineNode(RISCV::ADD, DL, VT, Addr.getOperand(0), Base),
1926           0);
1927       return true;
1928     }
1929   }
1930 
1931   if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset))
1932     return true;
1933 
1934   Base = Addr;
1935   Offset = CurDAG->getTargetConstant(0, DL, VT);
1936   return true;
1937 }
1938 
1939 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1940                                         SDValue &ShAmt) {
1941   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1942   // amount. If there is an AND on the shift amount, we can bypass it if it
1943   // doesn't affect any of those bits.
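  // For example (illustrative), in (srl x, (and y, 31)) with a 32-bit shift
  // width, the AND can be dropped because only bits [4:0] of the amount are
  // read.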
1944   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1945     const APInt &AndMask = N->getConstantOperandAPInt(1);
1946 
1947     // Since the max shift amount is a power of 2 we can subtract 1 to make a
1948     // mask that covers the bits needed to represent all shift amounts.
1949     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1950     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1951 
1952     if (ShMask.isSubsetOf(AndMask)) {
1953       ShAmt = N.getOperand(0);
1954       return true;
1955     }
1956 
1957     // SimplifyDemandedBits may have optimized the mask so try restoring any
1958     // bits that are known zero.
1959     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1960     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1961       ShAmt = N.getOperand(0);
1962       return true;
1963     }
1964   } else if (N.getOpcode() == ISD::SUB &&
1965              isa<ConstantSDNode>(N.getOperand(0))) {
1966     uint64_t Imm = N.getConstantOperandVal(0);
    // If we are shifting by N-X where N == 0 mod ShiftWidth, then just shift
    // by -X to generate a NEG instead of a SUB of a constant.
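    // Illustrative example: a shift amount of (32 - x) with a 32-bit shift
    // width becomes neg (or negw) of x, since only the low 5 bits are read.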
1969     if (Imm != 0 && Imm % ShiftWidth == 0) {
1970       SDLoc DL(N);
1971       EVT VT = N.getValueType();
1972       SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
1973       unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
1974       MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
1975                                                   N.getOperand(1));
1976       ShAmt = SDValue(Neg, 0);
1977       return true;
1978     }
1979   }
1980 
1981   ShAmt = N;
1982   return true;
1983 }
1984 
1985 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1986   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1987       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1988     Val = N.getOperand(0);
1989     return true;
1990   }
1991   MVT VT = N.getSimpleValueType();
1992   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1993     Val = N;
1994     return true;
1995   }
1996 
1997   return false;
1998 }
1999 
2000 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
2001   if (N.getOpcode() == ISD::AND) {
2002     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2003     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
2004       Val = N.getOperand(0);
2005       return true;
2006     }
2007   }
2008   MVT VT = N.getSimpleValueType();
2009   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
2010   if (CurDAG->MaskedValueIsZero(N, Mask)) {
2011     Val = N;
2012     return true;
2013   }
2014 
2015   return false;
2016 }
2017 
2018 /// Look for various patterns that can be done with a SHL that can be folded
2019 /// into a SHXADD. \p ShAmt contains 1, 2, or 3 and is set based on which
2020 /// SHXADD we are trying to match.
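/// For reference, the Zba instruction shNadd rd, rs1, rs2 computes
/// rd = (rs1 << N) + rs2, so the patterns below try to rewrite the masked or
/// shifted operand into a value whose remaining left shift is exactly \p ShAmt.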
2021 bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
2022                                        SDValue &Val) {
2023   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
2024     SDValue N0 = N.getOperand(0);
2025 
2026     bool LeftShift = N0.getOpcode() == ISD::SHL;
2027     if ((LeftShift || N0.getOpcode() == ISD::SRL) &&
2028         isa<ConstantSDNode>(N0.getOperand(1))) {
2029       uint64_t Mask = N.getConstantOperandVal(1);
2030       unsigned C2 = N0.getConstantOperandVal(1);
2031 
2032       unsigned XLen = Subtarget->getXLen();
2033       if (LeftShift)
2034         Mask &= maskTrailingZeros<uint64_t>(C2);
2035       else
2036         Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
2037 
      // Look for (and (shl y, c2), c1) where c1 is a shifted mask with no
      // leading zeros and c3 trailing zeros. We can use an SRLI by c3-c2
      // followed by a SHXADD with c3 for the X amount.
2041       if (isShiftedMask_64(Mask)) {
2042         unsigned Leading = XLen - (64 - countLeadingZeros(Mask));
2043         unsigned Trailing = countTrailingZeros(Mask);
2044         if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
2045           SDLoc DL(N);
2046           EVT VT = N.getValueType();
2047           Val = SDValue(CurDAG->getMachineNode(
2048                             RISCV::SRLI, DL, VT, N0.getOperand(0),
2049                             CurDAG->getTargetConstant(Trailing - C2, DL, VT)),
2050                         0);
2051           return true;
2052         }
        // Look for (and (shr y, c2), c1) where c1 is a shifted mask with c2
        // leading zeros and c3 trailing zeros. We can use an SRLI by c2+c3
        // followed by a SHXADD using c3 for the X amount.
2056         if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
2057           SDLoc DL(N);
2058           EVT VT = N.getValueType();
2059           Val = SDValue(
2060               CurDAG->getMachineNode(
2061                   RISCV::SRLI, DL, VT, N0.getOperand(0),
2062                   CurDAG->getTargetConstant(Leading + Trailing, DL, VT)),
2063               0);
2064           return true;
2065         }
2066       }
2067     }
2068   }
2069 
2070   bool LeftShift = N.getOpcode() == ISD::SHL;
2071   if ((LeftShift || N.getOpcode() == ISD::SRL) &&
2072       isa<ConstantSDNode>(N.getOperand(1))) {
2073     SDValue N0 = N.getOperand(0);
2074     if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
2075         isa<ConstantSDNode>(N0.getOperand(1))) {
2076       uint64_t Mask = N0.getConstantOperandVal(1);
2077       if (isShiftedMask_64(Mask)) {
2078         unsigned C1 = N.getConstantOperandVal(1);
2079         unsigned XLen = Subtarget->getXLen();
2080         unsigned Leading = XLen - (64 - countLeadingZeros(Mask));
2081         unsigned Trailing = countTrailingZeros(Mask);
2082         // Look for (shl (and X, Mask), C1) where Mask has 32 leading zeros and
2083         // C3 trailing zeros. If C1+C3==ShAmt we can use SRLIW+SHXADD.
2084         if (LeftShift && Leading == 32 && Trailing > 0 &&
2085             (Trailing + C1) == ShAmt) {
2086           SDLoc DL(N);
2087           EVT VT = N.getValueType();
2088           Val = SDValue(CurDAG->getMachineNode(
2089                             RISCV::SRLIW, DL, VT, N0.getOperand(0),
2090                             CurDAG->getTargetConstant(Trailing, DL, VT)),
2091                         0);
2092           return true;
2093         }
2094         // Look for (srl (and X, Mask), C1) where Mask has 32 leading zeros and
2095         // C3 trailing zeros. If C3-C1==ShAmt we can use SRLIW+SHXADD.
2096         if (!LeftShift && Leading == 32 && Trailing > C1 &&
2097             (Trailing - C1) == ShAmt) {
2098           SDLoc DL(N);
2099           EVT VT = N.getValueType();
2100           Val = SDValue(CurDAG->getMachineNode(
2101                             RISCV::SRLIW, DL, VT, N0.getOperand(0),
2102                             CurDAG->getTargetConstant(Trailing, DL, VT)),
2103                         0);
2104           return true;
2105         }
2106       }
2107     }
2108   }
2109 
2110   return false;
2111 }
2112 
2113 // Return true if all users of this SDNode* only consume the lower \p Bits.
2114 // This can be used to form W instructions for add/sub/mul/shl even when the
2115 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
2116 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
2117 // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
2118 // the add/sub/mul/shl to become non-W instructions. By checking the users we
2119 // may be able to use a W instruction and CSE with the other instruction if
2120 // this has happened. We could try to detect that the CSE opportunity exists
2121 // before doing this, but that would be more complicated.
// TODO: Does this need to look through AND/OR/XOR to their users to find more
// opportunities?
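//
// Illustrative example: an i64 ADD whose only user is an SW (which stores just
// the low 32 bits) can safely be selected as ADDW, since ADDW and ADD agree on
// those low bits.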
2124 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
2125   assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
2126           Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
2127           Node->getOpcode() == ISD::SRL ||
2128           Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
2129           Node->getOpcode() == RISCVISD::GREV ||
2130           Node->getOpcode() == RISCVISD::GORC ||
2131           isa<ConstantSDNode>(Node)) &&
2132          "Unexpected opcode");
2133 
2134   for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
2135     SDNode *User = *UI;
2136     // Users of this node should have already been instruction selected
2137     if (!User->isMachineOpcode())
2138       return false;
2139 
2140     // TODO: Add more opcodes?
2141     switch (User->getMachineOpcode()) {
2142     default:
2143       return false;
2144     case RISCV::ADDW:
2145     case RISCV::ADDIW:
2146     case RISCV::SUBW:
2147     case RISCV::MULW:
2148     case RISCV::SLLW:
2149     case RISCV::SLLIW:
2150     case RISCV::SRAW:
2151     case RISCV::SRAIW:
2152     case RISCV::SRLW:
2153     case RISCV::SRLIW:
2154     case RISCV::DIVW:
2155     case RISCV::DIVUW:
2156     case RISCV::REMW:
2157     case RISCV::REMUW:
2158     case RISCV::ROLW:
2159     case RISCV::RORW:
2160     case RISCV::RORIW:
2161     case RISCV::CLZW:
2162     case RISCV::CTZW:
2163     case RISCV::CPOPW:
2164     case RISCV::SLLI_UW:
2165     case RISCV::FMV_W_X:
2166     case RISCV::FCVT_H_W:
2167     case RISCV::FCVT_H_WU:
2168     case RISCV::FCVT_S_W:
2169     case RISCV::FCVT_S_WU:
2170     case RISCV::FCVT_D_W:
2171     case RISCV::FCVT_D_WU:
2172       if (Bits < 32)
2173         return false;
2174       break;
2175     case RISCV::SLLI:
2176       // SLLI only uses the lower (XLen - ShAmt) bits.
2177       if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
2178         return false;
2179       break;
2180     case RISCV::ANDI:
2181       if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
2182         return false;
2183       break;
2184     case RISCV::SEXT_B:
2185       if (Bits < 8)
2186         return false;
2187       break;
2188     case RISCV::SEXT_H:
2189     case RISCV::FMV_H_X:
2190     case RISCV::ZEXT_H_RV32:
2191     case RISCV::ZEXT_H_RV64:
2192       if (Bits < 16)
2193         return false;
2194       break;
2195     case RISCV::ADD_UW:
2196     case RISCV::SH1ADD_UW:
2197     case RISCV::SH2ADD_UW:
2198     case RISCV::SH3ADD_UW:
2199       // The first operand to add.uw/shXadd.uw is implicitly zero extended from
2200       // 32 bits.
2201       if (UI.getOperandNo() != 0 || Bits < 32)
2202         return false;
2203       break;
2204     case RISCV::SB:
2205       if (UI.getOperandNo() != 0 || Bits < 8)
2206         return false;
2207       break;
2208     case RISCV::SH:
2209       if (UI.getOperandNo() != 0 || Bits < 16)
2210         return false;
2211       break;
2212     case RISCV::SW:
2213       if (UI.getOperandNo() != 0 || Bits < 32)
2214         return false;
2215       break;
2216     }
2217   }
2218 
2219   return true;
2220 }
2221 
// Select VL as a 5-bit immediate or a value that will become a register. This
// allows us to choose between VSETIVLI and VSETVLI later.
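// For example, a constant AVL of 4 fits the uimm5 field of VSETIVLI, while a
// register AVL requires VSETVLI.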
2224 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
2225   auto *C = dyn_cast<ConstantSDNode>(N);
2226   if (C && isUInt<5>(C->getZExtValue())) {
2227     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
2228                                    N->getValueType(0));
2229   } else if (C && C->isAllOnesValue()) {
2230     // Treat all ones as VLMax.
2231     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
2232                                    N->getValueType(0));
2233   } else if (isa<RegisterSDNode>(N) &&
2234              cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
2235     // All our VL operands use an operand that allows GPRNoX0 or an immediate
2236     // as the register class. Convert X0 to a special immediate to pass the
2237     // MachineVerifier. This is recognized specially by the vsetvli insertion
2238     // pass.
2239     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
2240                                    N->getValueType(0));
2241   } else {
2242     VL = N;
2243   }
2244 
2245   return true;
2246 }
2247 
2248 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
2249   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
2250     return false;
2251   SplatVal = N.getOperand(1);
2252   return true;
2253 }
2254 
2255 using ValidateFn = bool (*)(int64_t);
2256 
2257 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
2258                                    SelectionDAG &DAG,
2259                                    const RISCVSubtarget &Subtarget,
2260                                    ValidateFn ValidateImm) {
2261   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2262       !isa<ConstantSDNode>(N.getOperand(1)))
2263     return false;
2264 
2265   int64_t SplatImm =
2266       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2267 
  // The semantics of RISCVISD::VMV_V_X_VL is that, when the operand type is
  // wider than the resulting vector element type, an implicit truncation
  // first takes place. Therefore, perform a manual
2271   // truncation/sign-extension in order to ignore any truncated bits and catch
2272   // any zero-extended immediate.
2273   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
2274   // sign-extending to (XLenVT -1).
2275   MVT XLenVT = Subtarget.getXLenVT();
2276   assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
2277          "Unexpected splat operand type");
2278   MVT EltVT = N.getSimpleValueType().getVectorElementType();
2279   if (EltVT.bitsLT(XLenVT))
2280     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
2281 
2282   if (!ValidateImm(SplatImm))
2283     return false;
2284 
2285   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
2286   return true;
2287 }
2288 
2289 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
2290   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
2291                                 [](int64_t Imm) { return isInt<5>(Imm); });
2292 }
2293 
2294 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
2295   return selectVSplatSimmHelper(
2296       N, SplatVal, *CurDAG, *Subtarget,
2297       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
2298 }
2299 
2300 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
2301                                                       SDValue &SplatVal) {
2302   return selectVSplatSimmHelper(
2303       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
2304         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
2305       });
2306 }
2307 
2308 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
2309   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2310       !isa<ConstantSDNode>(N.getOperand(1)))
2311     return false;
2312 
2313   int64_t SplatImm =
2314       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2315 
2316   if (!isUInt<5>(SplatImm))
2317     return false;
2318 
2319   SplatVal =
2320       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
2321 
2322   return true;
2323 }
2324 
2325 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
2326                                        SDValue &Imm) {
2327   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
2328     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
2329 
2330     if (!isInt<5>(ImmVal))
2331       return false;
2332 
2333     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
2334     return true;
2335   }
2336 
2337   return false;
2338 }
2339 
2340 // Try to remove sext.w if the input is a W instruction or can be made into
2341 // a W instruction cheaply.
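// Illustrative example: since sext.w is a pseudo for "addiw rd, rs, 0", the
// sequence
//   add    a0, a1, a2
//   sext.w a0, a0
// can be covered by a single
//   addw   a0, a1, a2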
2342 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
2343   // Look for the sext.w pattern, addiw rd, rs1, 0.
2344   if (N->getMachineOpcode() != RISCV::ADDIW ||
2345       !isNullConstant(N->getOperand(1)))
2346     return false;
2347 
2348   SDValue N0 = N->getOperand(0);
2349   if (!N0.isMachineOpcode())
2350     return false;
2351 
2352   switch (N0.getMachineOpcode()) {
2353   default:
2354     break;
2355   case RISCV::ADD:
2356   case RISCV::ADDI:
2357   case RISCV::SUB:
2358   case RISCV::MUL:
2359   case RISCV::SLLI: {
    // Convert sext.w+add/addi/sub/mul/slli to their W instructions. This will
    // create a new independent instruction. This improves latency.
2362     unsigned Opc;
2363     switch (N0.getMachineOpcode()) {
2364     default:
2365       llvm_unreachable("Unexpected opcode!");
2366     case RISCV::ADD:  Opc = RISCV::ADDW;  break;
2367     case RISCV::ADDI: Opc = RISCV::ADDIW; break;
2368     case RISCV::SUB:  Opc = RISCV::SUBW;  break;
2369     case RISCV::MUL:  Opc = RISCV::MULW;  break;
2370     case RISCV::SLLI: Opc = RISCV::SLLIW; break;
2371     }
2372 
2373     SDValue N00 = N0.getOperand(0);
2374     SDValue N01 = N0.getOperand(1);
2375 
2376     // Shift amount needs to be uimm5.
2377     if (N0.getMachineOpcode() == RISCV::SLLI &&
2378         !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
2379       break;
2380 
2381     SDNode *Result =
2382         CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
2383                                N00, N01);
2384     ReplaceUses(N, Result);
2385     return true;
2386   }
2387   case RISCV::ADDW:
2388   case RISCV::ADDIW:
2389   case RISCV::SUBW:
2390   case RISCV::MULW:
2391   case RISCV::SLLIW:
2392   case RISCV::GREVIW:
2393   case RISCV::GORCIW:
    // The result is already sign extended; just remove the sext.w.
2395     // NOTE: We only handle the nodes that are selected with hasAllWUsers.
2396     ReplaceUses(N, N0.getNode());
2397     return true;
2398   }
2399 
2400   return false;
2401 }
2402 
2403 // Optimize masked RVV pseudo instructions with a known all-ones mask to their
2404 // corresponding "unmasked" pseudo versions. The mask we're interested in will
2405 // take the form of a V0 physical register operand, with a glued
2406 // register-setting instruction.
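// Illustrative example: when v0 is produced by a vmset.m, a masked
//   vadd.vv vd, vs2, vs1, v0.t
// behaves like the unmasked
//   vadd.vv vd, vs2, vs1
// (modulo merge/tail policy), so the cheaper unmasked pseudo can be used.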
2407 bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
2408   const RISCV::RISCVMaskedPseudoInfo *I =
2409       RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
2410   if (!I)
2411     return false;
2412 
2413   unsigned MaskOpIdx = I->MaskOpIdx;
2414 
2415   // Check that we're using V0 as a mask register.
2416   if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
2417       cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
2418     return false;
2419 
2420   // The glued user defines V0.
2421   const auto *Glued = N->getGluedNode();
2422 
2423   if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
2424     return false;
2425 
2426   // Check that we're defining V0 as a mask register.
2427   if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
2428       cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
2429     return false;
2430 
2431   // Check the instruction defining V0; it needs to be a VMSET pseudo.
2432   SDValue MaskSetter = Glued->getOperand(2);
2433 
2434   const auto IsVMSet = [](unsigned Opc) {
2435     return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
2436            Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
2437            Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
2438            Opc == RISCV::PseudoVMSET_M_B8;
2439   };
2440 
2441   // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
2442   // undefined behaviour if it's the wrong bitwidth, so we could choose to
2443   // assume that it's all-ones? Same applies to its VL.
2444   if (!MaskSetter->isMachineOpcode() || !IsVMSet(MaskSetter.getMachineOpcode()))
2445     return false;
2446 
2447   // Retrieve the tail policy operand index, if any.
2448   Optional<unsigned> TailPolicyOpIdx;
2449   const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
2450   const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());
2451 
2452   bool IsTA = true;
2453   if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
2454     // The last operand of the pseudo is the policy op, but we might have a
2455     // Glue operand last. We might also have a chain.
2456     TailPolicyOpIdx = N->getNumOperands() - 1;
2457     if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
2458       (*TailPolicyOpIdx)--;
2459     if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
2460       (*TailPolicyOpIdx)--;
2461 
2462     if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
2463           RISCVII::TAIL_AGNOSTIC)) {
2464       // Keep the true-masked instruction when there is no unmasked TU
2465       // instruction
2466       if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
2467         return false;
2468       // We can't use TA if the tie-operand is not IMPLICIT_DEF
2469       if (!N->getOperand(0).isUndef())
2470         IsTA = false;
2471     }
2472   }
2473 
2474   unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;
2475 
2476   // Check that we're dropping the mask operand and any policy operand
  // when we transform to this unmasked pseudo. Additionally, if this
  // instruction is tail agnostic, the unmasked instruction should not have a
  // merge op.
2479   uint64_t TSFlags = TII.get(Opc).TSFlags;
2480   assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
2481          RISCVII::hasDummyMaskOp(TSFlags) &&
2482          !RISCVII::hasVecPolicyOp(TSFlags) &&
2483          "Unexpected pseudo to transform to");
2484   (void)TSFlags;
2485 
2486   SmallVector<SDValue, 8> Ops;
  // Skip the merge operand at index 0 if IsTA.
2488   for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
2489     // Skip the mask, the policy, and the Glue.
2490     SDValue Op = N->getOperand(I);
2491     if (I == MaskOpIdx || I == TailPolicyOpIdx ||
2492         Op.getValueType() == MVT::Glue)
2493       continue;
2494     Ops.push_back(Op);
2495   }
2496 
2497   // Transitively apply any node glued to our new node.
2498   if (auto *TGlued = Glued->getGluedNode())
2499     Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
2500 
2501   SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
2502   ReplaceUses(N, Result);
2503 
2504   return true;
2505 }
2506 
2507 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
2508 // for instruction scheduling.
2509 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
2510                                        CodeGenOpt::Level OptLevel) {
2511   return new RISCVDAGToDAGISel(TM, OptLevel);
2512 }
2513