//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"
#define PASS_NAME "RISCV DAG->DAG Pattern Instruction Selection"

namespace llvm::RISCV {
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace llvm::RISCV

static unsigned getLastNonGlueOrChainOpIdx(const SDNode *Node) {
  assert(Node->getNumOperands() > 0 && "Node with no operands");
  unsigned LastOpIdx = Node->getNumOperands() - 1;
  if (Node->getOperand(LastOpIdx).getValueType() == MVT::Glue)
    --LastOpIdx;
  if (Node->getOperand(LastOpIdx).getValueType() == MVT::Other)
    --LastOpIdx;
  return LastOpIdx;
}

static unsigned getVecPolicyOpIdx(const SDNode *Node, const MCInstrDesc &MCID) {
  assert(RISCVII::hasVecPolicyOp(MCID.TSFlags));
  (void)MCID;
  return getLastNonGlueOrChainOpIdx(Node);
}
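
// For reference, RVV pseudos generally put any policy operand at the end of
// the "real" operand list, followed only by the optional chain and glue, e.g.
// (..., vl, sew, policy, chain, glue). The two helpers above walk backwards
// past glue/chain so getVecPolicyOpIdx() lands on that policy operand.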

void RISCVDAGToDAGISel::PreprocessISelDAG() {
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    SDValue Result;
    switch (N->getOpcode()) {
    case ISD::SPLAT_VECTOR: {
      // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
      // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
      MVT VT = N->getSimpleValueType(0);
      unsigned Opc =
          VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
      SDLoc DL(N);
      SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
      Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
                               N->getOperand(0), VL);
      break;
    }
    case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
      // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
      // load. Done after lowering and combining so that we have a chance to
      // optimize this to VMV_V_X_VL when the upper bits aren't needed.
      assert(N->getNumOperands() == 4 && "Unexpected number of operands");
      MVT VT = N->getSimpleValueType(0);
      SDValue Passthru = N->getOperand(0);
      SDValue Lo = N->getOperand(1);
      SDValue Hi = N->getOperand(2);
      SDValue VL = N->getOperand(3);
      assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
             Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
             "Unexpected VTs!");
      MachineFunction &MF = CurDAG->getMachineFunction();
      RISCVMachineFunctionInfo *FuncInfo =
          MF.getInfo<RISCVMachineFunctionInfo>();
      SDLoc DL(N);

      // We use the same frame index we use for moving two i32s into a 64-bit
      // FPR. This is an analogous operation.
      int FI = FuncInfo->getMoveF64FrameIndex(MF);
      MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
      const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
      SDValue StackSlot =
          CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

      SDValue Chain = CurDAG->getEntryNode();
      Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

      SDValue OffsetSlot =
          CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
      Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                            Align(8));

      Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

      SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
      SDValue IntID =
          CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
      SDValue Ops[] = {Chain,
                       IntID,
                       Passthru,
                       StackSlot,
                       CurDAG->getRegister(RISCV::X0, MVT::i64),
                       VL};

      Result = CurDAG->getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
                                           MVT::i64, MPI, Align(8),
                                           MachineMemOperand::MOLoad);
      break;
    }
    }

    if (Result) {
      LLVM_DEBUG(dbgs() << "RISCV DAG preprocessing replacing:\nOld:    ");
      LLVM_DEBUG(N->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\nNew: ");
      LLVM_DEBUG(Result->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\n");

      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      MadeChange = true;
    }
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}
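
// Roughly, on RV32 the SPLAT_VECTOR_SPLIT_I64_VL lowering above produces:
//   sw lo, 0(slot)
//   sw hi, 4(slot)
//   vlse64.v vd, (slot), zero  ; stride-0 load broadcasts the 8-byte element
// via the riscv_vlse intrinsic with an X0 stride operand; the actual pseudo
// is picked later during selection. Sketch only, for orientation.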

void RISCVDAGToDAGISel::PostprocessISelDAG() {
  HandleSDNode Dummy(CurDAG->getRoot());
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    MadeChange |= doPeepholeSExtW(N);
    MadeChange |= doPeepholeMaskedRVV(N);
  }

  CurDAG->setRoot(Dummy.getValue());

  MadeChange |= doPeepholeMergeVVMFold();

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                            RISCVMatInt::InstSeq &Seq) {
  SDNode *Result = nullptr;
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.getImm(), DL, VT);
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SDImm);
      break;
    case RISCVMatInt::RegX0:
      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg,
                                      CurDAG->getRegister(RISCV::X0, VT));
      break;
    case RISCVMatInt::RegReg:
      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SrcReg);
      break;
    case RISCVMatInt::RegImm:
      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SDImm);
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}
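
// As an illustration, materializing Imm = 0x12345678 typically comes back
// from RISCVMatInt as a two-entry sequence along the lines of:
//   LUI  rd, 0x12345       ; rd = 0x12345000
//   ADDI rd, rd, 0x678     ; rd = 0x12345678
// The first instruction sources X0; each subsequent one chains on the
// previous result, matching the SrcReg update in the loop above.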

static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                         int64_t Imm, const RISCVSubtarget &Subtarget) {
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());

  return selectImmSeq(CurDAG, DL, VT, Seq);
}

static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                           unsigned NF, RISCVII::VLMUL LMUL) {
  static const unsigned M1TupleRegClassIDs[] = {
      RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
      RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
      RISCV::VRN8M1RegClassID};
  static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                                RISCV::VRN3M2RegClassID,
                                                RISCV::VRN4M2RegClassID};

  assert(Regs.size() >= 2 && Regs.size() <= 8);

  unsigned RegClassID;
  unsigned SubReg0;
  switch (LMUL) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm1_0;
    RegClassID = M1TupleRegClassIDs[NF - 2];
    break;
  case RISCVII::VLMUL::LMUL_2:
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm2_0;
    RegClassID = M2TupleRegClassIDs[NF - 2];
    break;
  case RISCVII::VLMUL::LMUL_4:
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm4_0;
    RegClassID = RISCV::VRN2M4RegClassID;
    break;
  }

  SDLoc DL(Regs[0]);
  SmallVector<SDValue, 8> Ops;

  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));

  for (unsigned I = 0; I < Regs.size(); ++I) {
    Ops.push_back(Regs[I]);
    Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
  }
  SDNode *N =
      CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
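
// For example, packing two LMUL=1 values V1 and V2 (NF=2) builds roughly:
//   REG_SEQUENCE VRN2M1RegClassID, V1, sub_vrm1_0, V2, sub_vrm1_1
// i.e. an Untyped value in the two-register tuple class whose sub-registers
// are the individual segment values. The segment load/store selection below
// consumes and produces these tuples.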

void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  // A masked load has a tail policy argument.
  if (IsMasked && IsLoad) {
    // Policy must be a constant.
    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
    Operands.push_back(PolicyOp);
  }

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}
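
// Net effect: the trailing operands are appended in the order
//   base, [stride|index], [mask (as the fixed V0 copy)], vl, sew (log2),
//   [policy (masked loads only)], chain, [glue]
// which is the layout the RVV load/store pseudos selected below expect.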

static bool isAllUndef(ArrayRef<SDValue> Values) {
  return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
}

void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(Merge);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands, /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
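
// Sketch: an unmasked vlseg2 intrinsic with all-undef passthru operands keeps
// IsTU false and selects a single VLSEG pseudo returning an Untyped register
// tuple; each original result is then rewritten to an extract of one tuple
// sub-register (sub_vrm1_0, sub_vrm1_1, ... for LMUL <= 1), and the chain
// moves to the pseudo's second result.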

void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands,
                             /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               XLenVT, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/true, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}
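
// Operand accounting above, as a sketch: the store intrinsic node looks like
//   (chain, intrinsic-id, val0..val{NF-1}, base, [stride], [mask], vl)
// so NumOperands - 4 removes chain, intrinsic-id, base and vl, and the
// optional stride/mask operands are peeled off separately to recover NF.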

void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/false, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
  if (!Subtarget->hasVInstructions())
    return;

  assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
         "Unexpected opcode");

  SDLoc DL(Node);
  MVT XLenVT = Subtarget->getXLenVT();

  bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
  unsigned IntNoOffset = HasChain ? 1 : 0;
  unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);

  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax ||
          IntNo == Intrinsic::riscv_vsetvli_opt ||
          IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
         "Unexpected vsetvli intrinsic");

  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
               IntNo == Intrinsic::riscv_vsetvlimax_opt;
  unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);

  assert(Node->getNumOperands() == Offset + 2 &&
         "Unexpected number of operands");

  unsigned SEW =
      RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
      Node->getConstantOperandVal(Offset + 1) & 0x7);

  unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                            /*MaskAgnostic*/ false);
  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

  SmallVector<EVT, 2> VTs = {XLenVT};
  if (HasChain)
    VTs.push_back(MVT::Other);

  SDValue VLOperand;
  unsigned Opcode = RISCV::PseudoVSETVLI;
  if (VLMax) {
    VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
    Opcode = RISCV::PseudoVSETVLIX0;
  } else {
    VLOperand = Node->getOperand(IntNoOffset + 1);

    if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
      uint64_t AVL = C->getZExtValue();
      if (isUInt<5>(AVL)) {
        SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
        SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
        if (HasChain)
          Ops.push_back(Node->getOperand(0));
        ReplaceNode(
            Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
        return;
      }
    }
  }

  SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
  if (HasChain)
    Ops.push_back(Node->getOperand(0));

  ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
}
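
// Pseudo choice above, illustrated: the vsetvlimax variants pin AVL to X0 and
// use PseudoVSETVLIX0; a constant AVL that fits in 5 bits (e.g. 16) becomes
// the immediate form PseudoVSETIVLI; everything else falls through to
// PseudoVSETVLI with a register AVL. VTYPE is always encoded here as
// tail-agnostic, mask-undisturbed.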

bool RISCVDAGToDAGISel::tryShrinkShlLogicImm(SDNode *Node) {
  MVT VT = Node->getSimpleValueType(0);
  unsigned Opcode = Node->getOpcode();
  assert((Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR) &&
         "Unexpected opcode");
  SDLoc DL(Node);

  // For operations of the form (x << C1) op C2, check if we can use
  // ANDI/ORI/XORI by transforming it into (x op (C2>>C1)) << C1.
  SDValue N0 = Node->getOperand(0);
  SDValue N1 = Node->getOperand(1);

  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
  if (!Cst)
    return false;

  int64_t Val = Cst->getSExtValue();

  // Check if the immediate can already use ANDI/ORI/XORI.
  if (isInt<12>(Val))
    return false;

  SDValue Shift = N0;

  // If Val is simm32 and we have a sext_inreg from i32, then the binop
  // produces at least 33 sign bits. We can peek through the sext_inreg and use
  // a SLLIW at the end.
  bool SignExt = false;
  if (isInt<32>(Val) && N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
      N0.hasOneUse() && cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32) {
    SignExt = true;
    Shift = N0.getOperand(0);
  }

  if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
    return false;

  ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
  if (!ShlCst)
    return false;

  uint64_t ShAmt = ShlCst->getZExtValue();

  // Make sure that we don't change the operation by removing bits.
  // This only matters for OR and XOR; AND is unaffected.
  uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
    return false;

  int64_t ShiftedVal = Val >> ShAmt;
  if (!isInt<12>(ShiftedVal))
    return false;

  // If we peeked through a sext_inreg, make sure the shift is valid for SLLIW.
  if (SignExt && ShAmt >= 32)
    return false;

  // Ok, we can reorder to get a smaller immediate.
  unsigned BinOpc;
  switch (Opcode) {
  default: llvm_unreachable("Unexpected opcode");
  case ISD::AND: BinOpc = RISCV::ANDI; break;
  case ISD::OR:  BinOpc = RISCV::ORI;  break;
  case ISD::XOR: BinOpc = RISCV::XORI; break;
  }

  unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;

  SDNode *BinOp =
      CurDAG->getMachineNode(BinOpc, DL, VT, Shift.getOperand(0),
                             CurDAG->getTargetConstant(ShiftedVal, DL, VT));
  SDNode *SLLI =
      CurDAG->getMachineNode(ShOpc, DL, VT, SDValue(BinOp, 0),
                             CurDAG->getTargetConstant(ShAmt, DL, VT));
  ReplaceNode(Node, SLLI);
  return true;
}
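
// Worked example: (xor (shl x, 12), 0x54000). 0x54000 does not fit in a
// signed 12-bit immediate, but its low 12 bits are zero, so shifting it right
// by the shl amount yields 0x54, which does. The node is reselected as
//   XORI x, 0x54
//   SLLI ., 12
// avoiding a separate constant materialization for 0x54000.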

void RISCVDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // Instruction Selection not handled by the auto-generated tablegen selection
  // should be handled here.
  unsigned Opcode = Node->getOpcode();
  MVT XLenVT = Subtarget->getXLenVT();
  SDLoc DL(Node);
  MVT VT = Node->getSimpleValueType(0);

  switch (Opcode) {
  case ISD::Constant: {
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (VT == XLenVT && ConstNode->isZero()) {
      SDValue New =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
      ReplaceNode(Node, New.getNode());
      return;
    }
    int64_t Imm = ConstNode->getSExtValue();
    // If the upper XLen-16 bits are not used, try to convert this to a simm12
    // by sign extending bit 15.
    if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
        hasAllHUsers(Node))
      Imm = SignExtend64<16>(Imm);
    // If the upper 32 bits are not used, try to convert this into a simm32 by
    // sign extending bit 31.
    if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
      Imm = SignExtend64<32>(Imm);

    ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
    return;
  }
  case ISD::SHL: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
        !isa<ConstantSDNode>(N0.getOperand(1)))
      break;
    unsigned ShAmt = N1C->getZExtValue();
    uint64_t Mask = N0.getConstantOperandVal(1);

    // Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
    // 32 leading zeros and C3 trailing zeros.
    if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
      unsigned XLen = Subtarget->getXLen();
      unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
      unsigned TrailingZeros = countTrailingZeros(Mask);
      if (TrailingZeros > 0 && LeadingZeros == 32) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, N0->getOperand(0),
            CurDAG->getTargetConstant(TrailingZeros, DL, VT));
        SDNode *SLLI = CurDAG->getMachineNode(
            RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
            CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }
    break;
  }
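
  // Example for the SHL fold above: (shl (and X, 0xFFFF0000), 4) on RV64 has
  // a mask with 32 leading and 16 trailing zeros, so it selects
  //   SRLIW X, 16   ; isolate bits 31:16 (upper bits of the result are zero)
  //   SLLI  ., 20   ; 16 + 4
  // instead of materializing the 0xFFFF0000 constant.
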
  case ISD::SRL: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
      break;
    unsigned ShAmt = N1C->getZExtValue();
    uint64_t Mask = N0.getConstantOperandVal(1);

    // Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
    // 32 leading zeros and C3 trailing zeros.
    if (isShiftedMask_64(Mask) && N0.hasOneUse()) {
      unsigned XLen = Subtarget->getXLen();
      unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
      unsigned TrailingZeros = countTrailingZeros(Mask);
      if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, N0->getOperand(0),
            CurDAG->getTargetConstant(TrailingZeros, DL, VT));
        SDNode *SLLI = CurDAG->getMachineNode(
            RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
            CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // Optimize (srl (and X, C2), C) ->
    //          (srli (slli X, XLen-C3), (XLen-C3) + C)
    // where C2 is a mask with C3 trailing ones.
    // Taking into account that the C2 may have had lower bits unset by
    // SimplifyDemandedBits. This avoids materializing the C2 immediate.
    // This pattern occurs when type legalizing right shifts for types with
    // less than XLen bits.
    Mask |= maskTrailingOnes<uint64_t>(ShAmt);
    if (!isMask_64(Mask))
      break;
    unsigned TrailingOnes = countTrailingOnes(Mask);
    if (ShAmt >= TrailingOnes)
      break;
    // If the mask has 32 trailing ones, use SRLIW.
    if (TrailingOnes == 32) {
      SDNode *SRLIW =
          CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, N0->getOperand(0),
                                 CurDAG->getTargetConstant(ShAmt, DL, VT));
      ReplaceNode(Node, SRLIW);
      return;
    }

    // Only do the remaining transforms if the shift has one use.
    if (!N0.hasOneUse())
      break;

    // If C2 is (1 << ShAmt), use bexti if possible.
    if (Subtarget->hasStdExtZbs() && ShAmt + 1 == TrailingOnes) {
      SDNode *BEXTI =
          CurDAG->getMachineNode(RISCV::BEXTI, DL, VT, N0->getOperand(0),
                                 CurDAG->getTargetConstant(ShAmt, DL, VT));
      ReplaceNode(Node, BEXTI);
      return;
    }
    unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                               CurDAG->getTargetConstant(LShAmt, DL, VT));
    SDNode *SRLI = CurDAG->getMachineNode(
        RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
        CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
    ReplaceNode(Node, SRLI);
    return;
  }
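
  // Example for the trailing-ones path above: with Zbs, (srl (and X, 2), 1)
  // refills the mask to 0x3 (TrailingOnes == ShAmt + 1), i.e. a single-bit
  // extract, and becomes BEXTI X, 1. A legalized narrow shift such as
  // (srl (and X, 0xFFF8), 3) refills to 0xFFFF and, on RV64, becomes
  //   SLLI X, 48
  //   SRLI ., 51
  // again avoiding the mask constant.
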
  case ISD::SRA: {
    // Optimize (sra (sext_inreg X, i16), C) ->
    //          (srai (slli X, XLen-16), (XLen-16) + C)
    // And      (sra (sext_inreg X, i8), C) ->
    //          (srai (slli X, XLen-8), (XLen-8) + C)
    // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
    // This transform matches the code we get without Zbb. The shifts are more
    // compressible, and this can help expose CSE opportunities in the sdiv by
    // constant optimization.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
      break;
    unsigned ShAmt = N1C->getZExtValue();
    unsigned ExtSize =
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
    // ExtSize of 32 should use sraiw via tablegen pattern.
    if (ExtSize >= 32 || ShAmt >= ExtSize)
      break;
    unsigned LShAmt = Subtarget->getXLen() - ExtSize;
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                               CurDAG->getTargetConstant(LShAmt, DL, VT));
    SDNode *SRAI = CurDAG->getMachineNode(
        RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
        CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
    ReplaceNode(Node, SRAI);
    return;
  }
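
  // For instance, on RV64 (sra (sext_inreg X, i8), 2) becomes
  //   SLLI X, 56
  //   SRAI ., 58   ; 56 + 2
  // which sign-extends from bit 7 and shifts in a single pass.
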
  case ISD::OR:
  case ISD::XOR:
    if (tryShrinkShlLogicImm(Node))
      return;

    break;
  case ISD::AND: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;

    SDValue N0 = Node->getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if (LeftShift || N0.getOpcode() == ISD::SRL) {
      auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C)
        break;
      unsigned C2 = C->getZExtValue();
      unsigned XLen = Subtarget->getXLen();
      assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");

      uint64_t C1 = N1C->getZExtValue();

      // Keep track of whether this is a c.andi. If we can't use c.andi, the
      // shift pair might offer more compression opportunities.
      // TODO: We could check for C extension here, but we don't have many lit
      // tests with the C extension enabled so not checking gets better
      // coverage.
      // TODO: What if ANDI is faster than the shift pair?
      bool IsCANDI = isInt<6>(N1C->getSExtValue());

      // Clear irrelevant bits in the mask.
      if (LeftShift)
        C1 &= maskTrailingZeros<uint64_t>(C2);
      else
        C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

      // Some transforms should only be done if the shift has a single use or
      // the AND would become (srli (slli X, 32), 32)
      bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

      SDValue X = N0.getOperand(0);

      // Turn (and (srl x, c2), c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
      // with c3 leading zeros.
      if (!LeftShift && isMask_64(C1)) {
        unsigned Leading = XLen - llvm::bit_width(C1);
        if (C2 < Leading) {
          // If the number of leading zeros is C2+32 this can be SRLIW.
          if (C2 + 32 == Leading) {
            SDNode *SRLIW = CurDAG->getMachineNode(
                RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
            ReplaceNode(Node, SRLIW);
            return;
          }

          // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32)
          // if c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
          //
          // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
          // legalized and goes through DAG combine.
          if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
              X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
              cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
            SDNode *SRAIW =
                CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
                                       CurDAG->getTargetConstant(31, DL, VT));
            SDNode *SRLIW = CurDAG->getMachineNode(
                RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
                CurDAG->getTargetConstant(Leading - 32, DL, VT));
            ReplaceNode(Node, SRLIW);
            return;
          }

          // (srli (slli x, c3-c2), c3).
          // Skip if we could use (zext.w (sraiw X, C2)).
          bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
                      X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                      cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
          // Also skip if we can use bexti.
          Skip |= Subtarget->hasStdExtZbs() && Leading == XLen - 1;
          if (OneUseOrZExtW && !Skip) {
            SDNode *SLLI = CurDAG->getMachineNode(
                RISCV::SLLI, DL, VT, X,
                CurDAG->getTargetConstant(Leading - C2, DL, VT));
            SDNode *SRLI = CurDAG->getMachineNode(
                RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
                CurDAG->getTargetConstant(Leading, DL, VT));
            ReplaceNode(Node, SRLI);
            return;
          }
        }
      }
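
      // Example: (and (srl X, 4), 0xFFFF) on RV64 has Leading == 48, so with
      // a single-use shift it becomes
      //   SLLI X, 44   ; Leading - C2
      //   SRLI ., 48   ; Leading
      // extracting bits 19:4 of X without materializing the 0xFFFF mask.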

      // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a
      // mask shifted by c2 bits with c3 leading zeros.
      if (LeftShift && isShiftedMask_64(C1)) {
        unsigned Leading = XLen - llvm::bit_width(C1);

        if (C2 + Leading < XLen &&
            C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
          // Use slli.uw when possible.
          if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
            SDNode *SLLI_UW =
                CurDAG->getMachineNode(RISCV::SLLI_UW, DL, VT, X,
                                       CurDAG->getTargetConstant(C2, DL, VT));
            ReplaceNode(Node, SLLI_UW);
            return;
          }

          // (srli (slli x, c2+c3), c3)
          if (OneUseOrZExtW && !IsCANDI) {
            SDNode *SLLI = CurDAG->getMachineNode(
                RISCV::SLLI, DL, VT, X,
                CurDAG->getTargetConstant(C2 + Leading, DL, VT));
            SDNode *SRLI = CurDAG->getMachineNode(
                RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
                CurDAG->getTargetConstant(Leading, DL, VT));
            ReplaceNode(Node, SRLI);
            return;
          }
        }
      }
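
      // Example: with Zba, (and (shl X, 3), 0x7FFFFFFF8) is exactly
      // zext32(X) << 3 and selects a single SLLI_UW (slli.uw) with shamt 3;
      // without Zba it falls back to the SLLI + SRLI pair built above.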
97273471bf0Spatrick 
973*d415bd75Srobert       // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
974*d415bd75Srobert       // shifted mask with c2 leading zeros and c3 trailing zeros.
975*d415bd75Srobert       if (!LeftShift && isShiftedMask_64(C1)) {
976*d415bd75Srobert         unsigned Leading = XLen - llvm::bit_width(C1);
977*d415bd75Srobert         unsigned Trailing = countTrailingZeros(C1);
978*d415bd75Srobert         if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
979*d415bd75Srobert             !IsCANDI) {
980*d415bd75Srobert           unsigned SrliOpc = RISCV::SRLI;
981*d415bd75Srobert           // If the input is zexti32 we should use SRLIW.
982*d415bd75Srobert           if (X.getOpcode() == ISD::AND &&
983*d415bd75Srobert               isa<ConstantSDNode>(X.getOperand(1)) &&
984*d415bd75Srobert               X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
985*d415bd75Srobert             SrliOpc = RISCV::SRLIW;
986*d415bd75Srobert             X = X.getOperand(0);
987*d415bd75Srobert           }
988*d415bd75Srobert           SDNode *SRLI = CurDAG->getMachineNode(
989*d415bd75Srobert               SrliOpc, DL, VT, X,
990*d415bd75Srobert               CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
991*d415bd75Srobert           SDNode *SLLI = CurDAG->getMachineNode(
992*d415bd75Srobert               RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
993*d415bd75Srobert               CurDAG->getTargetConstant(Trailing, DL, VT));
994*d415bd75Srobert           ReplaceNode(Node, SLLI);
995*d415bd75Srobert           return;
996*d415bd75Srobert         }
997*d415bd75Srobert         // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
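        // Illustrative example: with c2=2 and c1=0x3ffffff0 (Leading=34,
        // Trailing=4),
        //   (and (srl x, 2), 0x3ffffff0) -> (slli (srliw x, 6), 4)
        // (after shifting by 6 the 32-bit result's sign bit is clear, so
        // SRLIW effectively zero-extends here).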
998*d415bd75Srobert         if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
999*d415bd75Srobert             OneUseOrZExtW && !IsCANDI) {
1000*d415bd75Srobert           SDNode *SRLIW = CurDAG->getMachineNode(
1001*d415bd75Srobert               RISCV::SRLIW, DL, VT, X,
1002*d415bd75Srobert               CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
1003*d415bd75Srobert           SDNode *SLLI = CurDAG->getMachineNode(
1004*d415bd75Srobert               RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
1005*d415bd75Srobert               CurDAG->getTargetConstant(Trailing, DL, VT));
1006*d415bd75Srobert           ReplaceNode(Node, SLLI);
1007*d415bd75Srobert           return;
1008*d415bd75Srobert         }
1009*d415bd75Srobert       }
1010*d415bd75Srobert 
1011*d415bd75Srobert       // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
1012*d415bd75Srobert       // shifted mask with no leading zeros and c3 trailing zeros.
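      // As an illustration (values chosen here): with XLen=64, c2=4 and
      // c1=0xfffffffffffff000 (c3=12),
      //   (and (shl x, 4), 0xfffffffffffff000) -> (slli (srli x, 8), 12)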
1013*d415bd75Srobert       if (LeftShift && isShiftedMask_64(C1)) {
1014*d415bd75Srobert         unsigned Leading = XLen - llvm::bit_width(C1);
1015*d415bd75Srobert         unsigned Trailing = countTrailingZeros(C1);
1016*d415bd75Srobert         if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
1017*d415bd75Srobert           SDNode *SRLI = CurDAG->getMachineNode(
1018*d415bd75Srobert               RISCV::SRLI, DL, VT, X,
1019*d415bd75Srobert               CurDAG->getTargetConstant(Trailing - C2, DL, VT));
1020*d415bd75Srobert           SDNode *SLLI = CurDAG->getMachineNode(
1021*d415bd75Srobert               RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
1022*d415bd75Srobert               CurDAG->getTargetConstant(Trailing, DL, VT));
1023*d415bd75Srobert           ReplaceNode(Node, SLLI);
1024*d415bd75Srobert           return;
1025*d415bd75Srobert         }
1026*d415bd75Srobert         // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
1027*d415bd75Srobert         if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
1028*d415bd75Srobert           SDNode *SRLIW = CurDAG->getMachineNode(
1029*d415bd75Srobert               RISCV::SRLIW, DL, VT, X,
1030*d415bd75Srobert               CurDAG->getTargetConstant(Trailing - C2, DL, VT));
1031*d415bd75Srobert           SDNode *SLLI = CurDAG->getMachineNode(
1032*d415bd75Srobert               RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
1033*d415bd75Srobert               CurDAG->getTargetConstant(Trailing, DL, VT));
1034*d415bd75Srobert           ReplaceNode(Node, SLLI);
1035*d415bd75Srobert           return;
1036*d415bd75Srobert         }
1037*d415bd75Srobert       }
1038*d415bd75Srobert     }
1039*d415bd75Srobert 
1040*d415bd75Srobert     if (tryShrinkShlLogicImm(Node))
1041*d415bd75Srobert       return;
1042*d415bd75Srobert 
104373471bf0Spatrick     break;
104473471bf0Spatrick   }
1045*d415bd75Srobert   case ISD::MUL: {
1046*d415bd75Srobert     // Special case for calculating (mul (and X, C2), C1) where the full product
1047*d415bd75Srobert     // fits in XLen bits. We can shift X left by the number of leading zeros in
1048*d415bd75Srobert     // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
1049*d415bd75Srobert     // product has XLen trailing zeros, putting it in the output of MULHU. This
1050*d415bd75Srobert     // can avoid materializing a constant in a register for C2.
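    // A sketch of the idea (constants chosen for illustration): on RV64 with
    // C2 = 0xffffffff,
    //   (mul (and X, 0xffffffff), C1) -> (mulhu (slli X, 32), (slli C1, 32))
    // because ((X << 32) * (C1 << 32)) >> 64 equals (X & 0xffffffff) * C1
    // whenever the full product fits in 64 bits.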
1051*d415bd75Srobert 
1052*d415bd75Srobert     // RHS should be a constant.
1053*d415bd75Srobert     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1054*d415bd75Srobert     if (!N1C || !N1C->hasOneUse())
1055*d415bd75Srobert       break;
1056*d415bd75Srobert 
1057*d415bd75Srobert     // LHS should be an AND with constant.
1058*d415bd75Srobert     SDValue N0 = Node->getOperand(0);
1059*d415bd75Srobert     if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
1060*d415bd75Srobert       break;
1061*d415bd75Srobert 
1062*d415bd75Srobert     uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
1063*d415bd75Srobert 
1064*d415bd75Srobert     // Constant should be a mask.
1065*d415bd75Srobert     if (!isMask_64(C2))
1066*d415bd75Srobert       break;
1067*d415bd75Srobert 
1068*d415bd75Srobert     // If the AND can be done with ANDI, ZEXT.H, or ZEXT.W, don't do this
1069*d415bd75Srobert     // transform when the ANDI/ZEXT has multiple users or the constant is a
1070*d415bd75Srobert     // simm12: that would insert a shift while keeping uses of the AND/ZEXT
1071*d415bd75Srobert     // alive, and shifting a simm12 likely makes it costlier to materialize.
1072*d415bd75Srobert     // Otherwise, using a SLLI might allow the result to be compressed.
1073*d415bd75Srobert     bool IsANDIOrZExt =
1074*d415bd75Srobert         isInt<12>(C2) ||
1075*d415bd75Srobert         (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb()) ||
1076*d415bd75Srobert         (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba());
1077*d415bd75Srobert     if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
1078*d415bd75Srobert       break;
1079*d415bd75Srobert 
1080*d415bd75Srobert     // We need to shift left the AND input and C1 by a total of XLen bits.
1081*d415bd75Srobert 
1082*d415bd75Srobert     // How far left do we need to shift the AND input?
1083*d415bd75Srobert     unsigned XLen = Subtarget->getXLen();
1084*d415bd75Srobert     unsigned LeadingZeros = XLen - llvm::bit_width(C2);
1085*d415bd75Srobert 
1086*d415bd75Srobert     // The constant gets shifted by the remaining amount unless that would
1087*d415bd75Srobert     // shift bits out.
1088*d415bd75Srobert     uint64_t C1 = N1C->getZExtValue();
1089*d415bd75Srobert     unsigned ConstantShift = XLen - LeadingZeros;
1090*d415bd75Srobert     if (ConstantShift > (XLen - llvm::bit_width(C1)))
1091*d415bd75Srobert       break;
1092*d415bd75Srobert 
1093*d415bd75Srobert     uint64_t ShiftedC1 = C1 << ConstantShift;
1094*d415bd75Srobert     // If this is RV32, we need to sign extend the constant.
1095*d415bd75Srobert     if (XLen == 32)
1096*d415bd75Srobert       ShiftedC1 = SignExtend64<32>(ShiftedC1);
1097*d415bd75Srobert 
1098*d415bd75Srobert     // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
1099*d415bd75Srobert     SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
1100*d415bd75Srobert     SDNode *SLLI =
1101*d415bd75Srobert         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
1102*d415bd75Srobert                                CurDAG->getTargetConstant(LeadingZeros, DL, VT));
1103*d415bd75Srobert     SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
1104*d415bd75Srobert                                            SDValue(SLLI, 0), SDValue(Imm, 0));
1105*d415bd75Srobert     ReplaceNode(Node, MULHU);
1106*d415bd75Srobert     return;
1107*d415bd75Srobert   }
110873471bf0Spatrick   case ISD::INTRINSIC_WO_CHAIN: {
110973471bf0Spatrick     unsigned IntNo = Node->getConstantOperandVal(0);
111073471bf0Spatrick     switch (IntNo) {
111173471bf0Spatrick       // By default we do not custom select any intrinsic.
111273471bf0Spatrick     default:
111373471bf0Spatrick       break;
111473471bf0Spatrick     case Intrinsic::riscv_vmsgeu:
111573471bf0Spatrick     case Intrinsic::riscv_vmsge: {
111673471bf0Spatrick       SDValue Src1 = Node->getOperand(1);
111773471bf0Spatrick       SDValue Src2 = Node->getOperand(2);
1118*d415bd75Srobert       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
1119*d415bd75Srobert       bool IsCmpUnsignedZero = false;
112073471bf0Spatrick       // Only custom select scalar second operand.
112173471bf0Spatrick       if (Src2.getValueType() != XLenVT)
112273471bf0Spatrick         break;
112373471bf0Spatrick       // Small constants are handled with patterns.
112473471bf0Spatrick       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
112573471bf0Spatrick         int64_t CVal = C->getSExtValue();
1126*d415bd75Srobert         if (CVal >= -15 && CVal <= 16) {
1127*d415bd75Srobert           if (!IsUnsigned || CVal != 0)
112873471bf0Spatrick             break;
1129*d415bd75Srobert           IsCmpUnsignedZero = true;
113073471bf0Spatrick         }
1131*d415bd75Srobert       }
113273471bf0Spatrick       MVT Src1VT = Src1.getSimpleValueType();
1133*d415bd75Srobert       unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
113473471bf0Spatrick       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
113573471bf0Spatrick       default:
113673471bf0Spatrick         llvm_unreachable("Unexpected LMUL!");
1137*d415bd75Srobert #define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
1138*d415bd75Srobert   case RISCVII::VLMUL::lmulenum:                                               \
1139*d415bd75Srobert     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
1140*d415bd75Srobert                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
1141*d415bd75Srobert     VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
1142*d415bd75Srobert     VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
114373471bf0Spatrick     break;
1144*d415bd75Srobert         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
1145*d415bd75Srobert         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
1146*d415bd75Srobert         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
1147*d415bd75Srobert         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
1148*d415bd75Srobert         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
1149*d415bd75Srobert         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
1150*d415bd75Srobert         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
1151*d415bd75Srobert #undef CASE_VMSLT_VMNAND_VMSET_OPCODES
115273471bf0Spatrick       }
115373471bf0Spatrick       SDValue SEW = CurDAG->getTargetConstant(
115473471bf0Spatrick           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
115573471bf0Spatrick       SDValue VL;
115673471bf0Spatrick       selectVLOp(Node->getOperand(3), VL);
115773471bf0Spatrick 
1158*d415bd75Srobert       // If vmsgeu with 0 immediate, expand it to vmset.
1159*d415bd75Srobert       if (IsCmpUnsignedZero) {
1160*d415bd75Srobert         ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
1161*d415bd75Srobert         return;
1162*d415bd75Srobert       }
1163*d415bd75Srobert 
116473471bf0Spatrick       // Expand to
116573471bf0Spatrick       // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
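      // This works because vmsge{u}(va, x) == ~(va < x) elementwise, and
      // vmnand.mm vd, vd, vd computes ~vd.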
116673471bf0Spatrick       SDValue Cmp = SDValue(
116773471bf0Spatrick           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
116873471bf0Spatrick           0);
116973471bf0Spatrick       ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
117073471bf0Spatrick                                                {Cmp, Cmp, VL, SEW}));
117173471bf0Spatrick       return;
117273471bf0Spatrick     }
117373471bf0Spatrick     case Intrinsic::riscv_vmsgeu_mask:
117473471bf0Spatrick     case Intrinsic::riscv_vmsge_mask: {
117573471bf0Spatrick       SDValue Src1 = Node->getOperand(2);
117673471bf0Spatrick       SDValue Src2 = Node->getOperand(3);
1177*d415bd75Srobert       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
1178*d415bd75Srobert       bool IsCmpUnsignedZero = false;
117973471bf0Spatrick       // Only custom select scalar second operand.
118073471bf0Spatrick       if (Src2.getValueType() != XLenVT)
118173471bf0Spatrick         break;
118273471bf0Spatrick       // Small constants are handled with patterns.
118373471bf0Spatrick       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
118473471bf0Spatrick         int64_t CVal = C->getSExtValue();
1185*d415bd75Srobert         if (CVal >= -15 && CVal <= 16) {
1186*d415bd75Srobert           if (!IsUnsigned || CVal != 0)
118773471bf0Spatrick             break;
1188*d415bd75Srobert           IsCmpUnsignedZero = true;
118973471bf0Spatrick         }
1190*d415bd75Srobert       }
119173471bf0Spatrick       MVT Src1VT = Src1.getSimpleValueType();
1192*d415bd75Srobert       unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
1193*d415bd75Srobert           VMOROpcode;
119473471bf0Spatrick       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
119573471bf0Spatrick       default:
119673471bf0Spatrick         llvm_unreachable("Unexpected LMUL!");
1197*d415bd75Srobert #define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
1198*d415bd75Srobert   case RISCVII::VLMUL::lmulenum:                                               \
1199*d415bd75Srobert     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
1200*d415bd75Srobert                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
1201*d415bd75Srobert     VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
1202*d415bd75Srobert                                  : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
120373471bf0Spatrick     break;
1204*d415bd75Srobert         CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
1205*d415bd75Srobert         CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
1206*d415bd75Srobert         CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
1207*d415bd75Srobert         CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
1208*d415bd75Srobert         CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
1209*d415bd75Srobert         CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
1210*d415bd75Srobert         CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
1211*d415bd75Srobert #undef CASE_VMSLT_OPCODES
121273471bf0Spatrick       }
121373471bf0Spatrick       // Mask operations use the LMUL from the mask type.
121473471bf0Spatrick       switch (RISCVTargetLowering::getLMUL(VT)) {
121573471bf0Spatrick       default:
121673471bf0Spatrick         llvm_unreachable("Unexpected LMUL!");
1217*d415bd75Srobert #define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
1218*d415bd75Srobert   case RISCVII::VLMUL::lmulenum:                                               \
1219*d415bd75Srobert     VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
1220*d415bd75Srobert     VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
1221*d415bd75Srobert     VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
122273471bf0Spatrick     break;
1223*d415bd75Srobert         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
1224*d415bd75Srobert         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
1225*d415bd75Srobert         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
1226*d415bd75Srobert         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
1227*d415bd75Srobert         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
1228*d415bd75Srobert         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
1229*d415bd75Srobert         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
1230*d415bd75Srobert #undef CASE_VMXOR_VMANDN_VMOR_OPCODES
123173471bf0Spatrick       }
123273471bf0Spatrick       SDValue SEW = CurDAG->getTargetConstant(
123373471bf0Spatrick           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
123473471bf0Spatrick       SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
123573471bf0Spatrick       SDValue VL;
123673471bf0Spatrick       selectVLOp(Node->getOperand(5), VL);
123773471bf0Spatrick       SDValue MaskedOff = Node->getOperand(1);
123873471bf0Spatrick       SDValue Mask = Node->getOperand(4);
1239*d415bd75Srobert 
1240*d415bd75Srobert       // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
1241*d415bd75Srobert       if (IsCmpUnsignedZero) {
1242*d415bd75Srobert         // We don't need vmor if the MaskedOff and the Mask are the same
1243*d415bd75Srobert         // value.
1244*d415bd75Srobert         if (Mask == MaskedOff) {
1245*d415bd75Srobert           ReplaceUses(Node, Mask.getNode());
1246*d415bd75Srobert           return;
1247*d415bd75Srobert         }
1248*d415bd75Srobert         ReplaceNode(Node,
1249*d415bd75Srobert                     CurDAG->getMachineNode(VMOROpcode, DL, VT,
1250*d415bd75Srobert                                            {Mask, MaskedOff, VL, MaskSEW}));
1251*d415bd75Srobert         return;
1252*d415bd75Srobert       }
1253*d415bd75Srobert 
125473471bf0Spatrick       // If the MaskedOff value and the Mask are the same value, use
1255*d415bd75Srobert       // vmslt{u}.vx vt, va, x;  vmandn.mm vd, vd, vt
125673471bf0Spatrick       // This avoids needing to copy v0 to vd before starting the next sequence.
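      // This works because the masked result here is m & ~(va < x), i.e.
      // m & (va >= x), which vmandn.mm computes directly as vd = m & ~cmp.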
125773471bf0Spatrick       if (Mask == MaskedOff) {
125873471bf0Spatrick         SDValue Cmp = SDValue(
125973471bf0Spatrick             CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
126073471bf0Spatrick             0);
1261*d415bd75Srobert         ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
126273471bf0Spatrick                                                  {Mask, Cmp, VL, MaskSEW}));
126373471bf0Spatrick         return;
126473471bf0Spatrick       }
126573471bf0Spatrick 
126673471bf0Spatrick       // Mask needs to be copied to V0.
126773471bf0Spatrick       SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
126873471bf0Spatrick                                            RISCV::V0, Mask, SDValue());
126973471bf0Spatrick       SDValue Glue = Chain.getValue(1);
127073471bf0Spatrick       SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
127173471bf0Spatrick 
127273471bf0Spatrick       // Otherwise use
127373471bf0Spatrick       // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
1274*d415bd75Srobert       // The result policy is mask undisturbed. We use the same instructions
1275*d415bd75Srobert       // to emulate mask agnostic behavior, because the agnostic policy
1276*d415bd75Srobert       // allows the result to be either undisturbed or all ones.
127773471bf0Spatrick       SDValue Cmp = SDValue(
127873471bf0Spatrick           CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
127973471bf0Spatrick                                  {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
128073471bf0Spatrick           0);
1281*d415bd75Srobert       // vmxor.mm vd, vd, v0 is used to update the active lanes.
128273471bf0Spatrick       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
128373471bf0Spatrick                                                {Cmp, Mask, VL, MaskSEW}));
128409467b48Spatrick       return;
128509467b48Spatrick     }
1286*d415bd75Srobert     case Intrinsic::riscv_vsetvli_opt:
1287*d415bd75Srobert     case Intrinsic::riscv_vsetvlimax_opt:
1288*d415bd75Srobert       return selectVSETVLI(Node);
128909467b48Spatrick     }
129009467b48Spatrick     break;
129109467b48Spatrick   }
129273471bf0Spatrick   case ISD::INTRINSIC_W_CHAIN: {
129373471bf0Spatrick     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
129473471bf0Spatrick     switch (IntNo) {
129573471bf0Spatrick       // By default we do not custom select any intrinsic.
129673471bf0Spatrick     default:
129773471bf0Spatrick       break;
129873471bf0Spatrick     case Intrinsic::riscv_vsetvli:
1299*d415bd75Srobert     case Intrinsic::riscv_vsetvlimax:
1300*d415bd75Srobert       return selectVSETVLI(Node);
130173471bf0Spatrick     case Intrinsic::riscv_vlseg2:
130273471bf0Spatrick     case Intrinsic::riscv_vlseg3:
130373471bf0Spatrick     case Intrinsic::riscv_vlseg4:
130473471bf0Spatrick     case Intrinsic::riscv_vlseg5:
130573471bf0Spatrick     case Intrinsic::riscv_vlseg6:
130673471bf0Spatrick     case Intrinsic::riscv_vlseg7:
130773471bf0Spatrick     case Intrinsic::riscv_vlseg8: {
130873471bf0Spatrick       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
130973471bf0Spatrick       return;
131073471bf0Spatrick     }
131173471bf0Spatrick     case Intrinsic::riscv_vlseg2_mask:
131273471bf0Spatrick     case Intrinsic::riscv_vlseg3_mask:
131373471bf0Spatrick     case Intrinsic::riscv_vlseg4_mask:
131473471bf0Spatrick     case Intrinsic::riscv_vlseg5_mask:
131573471bf0Spatrick     case Intrinsic::riscv_vlseg6_mask:
131673471bf0Spatrick     case Intrinsic::riscv_vlseg7_mask:
131773471bf0Spatrick     case Intrinsic::riscv_vlseg8_mask: {
131873471bf0Spatrick       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
131973471bf0Spatrick       return;
132073471bf0Spatrick     }
132173471bf0Spatrick     case Intrinsic::riscv_vlsseg2:
132273471bf0Spatrick     case Intrinsic::riscv_vlsseg3:
132373471bf0Spatrick     case Intrinsic::riscv_vlsseg4:
132473471bf0Spatrick     case Intrinsic::riscv_vlsseg5:
132573471bf0Spatrick     case Intrinsic::riscv_vlsseg6:
132673471bf0Spatrick     case Intrinsic::riscv_vlsseg7:
132773471bf0Spatrick     case Intrinsic::riscv_vlsseg8: {
132873471bf0Spatrick       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
132973471bf0Spatrick       return;
133073471bf0Spatrick     }
133173471bf0Spatrick     case Intrinsic::riscv_vlsseg2_mask:
133273471bf0Spatrick     case Intrinsic::riscv_vlsseg3_mask:
133373471bf0Spatrick     case Intrinsic::riscv_vlsseg4_mask:
133473471bf0Spatrick     case Intrinsic::riscv_vlsseg5_mask:
133573471bf0Spatrick     case Intrinsic::riscv_vlsseg6_mask:
133673471bf0Spatrick     case Intrinsic::riscv_vlsseg7_mask:
133773471bf0Spatrick     case Intrinsic::riscv_vlsseg8_mask: {
133873471bf0Spatrick       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
133973471bf0Spatrick       return;
134073471bf0Spatrick     }
134173471bf0Spatrick     case Intrinsic::riscv_vloxseg2:
134273471bf0Spatrick     case Intrinsic::riscv_vloxseg3:
134373471bf0Spatrick     case Intrinsic::riscv_vloxseg4:
134473471bf0Spatrick     case Intrinsic::riscv_vloxseg5:
134573471bf0Spatrick     case Intrinsic::riscv_vloxseg6:
134673471bf0Spatrick     case Intrinsic::riscv_vloxseg7:
134773471bf0Spatrick     case Intrinsic::riscv_vloxseg8:
134873471bf0Spatrick       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
134973471bf0Spatrick       return;
135073471bf0Spatrick     case Intrinsic::riscv_vluxseg2:
135173471bf0Spatrick     case Intrinsic::riscv_vluxseg3:
135273471bf0Spatrick     case Intrinsic::riscv_vluxseg4:
135373471bf0Spatrick     case Intrinsic::riscv_vluxseg5:
135473471bf0Spatrick     case Intrinsic::riscv_vluxseg6:
135573471bf0Spatrick     case Intrinsic::riscv_vluxseg7:
135673471bf0Spatrick     case Intrinsic::riscv_vluxseg8:
135773471bf0Spatrick       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
135873471bf0Spatrick       return;
135973471bf0Spatrick     case Intrinsic::riscv_vloxseg2_mask:
136073471bf0Spatrick     case Intrinsic::riscv_vloxseg3_mask:
136173471bf0Spatrick     case Intrinsic::riscv_vloxseg4_mask:
136273471bf0Spatrick     case Intrinsic::riscv_vloxseg5_mask:
136373471bf0Spatrick     case Intrinsic::riscv_vloxseg6_mask:
136473471bf0Spatrick     case Intrinsic::riscv_vloxseg7_mask:
136573471bf0Spatrick     case Intrinsic::riscv_vloxseg8_mask:
136673471bf0Spatrick       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
136773471bf0Spatrick       return;
136873471bf0Spatrick     case Intrinsic::riscv_vluxseg2_mask:
136973471bf0Spatrick     case Intrinsic::riscv_vluxseg3_mask:
137073471bf0Spatrick     case Intrinsic::riscv_vluxseg4_mask:
137173471bf0Spatrick     case Intrinsic::riscv_vluxseg5_mask:
137273471bf0Spatrick     case Intrinsic::riscv_vluxseg6_mask:
137373471bf0Spatrick     case Intrinsic::riscv_vluxseg7_mask:
137473471bf0Spatrick     case Intrinsic::riscv_vluxseg8_mask:
137573471bf0Spatrick       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
137673471bf0Spatrick       return;
137773471bf0Spatrick     case Intrinsic::riscv_vlseg8ff:
137873471bf0Spatrick     case Intrinsic::riscv_vlseg7ff:
137973471bf0Spatrick     case Intrinsic::riscv_vlseg6ff:
138073471bf0Spatrick     case Intrinsic::riscv_vlseg5ff:
138173471bf0Spatrick     case Intrinsic::riscv_vlseg4ff:
138273471bf0Spatrick     case Intrinsic::riscv_vlseg3ff:
138373471bf0Spatrick     case Intrinsic::riscv_vlseg2ff: {
138473471bf0Spatrick       selectVLSEGFF(Node, /*IsMasked*/ false);
138573471bf0Spatrick       return;
138673471bf0Spatrick     }
138773471bf0Spatrick     case Intrinsic::riscv_vlseg8ff_mask:
138873471bf0Spatrick     case Intrinsic::riscv_vlseg7ff_mask:
138973471bf0Spatrick     case Intrinsic::riscv_vlseg6ff_mask:
139073471bf0Spatrick     case Intrinsic::riscv_vlseg5ff_mask:
139173471bf0Spatrick     case Intrinsic::riscv_vlseg4ff_mask:
139273471bf0Spatrick     case Intrinsic::riscv_vlseg3ff_mask:
139373471bf0Spatrick     case Intrinsic::riscv_vlseg2ff_mask: {
139473471bf0Spatrick       selectVLSEGFF(Node, /*IsMasked*/ true);
139573471bf0Spatrick       return;
139673471bf0Spatrick     }
139773471bf0Spatrick     case Intrinsic::riscv_vloxei:
139873471bf0Spatrick     case Intrinsic::riscv_vloxei_mask:
139973471bf0Spatrick     case Intrinsic::riscv_vluxei:
140073471bf0Spatrick     case Intrinsic::riscv_vluxei_mask: {
140173471bf0Spatrick       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
140273471bf0Spatrick                       IntNo == Intrinsic::riscv_vluxei_mask;
140373471bf0Spatrick       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
140473471bf0Spatrick                        IntNo == Intrinsic::riscv_vloxei_mask;
140573471bf0Spatrick 
140673471bf0Spatrick       MVT VT = Node->getSimpleValueType(0);
140773471bf0Spatrick       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
140873471bf0Spatrick 
140973471bf0Spatrick       unsigned CurOp = 2;
1410*d415bd75Srobert       // Masked intrinsics only have TU version pseudo instructions.
1411*d415bd75Srobert       bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
141273471bf0Spatrick       SmallVector<SDValue, 8> Operands;
1413*d415bd75Srobert       if (IsTU)
141473471bf0Spatrick         Operands.push_back(Node->getOperand(CurOp++));
1415*d415bd75Srobert       else
1416*d415bd75Srobert         // Skip the undef passthru operand for the nomask TA version pseudo.
1417*d415bd75Srobert         CurOp++;
141873471bf0Spatrick 
141973471bf0Spatrick       MVT IndexVT;
142073471bf0Spatrick       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
142173471bf0Spatrick                                  /*IsStridedOrIndexed*/ true, Operands,
1422*d415bd75Srobert                                  /*IsLoad=*/true, &IndexVT);
142373471bf0Spatrick 
142473471bf0Spatrick       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
142573471bf0Spatrick              "Element count mismatch");
142673471bf0Spatrick 
142773471bf0Spatrick       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
142873471bf0Spatrick       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
142973471bf0Spatrick       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1430*d415bd75Srobert       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
1431*d415bd75Srobert         report_fatal_error("The V extension does not support EEW=64 for index "
1432*d415bd75Srobert                            "values when XLEN=32");
1433*d415bd75Srobert       }
143473471bf0Spatrick       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
1435*d415bd75Srobert           IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
143673471bf0Spatrick           static_cast<unsigned>(IndexLMUL));
143773471bf0Spatrick       MachineSDNode *Load =
143873471bf0Spatrick           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
143973471bf0Spatrick 
144073471bf0Spatrick       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
144173471bf0Spatrick         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
144273471bf0Spatrick 
144373471bf0Spatrick       ReplaceNode(Node, Load);
144473471bf0Spatrick       return;
144573471bf0Spatrick     }
1446*d415bd75Srobert     case Intrinsic::riscv_vlm:
144773471bf0Spatrick     case Intrinsic::riscv_vle:
144873471bf0Spatrick     case Intrinsic::riscv_vle_mask:
144973471bf0Spatrick     case Intrinsic::riscv_vlse:
145073471bf0Spatrick     case Intrinsic::riscv_vlse_mask: {
145173471bf0Spatrick       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
145273471bf0Spatrick                       IntNo == Intrinsic::riscv_vlse_mask;
145373471bf0Spatrick       bool IsStrided =
145473471bf0Spatrick           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
145573471bf0Spatrick 
145673471bf0Spatrick       MVT VT = Node->getSimpleValueType(0);
145773471bf0Spatrick       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
145873471bf0Spatrick 
145973471bf0Spatrick       unsigned CurOp = 2;
1460*d415bd75Srobert       // The riscv_vlm intrinsic is always tail agnostic and has no passthru operand.
1461*d415bd75Srobert       bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
1462*d415bd75Srobert       // Masked intrinsics only have TU version pseudo instructions.
1463*d415bd75Srobert       bool IsTU = HasPassthruOperand &&
1464*d415bd75Srobert                   (IsMasked || !Node->getOperand(CurOp).isUndef());
146573471bf0Spatrick       SmallVector<SDValue, 8> Operands;
1466*d415bd75Srobert       if (IsTU)
146773471bf0Spatrick         Operands.push_back(Node->getOperand(CurOp++));
1468*d415bd75Srobert       else if (HasPassthruOperand)
1469*d415bd75Srobert         // Skip the undef passthru operand for the nomask TA version pseudo.
1470*d415bd75Srobert         CurOp++;
147173471bf0Spatrick 
147273471bf0Spatrick       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1473*d415bd75Srobert                                  Operands, /*IsLoad=*/true);
147473471bf0Spatrick 
147573471bf0Spatrick       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
147673471bf0Spatrick       const RISCV::VLEPseudo *P =
1477*d415bd75Srobert           RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
147873471bf0Spatrick                               static_cast<unsigned>(LMUL));
147973471bf0Spatrick       MachineSDNode *Load =
148073471bf0Spatrick           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
148173471bf0Spatrick 
148273471bf0Spatrick       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
148373471bf0Spatrick         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
148473471bf0Spatrick 
148573471bf0Spatrick       ReplaceNode(Node, Load);
148673471bf0Spatrick       return;
148773471bf0Spatrick     }
148873471bf0Spatrick     case Intrinsic::riscv_vleff:
148973471bf0Spatrick     case Intrinsic::riscv_vleff_mask: {
149073471bf0Spatrick       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
149173471bf0Spatrick 
149273471bf0Spatrick       MVT VT = Node->getSimpleValueType(0);
149373471bf0Spatrick       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
149473471bf0Spatrick 
149573471bf0Spatrick       unsigned CurOp = 2;
1496*d415bd75Srobert       // Masked intrinsics only have TU version pseudo instructions.
1497*d415bd75Srobert       bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
149873471bf0Spatrick       SmallVector<SDValue, 7> Operands;
1499*d415bd75Srobert       if (IsTU)
150073471bf0Spatrick         Operands.push_back(Node->getOperand(CurOp++));
1501*d415bd75Srobert       else
1502*d415bd75Srobert         // Skip the undef passthru operand for the nomask TA version pseudo.
1503*d415bd75Srobert         CurOp++;
150473471bf0Spatrick 
150573471bf0Spatrick       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1506*d415bd75Srobert                                  /*IsStridedOrIndexed*/ false, Operands,
1507*d415bd75Srobert                                  /*IsLoad=*/true);
150873471bf0Spatrick 
150973471bf0Spatrick       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
151073471bf0Spatrick       const RISCV::VLEPseudo *P =
1511*d415bd75Srobert           RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
1512*d415bd75Srobert                               Log2SEW, static_cast<unsigned>(LMUL));
1513*d415bd75Srobert       MachineSDNode *Load = CurDAG->getMachineNode(
1514*d415bd75Srobert           P->Pseudo, DL, Node->getVTList(), Operands);
151573471bf0Spatrick       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
151673471bf0Spatrick         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
151773471bf0Spatrick 
1518*d415bd75Srobert       ReplaceNode(Node, Load);
151973471bf0Spatrick       return;
152073471bf0Spatrick     }
152173471bf0Spatrick     }
152273471bf0Spatrick     break;
152373471bf0Spatrick   }
152473471bf0Spatrick   case ISD::INTRINSIC_VOID: {
152573471bf0Spatrick     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
152673471bf0Spatrick     switch (IntNo) {
152773471bf0Spatrick     case Intrinsic::riscv_vsseg2:
152873471bf0Spatrick     case Intrinsic::riscv_vsseg3:
152973471bf0Spatrick     case Intrinsic::riscv_vsseg4:
153073471bf0Spatrick     case Intrinsic::riscv_vsseg5:
153173471bf0Spatrick     case Intrinsic::riscv_vsseg6:
153273471bf0Spatrick     case Intrinsic::riscv_vsseg7:
153373471bf0Spatrick     case Intrinsic::riscv_vsseg8: {
153473471bf0Spatrick       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
153573471bf0Spatrick       return;
153673471bf0Spatrick     }
153773471bf0Spatrick     case Intrinsic::riscv_vsseg2_mask:
153873471bf0Spatrick     case Intrinsic::riscv_vsseg3_mask:
153973471bf0Spatrick     case Intrinsic::riscv_vsseg4_mask:
154073471bf0Spatrick     case Intrinsic::riscv_vsseg5_mask:
154173471bf0Spatrick     case Intrinsic::riscv_vsseg6_mask:
154273471bf0Spatrick     case Intrinsic::riscv_vsseg7_mask:
154373471bf0Spatrick     case Intrinsic::riscv_vsseg8_mask: {
154473471bf0Spatrick       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
154573471bf0Spatrick       return;
154673471bf0Spatrick     }
154773471bf0Spatrick     case Intrinsic::riscv_vssseg2:
154873471bf0Spatrick     case Intrinsic::riscv_vssseg3:
154973471bf0Spatrick     case Intrinsic::riscv_vssseg4:
155073471bf0Spatrick     case Intrinsic::riscv_vssseg5:
155173471bf0Spatrick     case Intrinsic::riscv_vssseg6:
155273471bf0Spatrick     case Intrinsic::riscv_vssseg7:
155373471bf0Spatrick     case Intrinsic::riscv_vssseg8: {
155473471bf0Spatrick       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
155573471bf0Spatrick       return;
155673471bf0Spatrick     }
155773471bf0Spatrick     case Intrinsic::riscv_vssseg2_mask:
155873471bf0Spatrick     case Intrinsic::riscv_vssseg3_mask:
155973471bf0Spatrick     case Intrinsic::riscv_vssseg4_mask:
156073471bf0Spatrick     case Intrinsic::riscv_vssseg5_mask:
156173471bf0Spatrick     case Intrinsic::riscv_vssseg6_mask:
156273471bf0Spatrick     case Intrinsic::riscv_vssseg7_mask:
156373471bf0Spatrick     case Intrinsic::riscv_vssseg8_mask: {
156473471bf0Spatrick       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
156573471bf0Spatrick       return;
156673471bf0Spatrick     }
156773471bf0Spatrick     case Intrinsic::riscv_vsoxseg2:
156873471bf0Spatrick     case Intrinsic::riscv_vsoxseg3:
156973471bf0Spatrick     case Intrinsic::riscv_vsoxseg4:
157073471bf0Spatrick     case Intrinsic::riscv_vsoxseg5:
157173471bf0Spatrick     case Intrinsic::riscv_vsoxseg6:
157273471bf0Spatrick     case Intrinsic::riscv_vsoxseg7:
157373471bf0Spatrick     case Intrinsic::riscv_vsoxseg8:
157473471bf0Spatrick       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
157573471bf0Spatrick       return;
157673471bf0Spatrick     case Intrinsic::riscv_vsuxseg2:
157773471bf0Spatrick     case Intrinsic::riscv_vsuxseg3:
157873471bf0Spatrick     case Intrinsic::riscv_vsuxseg4:
157973471bf0Spatrick     case Intrinsic::riscv_vsuxseg5:
158073471bf0Spatrick     case Intrinsic::riscv_vsuxseg6:
158173471bf0Spatrick     case Intrinsic::riscv_vsuxseg7:
158273471bf0Spatrick     case Intrinsic::riscv_vsuxseg8:
158373471bf0Spatrick       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
158473471bf0Spatrick       return;
158573471bf0Spatrick     case Intrinsic::riscv_vsoxseg2_mask:
158673471bf0Spatrick     case Intrinsic::riscv_vsoxseg3_mask:
158773471bf0Spatrick     case Intrinsic::riscv_vsoxseg4_mask:
158873471bf0Spatrick     case Intrinsic::riscv_vsoxseg5_mask:
158973471bf0Spatrick     case Intrinsic::riscv_vsoxseg6_mask:
159073471bf0Spatrick     case Intrinsic::riscv_vsoxseg7_mask:
159173471bf0Spatrick     case Intrinsic::riscv_vsoxseg8_mask:
159273471bf0Spatrick       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
159373471bf0Spatrick       return;
159473471bf0Spatrick     case Intrinsic::riscv_vsuxseg2_mask:
159573471bf0Spatrick     case Intrinsic::riscv_vsuxseg3_mask:
159673471bf0Spatrick     case Intrinsic::riscv_vsuxseg4_mask:
159773471bf0Spatrick     case Intrinsic::riscv_vsuxseg5_mask:
159873471bf0Spatrick     case Intrinsic::riscv_vsuxseg6_mask:
159973471bf0Spatrick     case Intrinsic::riscv_vsuxseg7_mask:
160073471bf0Spatrick     case Intrinsic::riscv_vsuxseg8_mask:
160173471bf0Spatrick       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
160273471bf0Spatrick       return;
160373471bf0Spatrick     case Intrinsic::riscv_vsoxei:
160473471bf0Spatrick     case Intrinsic::riscv_vsoxei_mask:
160573471bf0Spatrick     case Intrinsic::riscv_vsuxei:
160673471bf0Spatrick     case Intrinsic::riscv_vsuxei_mask: {
160773471bf0Spatrick       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
160873471bf0Spatrick                       IntNo == Intrinsic::riscv_vsuxei_mask;
160973471bf0Spatrick       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
161073471bf0Spatrick                        IntNo == Intrinsic::riscv_vsoxei_mask;
161173471bf0Spatrick 
161273471bf0Spatrick       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
161373471bf0Spatrick       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
161473471bf0Spatrick 
161573471bf0Spatrick       unsigned CurOp = 2;
161673471bf0Spatrick       SmallVector<SDValue, 8> Operands;
161773471bf0Spatrick       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
161873471bf0Spatrick 
161973471bf0Spatrick       MVT IndexVT;
162073471bf0Spatrick       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
162173471bf0Spatrick                                  /*IsStridedOrIndexed*/ true, Operands,
1622*d415bd75Srobert                                  /*IsLoad=*/false, &IndexVT);
162373471bf0Spatrick 
162473471bf0Spatrick       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
162573471bf0Spatrick              "Element count mismatch");
162673471bf0Spatrick 
162773471bf0Spatrick       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
162873471bf0Spatrick       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
162973471bf0Spatrick       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1630*d415bd75Srobert       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
1631*d415bd75Srobert         report_fatal_error("The V extension does not support EEW=64 for index "
1632*d415bd75Srobert                            "values when XLEN=32");
1633*d415bd75Srobert       }
163473471bf0Spatrick       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1635*d415bd75Srobert           IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
1636*d415bd75Srobert           static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
163773471bf0Spatrick       MachineSDNode *Store =
163873471bf0Spatrick           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
163973471bf0Spatrick 
164073471bf0Spatrick       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
164173471bf0Spatrick         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
164273471bf0Spatrick 
164373471bf0Spatrick       ReplaceNode(Node, Store);
164473471bf0Spatrick       return;
164573471bf0Spatrick     }
1646*d415bd75Srobert     case Intrinsic::riscv_vsm:
164773471bf0Spatrick     case Intrinsic::riscv_vse:
164873471bf0Spatrick     case Intrinsic::riscv_vse_mask:
164973471bf0Spatrick     case Intrinsic::riscv_vsse:
165073471bf0Spatrick     case Intrinsic::riscv_vsse_mask: {
165173471bf0Spatrick       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
165273471bf0Spatrick                       IntNo == Intrinsic::riscv_vsse_mask;
165373471bf0Spatrick       bool IsStrided =
165473471bf0Spatrick           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
165573471bf0Spatrick 
165673471bf0Spatrick       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
165773471bf0Spatrick       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
165873471bf0Spatrick 
165973471bf0Spatrick       unsigned CurOp = 2;
166073471bf0Spatrick       SmallVector<SDValue, 8> Operands;
166173471bf0Spatrick       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
166273471bf0Spatrick 
166373471bf0Spatrick       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
166473471bf0Spatrick                                  Operands);
166573471bf0Spatrick 
166673471bf0Spatrick       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
166773471bf0Spatrick       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
166873471bf0Spatrick           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
166973471bf0Spatrick       MachineSDNode *Store =
167073471bf0Spatrick           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
167173471bf0Spatrick       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
167273471bf0Spatrick         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
167373471bf0Spatrick 
167473471bf0Spatrick       ReplaceNode(Node, Store);
167573471bf0Spatrick       return;
167673471bf0Spatrick     }
167773471bf0Spatrick     }
167873471bf0Spatrick     break;
167973471bf0Spatrick   }
168073471bf0Spatrick   case ISD::BITCAST: {
168173471bf0Spatrick     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
168273471bf0Spatrick     // Just drop bitcasts between vectors if both are fixed or both are
168373471bf0Spatrick     // scalable.
168473471bf0Spatrick     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
168573471bf0Spatrick         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
168673471bf0Spatrick       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
168773471bf0Spatrick       CurDAG->RemoveDeadNode(Node);
168873471bf0Spatrick       return;
168973471bf0Spatrick     }
169073471bf0Spatrick     break;
169173471bf0Spatrick   }
169273471bf0Spatrick   case ISD::INSERT_SUBVECTOR: {
169373471bf0Spatrick     SDValue V = Node->getOperand(0);
169473471bf0Spatrick     SDValue SubV = Node->getOperand(1);
169573471bf0Spatrick     SDLoc DL(SubV);
169673471bf0Spatrick     auto Idx = Node->getConstantOperandVal(2);
169773471bf0Spatrick     MVT SubVecVT = SubV.getSimpleValueType();
169873471bf0Spatrick 
169973471bf0Spatrick     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
170073471bf0Spatrick     MVT SubVecContainerVT = SubVecVT;
170173471bf0Spatrick     // Establish the correct scalable-vector types for any fixed-length type.
170273471bf0Spatrick     if (SubVecVT.isFixedLengthVector())
170373471bf0Spatrick       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
170473471bf0Spatrick     if (VT.isFixedLengthVector())
170573471bf0Spatrick       VT = TLI.getContainerForFixedLengthVector(VT);
170673471bf0Spatrick 
170773471bf0Spatrick     const auto *TRI = Subtarget->getRegisterInfo();
170873471bf0Spatrick     unsigned SubRegIdx;
170973471bf0Spatrick     std::tie(SubRegIdx, Idx) =
171073471bf0Spatrick         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
171173471bf0Spatrick             VT, SubVecContainerVT, Idx, TRI);
171273471bf0Spatrick 
171373471bf0Spatrick     // If the Idx hasn't been completely eliminated then this is a subvector
171473471bf0Spatrick     // insert which doesn't naturally align to a vector register. These must
171573471bf0Spatrick     // be handled using instructions to manipulate the vector registers.
171673471bf0Spatrick     if (Idx != 0)
171773471bf0Spatrick       break;
171873471bf0Spatrick 
171973471bf0Spatrick     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
172073471bf0Spatrick     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
172173471bf0Spatrick                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
172273471bf0Spatrick                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
172373471bf0Spatrick     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
172473471bf0Spatrick     assert((!IsSubVecPartReg || V.isUndef()) &&
172573471bf0Spatrick            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
172673471bf0Spatrick            "the subvector is smaller than a full-sized register");
172773471bf0Spatrick 
172873471bf0Spatrick     // If we haven't set a SubRegIdx, then we must be going between
172973471bf0Spatrick     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
173073471bf0Spatrick     if (SubRegIdx == RISCV::NoSubRegister) {
173173471bf0Spatrick       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
173273471bf0Spatrick       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
173373471bf0Spatrick                  InRegClassID &&
173473471bf0Spatrick              "Unexpected subvector extraction");
173573471bf0Spatrick       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
173673471bf0Spatrick       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
173773471bf0Spatrick                                                DL, VT, SubV, RC);
173873471bf0Spatrick       ReplaceNode(Node, NewNode);
173973471bf0Spatrick       return;
174073471bf0Spatrick     }
174173471bf0Spatrick 
174273471bf0Spatrick     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
174373471bf0Spatrick     ReplaceNode(Node, Insert.getNode());
174473471bf0Spatrick     return;
174573471bf0Spatrick   }
174673471bf0Spatrick   case ISD::EXTRACT_SUBVECTOR: {
174773471bf0Spatrick     SDValue V = Node->getOperand(0);
174873471bf0Spatrick     auto Idx = Node->getConstantOperandVal(1);
174973471bf0Spatrick     MVT InVT = V.getSimpleValueType();
175073471bf0Spatrick     SDLoc DL(V);
175173471bf0Spatrick 
175273471bf0Spatrick     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
175373471bf0Spatrick     MVT SubVecContainerVT = VT;
175473471bf0Spatrick     // Establish the correct scalable-vector types for any fixed-length type.
175573471bf0Spatrick     if (VT.isFixedLengthVector())
175673471bf0Spatrick       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
175773471bf0Spatrick     if (InVT.isFixedLengthVector())
175873471bf0Spatrick       InVT = TLI.getContainerForFixedLengthVector(InVT);
175973471bf0Spatrick 
176073471bf0Spatrick     const auto *TRI = Subtarget->getRegisterInfo();
176173471bf0Spatrick     unsigned SubRegIdx;
176273471bf0Spatrick     std::tie(SubRegIdx, Idx) =
176373471bf0Spatrick         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
176473471bf0Spatrick             InVT, SubVecContainerVT, Idx, TRI);
176573471bf0Spatrick 
176673471bf0Spatrick     // If the Idx hasn't been completely eliminated then this is a subvector
176773471bf0Spatrick     // extract which doesn't naturally align to a vector register. These must
176873471bf0Spatrick     // be handled using instructions to manipulate the vector registers.
176973471bf0Spatrick     if (Idx != 0)
177073471bf0Spatrick       break;
177173471bf0Spatrick 
177273471bf0Spatrick     // If we haven't set a SubRegIdx, then we must be going between
177373471bf0Spatrick     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
177473471bf0Spatrick     if (SubRegIdx == RISCV::NoSubRegister) {
177573471bf0Spatrick       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
177673471bf0Spatrick       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
177773471bf0Spatrick                  InRegClassID &&
177873471bf0Spatrick              "Unexpected subvector extraction");
177973471bf0Spatrick       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
178073471bf0Spatrick       SDNode *NewNode =
178173471bf0Spatrick           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
178273471bf0Spatrick       ReplaceNode(Node, NewNode);
178373471bf0Spatrick       return;
178473471bf0Spatrick     }
178573471bf0Spatrick 
178673471bf0Spatrick     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
178773471bf0Spatrick     ReplaceNode(Node, Extract.getNode());
178873471bf0Spatrick     return;
178973471bf0Spatrick   }
1790*d415bd75Srobert   case RISCVISD::VMV_S_X_VL:
1791*d415bd75Srobert   case RISCVISD::VFMV_S_F_VL:
179273471bf0Spatrick   case RISCVISD::VMV_V_X_VL:
179373471bf0Spatrick   case RISCVISD::VFMV_V_F_VL: {
1794*d415bd75Srobert     // Only do this if we have optimized zero-stride vector loads.
1795*d415bd75Srobert     if (!Subtarget->hasOptimizedZeroStrideLoad())
1796*d415bd75Srobert       break;
1797*d415bd75Srobert 
179873471bf0Spatrick     // Try to match splat of a scalar load to a strided load with stride of x0.
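    // Sketch of the match (pseudo names illustrative):
    //   (VMV_V_X_VL undef, (load p), vl) -> (PseudoVLSE p, x0, vl, sew)
    // i.e. a zero-stride vlse that re-reads *p into every element.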
1799*d415bd75Srobert     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
1800*d415bd75Srobert                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
1801*d415bd75Srobert     if (!Node->getOperand(0).isUndef())
1802*d415bd75Srobert       break;
1803*d415bd75Srobert     SDValue Src = Node->getOperand(1);
180473471bf0Spatrick     auto *Ld = dyn_cast<LoadSDNode>(Src);
180573471bf0Spatrick     if (!Ld)
180673471bf0Spatrick       break;
180773471bf0Spatrick     EVT MemVT = Ld->getMemoryVT();
180873471bf0Spatrick     // The memory VT should be the same size as the element type.
180973471bf0Spatrick     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
181073471bf0Spatrick       break;
181173471bf0Spatrick     if (!IsProfitableToFold(Src, Node, Node) ||
181273471bf0Spatrick         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
181373471bf0Spatrick       break;
181473471bf0Spatrick 
181573471bf0Spatrick     SDValue VL;
1816*d415bd75Srobert     if (IsScalarMove) {
1817*d415bd75Srobert       // A scalar move only needs VL=1. We could deal with more VL if we
1818*d415bd75Srobert       // updated the VSETVLI insert pass to avoid introducing more VSETVLI
1819*d415bd75Srobert       // instructions.
1820*d415bd75Srobert       if (!isOneConstant(Node->getOperand(2)))
1821*d415bd75Srobert         break;
1822*d415bd75Srobert     }
1823*d415bd75Srobert     selectVLOp(Node->getOperand(2), VL);
182473471bf0Spatrick 
182573471bf0Spatrick     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
182673471bf0Spatrick     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
182773471bf0Spatrick 
182873471bf0Spatrick     SDValue Operands[] = {Ld->getBasePtr(),
182973471bf0Spatrick                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
183073471bf0Spatrick                           Ld->getChain()};
183173471bf0Spatrick 
183273471bf0Spatrick     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
183373471bf0Spatrick     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1834*d415bd75Srobert         /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
1835*d415bd75Srobert         Log2SEW, static_cast<unsigned>(LMUL));
183673471bf0Spatrick     MachineSDNode *Load =
1837*d415bd75Srobert         CurDAG->getMachineNode(P->Pseudo, DL, {VT, MVT::Other}, Operands);
1838*d415bd75Srobert     // Update the chain.
1839*d415bd75Srobert     ReplaceUses(Src.getValue(1), SDValue(Load, 1));
1840*d415bd75Srobert     // Record the mem-refs
1841*d415bd75Srobert     CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1842*d415bd75Srobert     // Replace the splat with the vlse.
184373471bf0Spatrick     ReplaceNode(Node, Load);
184473471bf0Spatrick     return;
184573471bf0Spatrick   }
184673471bf0Spatrick   }
184709467b48Spatrick 
184809467b48Spatrick   // Select the default instruction.
184909467b48Spatrick   SelectCode(Node);
185009467b48Spatrick }
185109467b48Spatrick 
185209467b48Spatrick bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
185309467b48Spatrick     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
185409467b48Spatrick   switch (ConstraintID) {
185509467b48Spatrick   case InlineAsm::Constraint_m:
185609467b48Spatrick     // We just support simple memory operands that have a single address
185709467b48Spatrick     // operand and need no special handling.
185809467b48Spatrick     OutOps.push_back(Op);
185909467b48Spatrick     return false;
186009467b48Spatrick   case InlineAsm::Constraint_A:
186109467b48Spatrick     OutOps.push_back(Op);
186209467b48Spatrick     return false;
186309467b48Spatrick   default:
186409467b48Spatrick     break;
186509467b48Spatrick   }
186609467b48Spatrick 
186709467b48Spatrick   return true;
186809467b48Spatrick }
186909467b48Spatrick 
1870*d415bd75Srobert bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
1871*d415bd75Srobert                                              SDValue &Offset) {
187273471bf0Spatrick   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
187309467b48Spatrick     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1874*d415bd75Srobert     Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
187509467b48Spatrick     return true;
187609467b48Spatrick   }
1877*d415bd75Srobert 
187809467b48Spatrick   return false;
187909467b48Spatrick }
188009467b48Spatrick 
1881*d415bd75Srobert // Select a frame index and an optional immediate offset from an ADD or OR.
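// For example (illustrative): (add (FrameIndex fi), 16) yields Base = fi and
// Offset = 16, which later folds into a load/store's 12-bit immediate.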
1882*d415bd75Srobert bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
1883*d415bd75Srobert                                               SDValue &Offset) {
1884*d415bd75Srobert   if (SelectAddrFrameIndex(Addr, Base, Offset))
1885*d415bd75Srobert     return true;
1886*d415bd75Srobert 
1887*d415bd75Srobert   if (!CurDAG->isBaseWithConstantOffset(Addr))
1888*d415bd75Srobert     return false;
1889*d415bd75Srobert 
1890*d415bd75Srobert   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
1891*d415bd75Srobert     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1892*d415bd75Srobert     if (isInt<12>(CVal)) {
1893*d415bd75Srobert       Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
1894*d415bd75Srobert                                          Subtarget->getXLenVT());
1895*d415bd75Srobert       Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
1896*d415bd75Srobert                                          Subtarget->getXLenVT());
1897*d415bd75Srobert       return true;
1898*d415bd75Srobert     }
1899*d415bd75Srobert   }
1900*d415bd75Srobert 
1901*d415bd75Srobert   return false;
1902*d415bd75Srobert }
1903*d415bd75Srobert 
1904*d415bd75Srobert // Fold constant addresses.
1905*d415bd75Srobert static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
1906*d415bd75Srobert                                const MVT VT, const RISCVSubtarget *Subtarget,
1907*d415bd75Srobert                                SDValue Addr, SDValue &Base, SDValue &Offset) {
1908*d415bd75Srobert   if (!isa<ConstantSDNode>(Addr))
1909*d415bd75Srobert     return false;
1910*d415bd75Srobert 
1911*d415bd75Srobert   int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
1912*d415bd75Srobert 
1913*d415bd75Srobert   // If the constant is a simm12, we can fold the whole constant and use X0 as
1914*d415bd75Srobert   // the base. If the constant can be materialized with LUI+simm12, use LUI as
1915*d415bd75Srobert   // the base. We can't use generateInstSeq because it favors LUI+ADDIW.
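  // For example, CVal = 0x12345678: Lo12 = 0x678 and Hi = 0x12345000, so the
  // base becomes (LUI 0x12345) and the folded Offset is 0x678.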
  int64_t Lo12 = SignExtend64<12>(CVal);
  int64_t Hi = (uint64_t)CVal - (uint64_t)Lo12;
  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
    if (Hi) {
      int64_t Hi20 = (Hi >> 12) & 0xfffff;
      Base = SDValue(
          CurDAG->getMachineNode(RISCV::LUI, DL, VT,
                                 CurDAG->getTargetConstant(Hi20, DL, VT)),
          0);
    } else {
      Base = CurDAG->getRegister(RISCV::X0, VT);
    }
    Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
    return true;
  }

  // Ask how constant materialization would handle this constant.
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(CVal, Subtarget->getFeatureBits());

  // If the last instruction would be an ADDI, we can fold its immediate and
  // emit the rest of the sequence as the base.
  if (Seq.back().getOpcode() != RISCV::ADDI)
    return false;
  Lo12 = Seq.back().getImm();

  // Drop the last instruction.
  Seq.pop_back();
  assert(!Seq.empty() && "Expected more instructions in sequence");

  Base = SDValue(selectImmSeq(CurDAG, DL, VT, Seq), 0);
  Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
  return true;
}

// Is this ADD instruction only used as the base pointer of scalar loads and
// stores?
static bool isWorthFoldingAdd(SDValue Add) {
  for (auto *Use : Add->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;
    EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
    if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 &&
        VT != MVT::f64)
      return false;
    // Don't allow stores of the value. It must be used as the address.
    if (Use->getOpcode() == ISD::STORE &&
        cast<StoreSDNode>(Use)->getValue() == Add)
      return false;
    if (Use->getOpcode() == ISD::ATOMIC_STORE &&
        cast<AtomicSDNode>(Use)->getVal() == Add)
      return false;
  }

  return true;
}

bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
                                         SDValue &Offset) {
  if (SelectAddrFrameIndex(Addr, Base, Offset))
    return true;

  SDLoc DL(Addr);
  MVT VT = Addr.getSimpleValueType();

  if (Addr.getOpcode() == RISCVISD::ADD_LO) {
    Base = Addr.getOperand(0);
    Offset = Addr.getOperand(1);
    return true;
  }

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal)) {
      Base = Addr.getOperand(0);
      if (Base.getOpcode() == RISCVISD::ADD_LO) {
        SDValue LoOperand = Base.getOperand(1);
        if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
          // If the Lo in (ADD_LO hi, lo) is a global variable's address
          // (its low part, really), then we can rely on the alignment of that
          // variable to provide a margin of safety before the low part can
          // overflow the 12 bits of the load/store offset. Check if CVal falls
          // within that margin; if so, (low part + CVal) can't overflow.
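          // For example, if the global is 16-byte aligned, its low part is a
          // multiple of 16, so any CVal in [0, 15] can be merged into the
          // offset without carrying into the hi part.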
          const DataLayout &DL = CurDAG->getDataLayout();
          Align Alignment = commonAlignment(
              GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
          if (CVal == 0 || Alignment > CVal) {
            int64_t CombinedOffset = CVal + GA->getOffset();
            Base = Base.getOperand(0);
            Offset = CurDAG->getTargetGlobalAddress(
                GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
                CombinedOffset, GA->getTargetFlags());
            return true;
          }
        }
      }

      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
        Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
      Offset = CurDAG->getTargetConstant(CVal, DL, VT);
      return true;
    }
  }

  // Handle ADD with large immediates.
  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    assert(!isInt<12>(CVal) && "simm12 not already handled?");

    // Handle immediates in the range [-4096,-2049] or [2048, 4094]. We can use
    // an ADDI for part of the offset and fold the rest into the load/store.
    // This mirrors the AddiPair PatFrag in RISCVInstrInfo.td.
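    // For example, CVal = 3000: Adj = 2047, so we emit (ADDI base, 2047) and
    // fold the remaining 953 into the load/store offset.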
    if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
      int64_t Adj = CVal < 0 ? -2048 : 2047;
      Base = SDValue(
          CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),
                                 CurDAG->getTargetConstant(Adj, DL, VT)),
          0);
      Offset = CurDAG->getTargetConstant(CVal - Adj, DL, VT);
      return true;
    }

    // For larger immediates, we might be able to save one instruction from
    // constant materialization by folding the Lo12 bits of the immediate into
    // the address. We should only do this if the ADD is only used by loads and
    // stores that can fold the lo12 bits. Otherwise, the ADD will get selected
    // separately with the full materialized immediate, creating extra
    // instructions.
    if (isWorthFoldingAdd(Addr) &&
        selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr.getOperand(1), Base,
                           Offset)) {
      // Insert an ADD instruction with the materialized Hi52 bits.
      Base = SDValue(
          CurDAG->getMachineNode(RISCV::ADD, DL, VT, Addr.getOperand(0), Base),
          0);
      return true;
    }
  }

  if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset))
    return true;

  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, DL, VT);
  return true;
}

bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
                                        SDValue &ShAmt) {
  ShAmt = N;

  // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
  // amount. If there is an AND on the shift amount, we can bypass it if it
  // doesn't affect any of those bits.
  if (ShAmt.getOpcode() == ISD::AND &&
      isa<ConstantSDNode>(ShAmt.getOperand(1))) {
    const APInt &AndMask = ShAmt.getConstantOperandAPInt(1);

    // Since the max shift amount is a power of 2 we can subtract 1 to make a
    // mask that covers the bits needed to represent all shift amounts.
    assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);

    if (ShMask.isSubsetOf(AndMask)) {
      ShAmt = ShAmt.getOperand(0);
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = CurDAG->computeKnownBits(ShAmt.getOperand(0));
      if (!ShMask.isSubsetOf(AndMask | Known.Zero))
        return true;
      ShAmt = ShAmt.getOperand(0);
    }
  }

  if (ShAmt.getOpcode() == ISD::ADD &&
      isa<ConstantSDNode>(ShAmt.getOperand(1))) {
    uint64_t Imm = ShAmt.getConstantOperandVal(1);
    // If we are shifting by X+N where N == 0 mod Size, then just shift by X
    // to avoid the ADD.
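    // For example, on RV64 (srl x, (add y, 64)) can shift by y directly,
    // since only the low 6 bits of the shift amount are read.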
    if (Imm != 0 && Imm % ShiftWidth == 0) {
      ShAmt = ShAmt.getOperand(0);
      return true;
    }
  } else if (ShAmt.getOpcode() == ISD::SUB &&
             isa<ConstantSDNode>(ShAmt.getOperand(0))) {
    uint64_t Imm = ShAmt.getConstantOperandVal(0);
    // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
    // to generate a NEG instead of a SUB of a constant.
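    // For example, on RV64 (sll x, (sub 64, y)) can shift by (neg y), since
    // (64 - y) and -y agree in their low 6 bits.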
    if (Imm != 0 && Imm % ShiftWidth == 0) {
      SDLoc DL(ShAmt);
      EVT VT = ShAmt.getValueType();
      SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
      unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
      MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
                                                  ShAmt.getOperand(1));
      ShAmt = SDValue(Neg, 0);
      return true;
    }
    // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
    // to generate a NOT instead of a SUB of a constant.
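    // For example, on RV64 (sll x, (sub 63, y)) can shift by (not y), since
    // ~y == -1 - y and (63 - y) agree in their low 6 bits.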
    if (Imm % ShiftWidth == ShiftWidth - 1) {
      SDLoc DL(ShAmt);
      EVT VT = ShAmt.getValueType();
      MachineSDNode *Not =
          CurDAG->getMachineNode(RISCV::XORI, DL, VT, ShAmt.getOperand(1),
                                 CurDAG->getTargetConstant(-1, DL, VT));
      ShAmt = SDValue(Not, 0);
      return true;
    }
  }

  return true;
}

bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
      cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
    Val = N.getOperand(0);
    return true;
  }
  MVT VT = N.getSimpleValueType();
  if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
    Val = N;
    return true;
  }

  return false;
}

bool RISCVDAGToDAGISel::selectZExtBits(SDValue N, unsigned Bits, SDValue &Val) {
  if (N.getOpcode() == ISD::AND) {
    auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
      Val = N.getOperand(0);
      return true;
    }
  }
  MVT VT = N.getSimpleValueType();
  APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), Bits);
  if (CurDAG->MaskedValueIsZero(N, Mask)) {
    Val = N;
    return true;
  }

  return false;
}

/// Look for various patterns that can be done with a SHL that can be folded
/// into a SHXADD. \p ShAmt contains 1, 2, or 3 and is set based on which
/// SHXADD we are trying to match.
bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
                                       SDValue &Val) {
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
    SDValue N0 = N.getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if ((LeftShift || N0.getOpcode() == ISD::SRL) &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
      uint64_t Mask = N.getConstantOperandVal(1);
      unsigned C2 = N0.getConstantOperandVal(1);

      unsigned XLen = Subtarget->getXLen();
      if (LeftShift)
        Mask &= maskTrailingZeros<uint64_t>(C2);
      else
        Mask &= maskTrailingOnes<uint64_t>(XLen - C2);

      // Look for (and (shl y, c2), c1) where c1 is a shifted mask with no
      // leading zeros and c3 trailing zeros. We can use an SRLI by c3-c2
      // followed by a SHXADD with c3 for the X amount.
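      // For example, for SH3ADD (ShAmt == 3): (and (shl y, 2), ~7) becomes
      // (SRLI y, 1), and SH3ADD supplies the remaining shift by 3, since
      // ((y >> 1) << 3) == ((y << 2) & ~7).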
      if (isShiftedMask_64(Mask)) {
        unsigned Leading = XLen - llvm::bit_width(Mask);
        unsigned Trailing = countTrailingZeros(Mask);
        if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SRLI, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(Trailing - C2, DL, VT)),
                        0);
          return true;
        }
        // Look for (and (shr y, c2), c1) where c1 is a shifted mask with c2
        // leading zeros and c3 trailing zeros. We can use an SRLI by c2+c3
        // followed by a SHXADD using c3 for the X amount.
        if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(
              CurDAG->getMachineNode(
                  RISCV::SRLI, DL, VT, N0.getOperand(0),
                  CurDAG->getTargetConstant(Leading + Trailing, DL, VT)),
              0);
          return true;
        }
      }
    }
  }

  bool LeftShift = N.getOpcode() == ISD::SHL;
  if ((LeftShift || N.getOpcode() == ISD::SRL) &&
      isa<ConstantSDNode>(N.getOperand(1))) {
    SDValue N0 = N.getOperand(0);
    if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
      uint64_t Mask = N0.getConstantOperandVal(1);
      if (isShiftedMask_64(Mask)) {
        unsigned C1 = N.getConstantOperandVal(1);
        unsigned XLen = Subtarget->getXLen();
        unsigned Leading = XLen - llvm::bit_width(Mask);
        unsigned Trailing = countTrailingZeros(Mask);
        // Look for (shl (and X, Mask), C1) where Mask has 32 leading zeros and
        // C3 trailing zeros. If C1+C3==ShAmt we can use SRLIW+SHXADD.
        if (LeftShift && Leading == 32 && Trailing > 0 &&
            (Trailing + C1) == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SRLIW, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(Trailing, DL, VT)),
                        0);
          return true;
        }
        // Look for (srl (and X, Mask), C1) where Mask has 32 leading zeros and
        // C3 trailing zeros. If C3-C1==ShAmt we can use SRLIW+SHXADD.
        if (!LeftShift && Leading == 32 && Trailing > C1 &&
            (Trailing - C1) == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SRLIW, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(Trailing, DL, VT)),
                        0);
          return true;
        }
      }
    }
  }

  return false;
}

/// Look for various patterns that can be done with a SHL that can be folded
/// into a SHXADD_UW. \p ShAmt contains 1, 2, or 3 and is set based on which
/// SHXADD_UW we are trying to match.
bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
                                          SDValue &Val) {
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
      N.hasOneUse()) {
    SDValue N0 = N.getOperand(0);
    if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
        N0.hasOneUse()) {
      uint64_t Mask = N.getConstantOperandVal(1);
      unsigned C2 = N0.getConstantOperandVal(1);

      Mask &= maskTrailingZeros<uint64_t>(C2);

      // Look for (and (shl y, c2), c1) where c1 is a shifted mask with
      // 32-ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
      // c2-ShAmt followed by SHXADD_UW with ShAmt for the X amount.
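      // For example, for SH2ADD_UW (ShAmt == 2) with c2 == 4 and c1 covering
      // exactly bits [33:4]: we emit (SLLI y, 2), and SH2ADD_UW's implicit
      // zero-extension of the low 32 bits plus its shift by 2 reproduces the
      // masked value.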
      if (isShiftedMask_64(Mask)) {
        unsigned Leading = countLeadingZeros(Mask);
        unsigned Trailing = countTrailingZeros(Mask);
        if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SLLI, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(C2 - ShAmt, DL, VT)),
                        0);
          return true;
        }
      }
    }
  }

  return false;
}

// Return true if all users of this SDNode* only consume the lower \p Bits.
// This can be used to form W instructions for add/sub/mul/shl even when the
// root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
// SimplifyDemandedBits has made it so some users see a sext_inreg and some
// don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
// the add/sub/mul/shl to become non-W instructions. By checking the users we
// may be able to use a W instruction and CSE with the other instruction if
// this has happened. We could try to detect that the CSE opportunity exists
// before doing this, but that would be more complicated.
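// For example, if every user of an (add x, y) is a machine node such as ADDW
// or SW that only reads the low 32 bits of its input, the add itself can
// safely be selected as ADDW.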
bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits,
                                        const unsigned Depth) const {
  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
          Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
          Node->getOpcode() == ISD::SRL || Node->getOpcode() == ISD::AND ||
          Node->getOpcode() == ISD::OR || Node->getOpcode() == ISD::XOR ||
          Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
          isa<ConstantSDNode>(Node) || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return false;

  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    // Users of this node should have already been instruction selected.
    if (!User->isMachineOpcode())
      return false;

    // TODO: Add more opcodes?
    switch (User->getMachineOpcode()) {
    default:
      return false;
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::MULW:
    case RISCV::SLLW:
    case RISCV::SLLIW:
    case RISCV::SRAW:
    case RISCV::SRAIW:
    case RISCV::SRLW:
    case RISCV::SRLIW:
    case RISCV::DIVW:
    case RISCV::DIVUW:
    case RISCV::REMW:
    case RISCV::REMUW:
    case RISCV::ROLW:
    case RISCV::RORW:
    case RISCV::RORIW:
    case RISCV::CLZW:
    case RISCV::CTZW:
    case RISCV::CPOPW:
    case RISCV::SLLI_UW:
    case RISCV::FMV_W_X:
    case RISCV::FCVT_H_W:
    case RISCV::FCVT_H_WU:
    case RISCV::FCVT_S_W:
    case RISCV::FCVT_S_WU:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_D_WU:
      if (Bits < 32)
        return false;
      break;
    case RISCV::SLL:
    case RISCV::SRA:
    case RISCV::SRL:
    case RISCV::ROL:
    case RISCV::ROR:
    case RISCV::BSET:
    case RISCV::BCLR:
    case RISCV::BINV:
      // Shift amount operands only use log2(XLen) bits.
      if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
        return false;
      break;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
        return false;
      break;
    case RISCV::ANDI:
      if (Bits >= (unsigned)llvm::bit_width(User->getConstantOperandVal(1)))
        break;
      goto RecCheck;
    case RISCV::ORI: {
      uint64_t Imm = cast<ConstantSDNode>(User->getOperand(1))->getSExtValue();
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
        break;
      [[fallthrough]];
    }
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::XORI:
    case RISCV::ANDN:
    case RISCV::ORN:
    case RISCV::XNOR:
    case RISCV::SH1ADD:
    case RISCV::SH2ADD:
    case RISCV::SH3ADD:
    RecCheck:
      if (hasAllNBitUsers(User, Bits, Depth + 1))
        break;
      return false;
    case RISCV::SRLI: {
      unsigned ShAmt = User->getConstantOperandVal(1);
      // If we are shifting right by less than Bits, and users don't demand any
      // bits that were shifted into [Bits-1:0], then we can consider this as
      // an N-Bit user.
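      // For example, with Bits == 32 and (SRLI x, 8): if the SRLI's users
      // only consume its low 24 bits, then only bits [31:8] of x are
      // observed, so x still qualifies as a 32-bit value.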
      if (Bits > ShAmt && hasAllNBitUsers(User, Bits - ShAmt, Depth + 1))
        break;
      return false;
    }
    case RISCV::SEXT_B:
    case RISCV::PACKH:
      if (Bits < 8)
        return false;
      break;
    case RISCV::SEXT_H:
    case RISCV::FMV_H_X:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
    case RISCV::PACKW:
      if (Bits < 16)
        return false;
      break;
    case RISCV::PACK:
      if (Bits < (Subtarget->getXLen() / 2))
        return false;
      break;
    case RISCV::ADD_UW:
    case RISCV::SH1ADD_UW:
    case RISCV::SH2ADD_UW:
    case RISCV::SH3ADD_UW:
      // The first operand to add.uw/shXadd.uw is implicitly zero extended from
      // 32 bits.
      if (UI.getOperandNo() != 0 || Bits < 32)
        return false;
      break;
    case RISCV::SB:
      if (UI.getOperandNo() != 0 || Bits < 8)
        return false;
      break;
    case RISCV::SH:
      if (UI.getOperandNo() != 0 || Bits < 16)
        return false;
      break;
    case RISCV::SW:
      if (UI.getOperandNo() != 0 || Bits < 32)
        return false;
      break;
    }
  }

  return true;
}

// Select VL as a 5-bit immediate or a value that will become a register. This
// allows us to choose between VSETIVLI and VSETVLI later.
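// For example, VL = 17 fits in 5 bits and is kept as an immediate (a
// candidate for VSETIVLI), while VL = 64 stays a plain value that will be
// materialized in a register; X0 and all-ones constants are canonicalized to
// VLMaxSentinel.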
bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
  auto *C = dyn_cast<ConstantSDNode>(N);
  if (C && isUInt<5>(C->getZExtValue())) {
    VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
                                   N->getValueType(0));
  } else if (C && C->isAllOnesValue()) {
    // Treat all ones as VLMax.
    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
                                   N->getValueType(0));
  } else if (isa<RegisterSDNode>(N) &&
             cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
    // All our VL operands use an operand that allows GPRNoX0 or an immediate
    // as the register class. Convert X0 to a special immediate to pass the
    // MachineVerifier. This is recognized specially by the vsetvli insertion
    // pass.
    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
                                   N->getValueType(0));
  } else {
    VL = N;
  }

  return true;
}

bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
    return false;
  assert(N.getNumOperands() == 3 && "Unexpected number of operands");
  SplatVal = N.getOperand(1);
  return true;
}

using ValidateFn = bool (*)(int64_t);

static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
                                   SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget,
                                   ValidateFn ValidateImm) {
  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
      !isa<ConstantSDNode>(N.getOperand(1)))
    return false;
  assert(N.getNumOperands() == 3 && "Unexpected number of operands");

  int64_t SplatImm =
      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();

  // The semantics of RISCVISD::VMV_V_X_VL are that when the operand type is
  // wider than the resulting vector element type, an implicit truncation
  // first takes place. Therefore, perform a manual truncation/sign-extension
  // in order to ignore any truncated bits and catch any zero-extended
  // immediate.
  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
  // sign-extending to (XLenVT -1).
  MVT XLenVT = Subtarget.getXLenVT();
  assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
         "Unexpected splat operand type");
  MVT EltVT = N.getSimpleValueType().getVectorElementType();
  if (EltVT.bitsLT(XLenVT))
    SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());

  if (!ValidateImm(SplatImm))
    return false;

  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
  return true;
}

bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
                                [](int64_t Imm) { return isInt<5>(Imm); });
}

bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
  return selectVSplatSimmHelper(
      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
}

bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
                                                      SDValue &SplatVal) {
  return selectVSplatSimmHelper(
      N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
        return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
      });
}

bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
      !isa<ConstantSDNode>(N.getOperand(1)))
    return false;

  int64_t SplatImm =
      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();

  if (!isUInt<5>(SplatImm))
    return false;

  SplatVal =
      CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());

  return true;
}

bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
                                       SDValue &Imm) {
  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
    int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);

    if (!isInt<5>(ImmVal))
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
    return true;
  }

  return false;
}

// Try to remove sext.w if the input is a W instruction or can be made into
// a W instruction cheaply.
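// For example, (ADDIW (ADD a, b), 0) becomes (ADDW a, b), and
// (ADDIW (ADDW a, b), 0) is replaced by the ADDW directly.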
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  // Look for the sext.w pattern, addiw rd, rs1, 0.
  if (N->getMachineOpcode() != RISCV::ADDIW ||
      !isNullConstant(N->getOperand(1)))
    return false;

  SDValue N0 = N->getOperand(0);
  if (!N0.isMachineOpcode())
    return false;

  switch (N0.getMachineOpcode()) {
  default:
    break;
  case RISCV::ADD:
  case RISCV::ADDI:
  case RISCV::SUB:
  case RISCV::MUL:
  case RISCV::SLLI: {
    // Convert sext.w+add/sub/mul/slli to their W instructions. This will
    // create a new independent instruction. This improves latency.
    unsigned Opc;
    switch (N0.getMachineOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode!");
    case RISCV::ADD:  Opc = RISCV::ADDW;  break;
    case RISCV::ADDI: Opc = RISCV::ADDIW; break;
    case RISCV::SUB:  Opc = RISCV::SUBW;  break;
    case RISCV::MUL:  Opc = RISCV::MULW;  break;
    case RISCV::SLLI: Opc = RISCV::SLLIW; break;
    }

    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);

    // The shift amount needs to be a uimm5.
    if (N0.getMachineOpcode() == RISCV::SLLI &&
        !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
      break;

    SDNode *Result =
        CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
                               N00, N01);
    ReplaceUses(N, Result);
    return true;
  }
  case RISCV::ADDW:
  case RISCV::ADDIW:
  case RISCV::SUBW:
  case RISCV::MULW:
  case RISCV::SLLIW:
  case RISCV::PACKW:
    // The result is already sign extended; just remove the sext.w.
    // NOTE: We only handle the nodes that are selected with hasAllWUsers.
    ReplaceUses(N, N0.getNode());
    return true;
  }

  return false;
}

// Return true if we can be sure the mask of N is an all-ones mask.
static bool usesAllOnesMask(SDNode *N, unsigned MaskOpIdx) {
  // Check that we're using V0 as a mask register.
  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
      cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
    return false;

  // The glued user defines V0.
  const auto *Glued = N->getGluedNode();

  if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
    return false;

  // Check that we're defining V0 as a mask register.
  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
    return false;

  // Check the instruction defining V0; it needs to be a VMSET pseudo.
  SDValue MaskSetter = Glued->getOperand(2);

  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;
  };

  // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
  // undefined behaviour if it's the wrong bitwidth, so we could choose to
  // assume that it's all-ones? Same applies to its VL.
  return MaskSetter->isMachineOpcode() &&
         IsVMSet(MaskSetter.getMachineOpcode());
}

// Optimize masked RVV pseudo instructions with a known all-ones mask to their
// corresponding "unmasked" pseudo versions. The mask we're interested in will
// take the form of a V0 physical register operand, with a glued
// register-setting instruction.
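// For example, a PseudoVADD_VV_*_MASK whose V0 operand is defined by a
// PseudoVMSET_M_* can be rewritten to the unmasked PseudoVADD_VV_* form,
// dropping the mask and policy operands.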
bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
  const RISCV::RISCVMaskedPseudoInfo *I =
      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
  if (!I)
    return false;

  unsigned MaskOpIdx = I->MaskOpIdx;

  if (!usesAllOnesMask(N, MaskOpIdx))
    return false;

  // Retrieve the tail policy operand index, if any.
  std::optional<unsigned> TailPolicyOpIdx;
  const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
  const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());

  bool IsTA = true;
  if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
    TailPolicyOpIdx = getVecPolicyOpIdx(N, MaskedMCID);
    if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
          RISCVII::TAIL_AGNOSTIC)) {
      // Keep the true-masked instruction when there is no unmasked TU
      // instruction.
      if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
        return false;
      // We can't use TA if the tie-operand is not IMPLICIT_DEF.
      if (!N->getOperand(0).isUndef())
        IsTA = false;
    }
  }

  unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;

  // Check that we're dropping the mask operand and any policy operand
  // when we transform to this unmasked pseudo. Additionally, if this
  // instruction is tail agnostic, the unmasked instruction should not have a
  // merge op.
  uint64_t TSFlags = TII.get(Opc).TSFlags;
  assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
         RISCVII::hasDummyMaskOp(TSFlags) &&
         !RISCVII::hasVecPolicyOp(TSFlags) &&
         "Unexpected pseudo to transform to");
  (void)TSFlags;

  SmallVector<SDValue, 8> Ops;
  // Skip the merge operand at index 0 if IsTA.
  for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
    // Skip the mask, the policy, and the Glue.
    SDValue Op = N->getOperand(I);
    if (I == MaskOpIdx || I == TailPolicyOpIdx ||
        Op.getValueType() == MVT::Glue)
      continue;
    Ops.push_back(Op);
  }

  // Transitively apply any node glued to our new node.
  const auto *Glued = N->getGluedNode();
  if (auto *TGlued = Glued->getGluedNode())
    Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));

  SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
  Result->setFlags(N->getFlags());
  ReplaceUses(N, Result);

  return true;
}

// Try to fold a VMERGE_VVM with an unmasked intrinsic into a masked
// intrinsic. For the TU form, the peephole currently only applies when the
// merge operand is the same as the false operand. E.g.
// (VMERGE_VVM_M1_TU False, False, (VADD_M1 ...), ...) -> (VADD_VV_M1_MASK)
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N, bool IsTA) {
  unsigned Offset = IsTA ? 0 : 1;
  uint64_t Policy = IsTA ? RISCVII::TAIL_AGNOSTIC : /*TUMU*/ 0;

  SDValue False = N->getOperand(0 + Offset);
  SDValue True = N->getOperand(1 + Offset);
  SDValue Mask = N->getOperand(2 + Offset);
  SDValue VL = N->getOperand(3 + Offset);

  assert(True.getResNo() == 0 &&
         "Expect True is the first output of an instruction.");

  // N must be the only user of True.
  if (!True.hasOneUse())
    return false;

  if (!True.isMachineOpcode())
    return false;

  unsigned TrueOpc = True.getMachineOpcode();

  // Skip if True has a merge operand.
  // TODO: Deal with True having the same merge operand as N.
  if (RISCVII::hasMergeOp(TII->get(TrueOpc).TSFlags))
    return false;

  // Skip if True has side effects.
  // TODO: Support vleff and vlsegff.
  if (TII->get(TrueOpc).hasUnmodeledSideEffects())
    return false;

  // Only handle True when it is an unmasked intrinsic for now.
  const RISCV::RISCVMaskedPseudoInfo *Info =
      RISCV::lookupMaskedIntrinsicByUnmaskedTA(TrueOpc);

  if (!Info)
    return false;

  // The last operand of an unmasked intrinsic should be sew or chain.
  bool HasChainOp =
      True.getOperand(True.getNumOperands() - 1).getValueType() == MVT::Other;

  if (HasChainOp) {
    // Avoid creating cycles in the DAG. We must ensure that none of the other
    // operands depend on True through its Chain.
    SmallVector<const SDNode *, 4> LoopWorklist;
    SmallPtrSet<const SDNode *, 16> Visited;
    LoopWorklist.push_back(False.getNode());
    LoopWorklist.push_back(Mask.getNode());
    LoopWorklist.push_back(VL.getNode());
    if (SDNode *Glued = N->getGluedNode())
      LoopWorklist.push_back(Glued);
    if (SDNode::hasPredecessorHelper(True.getNode(), Visited, LoopWorklist))
      return false;
  }

  // True must have the same VL as N.
  unsigned TrueVLIndex = True.getNumOperands() - HasChainOp - 2;
  SDValue TrueVL = True.getOperand(TrueVLIndex);

  auto IsNoFPExcept = [this](SDValue N) {
    return !this->mayRaiseFPException(N.getNode()) ||
           N->getFlags().hasNoFPExcept();
  };

  // Allow the peephole for a non-exception True with VLMAX vector length,
  // since all the values after VL of N are dependent on Merge. VLMAX should
  // be lowered to (XLenVT -1).
  if (TrueVL != VL && !(IsNoFPExcept(True) && isAllOnesConstant(TrueVL)))
    return false;

  SDLoc DL(N);
  unsigned MaskedOpc = Info->MaskedPseudo;
  assert(RISCVII::hasVecPolicyOp(TII->get(MaskedOpc).TSFlags) &&
         "Expected instructions with mask have policy operand.");
  assert(RISCVII::hasMergeOp(TII->get(MaskedOpc).TSFlags) &&
         "Expected instructions with mask have merge operand.");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(False);
  Ops.append(True->op_begin(), True->op_begin() + TrueVLIndex);
  Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
  Ops.push_back(CurDAG->getTargetConstant(Policy, DL, Subtarget->getXLenVT()));

  // The result node should have the chain operand of True.
  if (HasChainOp)
    Ops.push_back(True.getOperand(True.getNumOperands() - 1));

  // The result node should take over the glued node of N.
  if (N->getGluedNode())
    Ops.push_back(N->getOperand(N->getNumOperands() - 1));

  SDNode *Result =
      CurDAG->getMachineNode(MaskedOpc, DL, True->getVTList(), Ops);
  Result->setFlags(True->getFlags());

  // Replace the vmerge.vvm node by Result.
  ReplaceUses(SDValue(N, 0), SDValue(Result, 0));

  // Replace the remaining values of True, e.g. the chain and VL.
  for (unsigned Idx = 1; Idx < True->getNumValues(); ++Idx)
    ReplaceUses(True.getValue(Idx), SDValue(Result, Idx));

  // Try to transform Result to an unmasked intrinsic.
  doPeepholeMaskedRVV(Result);
  return true;
}

// Transform (VMERGE_VVM_<LMUL>_TU false, false, true, allones, vl, sew) to
// (VADD_VI_<LMUL>_TU false, true, 0, vl, sew). This may decrease uses of
// VMSET.
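// With an all-ones mask the merge selects the true operand in every lane, and
// (true + 0) == true, so the VADD_VI is an equivalent tail-undisturbed move
// that no longer needs V0.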
bool RISCVDAGToDAGISel::performVMergeToVAdd(SDNode *N) {
  unsigned NewOpc;
  switch (N->getMachineOpcode()) {
  default:
    llvm_unreachable("Expected VMERGE_VVM_<LMUL>_TU instruction.");
  case RISCV::PseudoVMERGE_VVM_MF8_TU:
    NewOpc = RISCV::PseudoVADD_VI_MF8_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_MF4_TU:
    NewOpc = RISCV::PseudoVADD_VI_MF4_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_MF2_TU:
    NewOpc = RISCV::PseudoVADD_VI_MF2_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_M1_TU:
    NewOpc = RISCV::PseudoVADD_VI_M1_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_M2_TU:
    NewOpc = RISCV::PseudoVADD_VI_M2_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_M4_TU:
    NewOpc = RISCV::PseudoVADD_VI_M4_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_M8_TU:
    NewOpc = RISCV::PseudoVADD_VI_M8_TU;
    break;
  }

  if (!usesAllOnesMask(N, /* MaskOpIdx */ 3))
    return false;

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SDValue Ops[] = {N->getOperand(1), N->getOperand(2),
                   CurDAG->getTargetConstant(0, DL, Subtarget->getXLenVT()),
                   N->getOperand(4), N->getOperand(5)};
  SDNode *Result = CurDAG->getMachineNode(NewOpc, DL, VT, Ops);
  ReplaceUses(N, Result);
  return true;
}

bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
  bool MadeChange = false;
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    auto IsVMergeTU = [](unsigned Opcode) {
      return Opcode == RISCV::PseudoVMERGE_VVM_MF8_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF4_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF2_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M1_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M2_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M4_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M8_TU;
    };

    auto IsVMergeTA = [](unsigned Opcode) {
      return Opcode == RISCV::PseudoVMERGE_VVM_MF8 ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF4 ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF2 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M1 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M2 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M4 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M8;
    };

    unsigned Opc = N->getMachineOpcode();
    // The following optimizations require that the merge operand of N is the
    // same as the false operand of N.
    if ((IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1)) ||
        IsVMergeTA(Opc))
      MadeChange |= performCombineVMergeAndVOps(N, IsVMergeTA(Opc));
    if (IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1))
      MadeChange |= performVMergeToVAdd(N);
  }
  return MadeChange;
}

// This pass converts a legalized DAG into a RISCV-specific DAG, ready
// for instruction scheduling.
FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
                                       CodeGenOpt::Level OptLevel) {
  return new RISCVDAGToDAGISel(TM, OptLevel);
}

char RISCVDAGToDAGISel::ID = 0;

INITIALIZE_PASS(RISCVDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)