1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVISelDAGToDAG.h"
14 #include "MCTargetDesc/RISCVMCTargetDesc.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCVISelLowering.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/IR/IntrinsicsRISCV.h"
20 #include "llvm/Support/Alignment.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/KnownBits.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
25 
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "riscv-isel"
29 
30 namespace llvm {
31 namespace RISCV {
32 #define GET_RISCVVSSEGTable_IMPL
33 #define GET_RISCVVLSEGTable_IMPL
34 #define GET_RISCVVLXSEGTable_IMPL
35 #define GET_RISCVVSXSEGTable_IMPL
36 #define GET_RISCVVLETable_IMPL
37 #define GET_RISCVVSETable_IMPL
38 #define GET_RISCVVLXTable_IMPL
39 #define GET_RISCVVSXTable_IMPL
40 #include "RISCVGenSearchableTables.inc"
41 } // namespace RISCV
42 } // namespace llvm
43 
44 void RISCVDAGToDAGISel::PreprocessISelDAG() {
45   for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
46                                        E = CurDAG->allnodes_end();
47        I != E;) {
48     SDNode *N = &*I++; // Advance the iterator first to avoid invalidation issues.
49 
50     // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
51     // load. Done after lowering and combining so that we have a chance to
52     // optimize this to VMV_V_X_VL when the upper bits aren't needed.
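    // Concretely: Lo is stored at FI+0 and Hi at FI+4, and a vlse64 whose
    // stride register is x0 (stride 0) then re-reads that 8-byte slot for
    // every element, yielding the splatted 64-bit value.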
53     if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
54       continue;
55 
56     assert(N->getNumOperands() == 3 && "Unexpected number of operands");
57     MVT VT = N->getSimpleValueType(0);
58     SDValue Lo = N->getOperand(0);
59     SDValue Hi = N->getOperand(1);
60     SDValue VL = N->getOperand(2);
61     assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
62            Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
63            "Unexpected VTs!");
64     MachineFunction &MF = CurDAG->getMachineFunction();
65     RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
66     SDLoc DL(N);
67 
68     // We use the same frame index we use for moving two i32s into 64-bit FPR.
69     // This is an analogous operation.
70     int FI = FuncInfo->getMoveF64FrameIndex(MF);
71     MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
72     const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
73     SDValue StackSlot =
74         CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));
75 
76     SDValue Chain = CurDAG->getEntryNode();
77     Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
78 
79     SDValue OffsetSlot =
80         CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
81     Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
82                           Align(8));
83 
84     Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
85 
86     SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
87     SDValue IntID =
88         CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
89     SDValue Ops[] = {Chain, IntID, StackSlot,
90                      CurDAG->getRegister(RISCV::X0, MVT::i64), VL};
91 
92     SDValue Result = CurDAG->getMemIntrinsicNode(
93         ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
94         MachineMemOperand::MOLoad);
95 
96     // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
97     // vlse we created.  This will cause general havoc on the dag because
98     // anything below the conversion could be folded into other existing nodes.
99     // To avoid invalidating 'I', back it up to the convert node.
100     --I;
101     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
102 
103     // Now that we did that, the node is dead.  Increment the iterator to the
104     // next node to process, then delete N.
105     ++I;
106     CurDAG->DeleteNode(N);
107   }
108 }
109 
110 void RISCVDAGToDAGISel::PostprocessISelDAG() {
111   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
112 
113   bool MadeChange = false;
114   while (Position != CurDAG->allnodes_begin()) {
115     SDNode *N = &*--Position;
116     // Skip dead nodes and any non-machine opcodes.
117     if (N->use_empty() || !N->isMachineOpcode())
118       continue;
119 
120     MadeChange |= doPeepholeSExtW(N);
121     MadeChange |= doPeepholeLoadStoreADDI(N);
122   }
123 
124   if (MadeChange)
125     CurDAG->RemoveDeadNodes();
126 }
127 
128 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
129                          const RISCVSubtarget &Subtarget) {
130   MVT XLenVT = Subtarget.getXLenVT();
131   RISCVMatInt::InstSeq Seq =
132       RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
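  // Seq is the LUI/ADDI(W)/SLLI/... recipe chosen by the constant
  // materializer; the loop below chains it together. For instance, on RV64 a
  // constant like 0x12345678 would typically come back as LUI 0x12345
  // followed by ADDIW 0x678.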
133 
134   SDNode *Result = nullptr;
135   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
136   for (RISCVMatInt::Inst &Inst : Seq) {
137     SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
138     if (Inst.Opc == RISCV::LUI)
139       Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
140     else if (Inst.Opc == RISCV::ADDUW)
141       Result = CurDAG->getMachineNode(RISCV::ADDUW, DL, XLenVT, SrcReg,
142                                       CurDAG->getRegister(RISCV::X0, XLenVT));
143     else
144       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
145 
146     // Only the first instruction has X0 as its source.
147     SrcReg = SDValue(Result, 0);
148   }
149 
150   return Result;
151 }
152 
153 static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
154                                unsigned RegClassID, unsigned SubReg0) {
155   assert(Regs.size() >= 2 && Regs.size() <= 8);
156 
157   SDLoc DL(Regs[0]);
158   SmallVector<SDValue, 8> Ops;
159 
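  // REG_SEQUENCE operands are (RegClassID, Reg0, SubRegIdx0, Reg1,
  // SubRegIdx1, ...), so this forms one VRN<NF>M<LMUL> tuple whose parts sit
  // in consecutive sub_vrm* subregisters starting at SubReg0.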
160   Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
161 
162   for (unsigned I = 0; I < Regs.size(); ++I) {
163     Ops.push_back(Regs[I]);
164     Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
165   }
166   SDNode *N =
167       CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
168   return SDValue(N, 0);
169 }
170 
171 static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
172                              unsigned NF) {
173   static const unsigned RegClassIDs[] = {
174       RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
175       RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
176       RISCV::VRN8M1RegClassID};
177 
178   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
179 }
180 
181 static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
182                              unsigned NF) {
183   static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
184                                          RISCV::VRN3M2RegClassID,
185                                          RISCV::VRN4M2RegClassID};
186 
187   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
188 }
189 
190 static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
191                              unsigned NF) {
192   return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
193                          RISCV::sub_vrm4_0);
194 }
195 
196 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
197                            unsigned NF, RISCVII::VLMUL LMUL) {
198   switch (LMUL) {
199   default:
200     llvm_unreachable("Invalid LMUL.");
201   case RISCVII::VLMUL::LMUL_F8:
202   case RISCVII::VLMUL::LMUL_F4:
203   case RISCVII::VLMUL::LMUL_F2:
204   case RISCVII::VLMUL::LMUL_1:
205     return createM1Tuple(CurDAG, Regs, NF);
206   case RISCVII::VLMUL::LMUL_2:
207     return createM2Tuple(CurDAG, Regs, NF);
208   case RISCVII::VLMUL::LMUL_4:
209     return createM4Tuple(CurDAG, Regs, NF);
210   }
211 }
212 
213 void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
214     SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
215     bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
216     bool IsLoad, MVT *IndexVT) {
217   SDValue Chain = Node->getOperand(0);
218   SDValue Glue;
219 
220   SDValue Base;
221   SelectBaseAddr(Node->getOperand(CurOp++), Base);
222   Operands.push_back(Base); // Base pointer.
223 
224   if (IsStridedOrIndexed) {
225     Operands.push_back(Node->getOperand(CurOp++)); // Index.
226     if (IndexVT)
227       *IndexVT = Operands.back()->getSimpleValueType(0);
228   }
229 
230   if (IsMasked) {
231     // Mask needs to be copied to V0.
232     SDValue Mask = Node->getOperand(CurOp++);
233     Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
234     Glue = Chain.getValue(1);
235     Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
236   }
237   SDValue VL;
238   selectVLOp(Node->getOperand(CurOp++), VL);
239   Operands.push_back(VL);
240 
241   MVT XLenVT = Subtarget->getXLenVT();
242   SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
243   Operands.push_back(SEWOp);
244 
245   // Masked load has the tail policy argument.
246   if (IsMasked && IsLoad) {
247     // Policy must be a constant.
248     uint64_t Policy = Node->getConstantOperandVal(CurOp++);
249     SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
250     Operands.push_back(PolicyOp);
251   }
252 
253   Operands.push_back(Chain); // Chain.
254   if (Glue)
255     Operands.push_back(Glue);
256 }
257 
258 void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
259                                     bool IsStrided) {
260   SDLoc DL(Node);
261   unsigned NF = Node->getNumValues() - 1;
262   MVT VT = Node->getSimpleValueType(0);
263   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
264   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
265 
266   unsigned CurOp = 2;
267   SmallVector<SDValue, 8> Operands;
268   if (IsMasked) {
269     SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
270                                  Node->op_begin() + CurOp + NF);
271     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
272     Operands.push_back(MaskedOff);
273     CurOp += NF;
274   }
275 
276   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
277                              Operands, /*IsLoad=*/true);
278 
279   const RISCV::VLSEGPseudo *P =
280       RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
281                             static_cast<unsigned>(LMUL));
282   MachineSDNode *Load =
283       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
284 
285   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
286     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
287 
288   SDValue SuperReg = SDValue(Load, 0);
289   for (unsigned I = 0; I < NF; ++I) {
290     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
291     ReplaceUses(SDValue(Node, I),
292                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
293   }
294 
295   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
296   CurDAG->RemoveDeadNode(Node);
297 }
298 
299 void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
300   SDLoc DL(Node);
301   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
302   MVT VT = Node->getSimpleValueType(0);
303   MVT XLenVT = Subtarget->getXLenVT();
304   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
305   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
306 
307   unsigned CurOp = 2;
308   SmallVector<SDValue, 7> Operands;
309   if (IsMasked) {
310     SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
311                                  Node->op_begin() + CurOp + NF);
312     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
313     Operands.push_back(MaskedOff);
314     CurOp += NF;
315   }
316 
317   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
318                              /*IsStridedOrIndexed*/ false, Operands,
319                              /*IsLoad=*/true);
320 
321   const RISCV::VLSEGPseudo *P =
322       RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
323                             Log2SEW, static_cast<unsigned>(LMUL));
324   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
325                                                MVT::Other, MVT::Glue, Operands);
326   SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
327                                           /*Glue*/ SDValue(Load, 2));
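  // A fault-only-first load may shrink vl if an element after the first
  // faults, so PseudoReadVL (glued to the load) captures the updated vl to
  // return as the extra result below.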
328 
329   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
330     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
331 
332   SDValue SuperReg = SDValue(Load, 0);
333   for (unsigned I = 0; I < NF; ++I) {
334     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
335     ReplaceUses(SDValue(Node, I),
336                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
337   }
338 
339   ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
340   ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
341   CurDAG->RemoveDeadNode(Node);
342 }
343 
344 void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
345                                      bool IsOrdered) {
346   SDLoc DL(Node);
347   unsigned NF = Node->getNumValues() - 1;
348   MVT VT = Node->getSimpleValueType(0);
349   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
350   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
351 
352   unsigned CurOp = 2;
353   SmallVector<SDValue, 8> Operands;
354   if (IsMasked) {
355     SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
356                                  Node->op_begin() + CurOp + NF);
357     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
358     Operands.push_back(MaskedOff);
359     CurOp += NF;
360   }
361 
362   MVT IndexVT;
363   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
364                              /*IsStridedOrIndexed*/ true, Operands,
365                              /*IsLoad=*/true, &IndexVT);
366 
367   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
368          "Element count mismatch");
369 
370   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
371   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
372   const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
373       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
374       static_cast<unsigned>(IndexLMUL));
375   MachineSDNode *Load =
376       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
377 
378   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
379     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
380 
381   SDValue SuperReg = SDValue(Load, 0);
382   for (unsigned I = 0; I < NF; ++I) {
383     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
384     ReplaceUses(SDValue(Node, I),
385                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
386   }
387 
388   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
389   CurDAG->RemoveDeadNode(Node);
390 }
391 
392 void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
393                                     bool IsStrided) {
394   SDLoc DL(Node);
395   unsigned NF = Node->getNumOperands() - 4;
396   if (IsStrided)
397     NF--;
398   if (IsMasked)
399     NF--;
400   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
401   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
402   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
403   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
404   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
405 
406   SmallVector<SDValue, 8> Operands;
407   Operands.push_back(StoreVal);
408   unsigned CurOp = 2 + NF;
409 
410   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
411                              Operands);
412 
413   const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
414       NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
415   MachineSDNode *Store =
416       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
417 
418   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
419     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
420 
421   ReplaceNode(Node, Store);
422 }
423 
424 void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
425                                      bool IsOrdered) {
426   SDLoc DL(Node);
427   unsigned NF = Node->getNumOperands() - 5;
428   if (IsMasked)
429     --NF;
430   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
431   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
432   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
433   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
434   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
435 
436   SmallVector<SDValue, 8> Operands;
437   Operands.push_back(StoreVal);
438   unsigned CurOp = 2 + NF;
439 
440   MVT IndexVT;
441   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
442                              /*IsStridedOrIndexed*/ true, Operands,
443                              /*IsLoad=*/false, &IndexVT);
444 
445   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
446          "Element count mismatch");
447 
448   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
449   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
450   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
451       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
452       static_cast<unsigned>(IndexLMUL));
453   MachineSDNode *Store =
454       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
455 
456   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
457     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
458 
459   ReplaceNode(Node, Store);
460 }
461 
462 
463 void RISCVDAGToDAGISel::Select(SDNode *Node) {
464   // If we have a custom node, we have already selected.
465   if (Node->isMachineOpcode()) {
466     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
467     Node->setNodeId(-1);
468     return;
469   }
470 
471   // Instruction Selection not handled by the auto-generated tablegen selection
472   // should be handled here.
473   unsigned Opcode = Node->getOpcode();
474   MVT XLenVT = Subtarget->getXLenVT();
475   SDLoc DL(Node);
476   MVT VT = Node->getSimpleValueType(0);
477 
478   switch (Opcode) {
479   case ISD::Constant: {
480     auto *ConstNode = cast<ConstantSDNode>(Node);
481     if (VT == XLenVT && ConstNode->isZero()) {
482       SDValue New =
483           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
484       ReplaceNode(Node, New.getNode());
485       return;
486     }
487     int64_t Imm = ConstNode->getSExtValue();
488     // If the upper XLen-16 bits are not used, try to convert this to a simm12
489     // by sign extending bit 15.
490     if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
491         hasAllHUsers(Node))
492       Imm = SignExtend64(Imm, 16);
493     // If the upper 32-bits are not used try to convert this into a simm32 by
494     // sign extending bit 32.
495     if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
496       Imm = SignExtend64(Imm, 32);
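    // For example, 0xFFF8 would otherwise need an LUI-based two-instruction
    // sequence, but when only the low 16 bits are consumed it narrows to -8,
    // which a single ADDI can materialize.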
497 
498     ReplaceNode(Node, selectImm(CurDAG, DL, Imm, *Subtarget));
499     return;
500   }
501   case ISD::FrameIndex: {
502     SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
503     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
504     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
505     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
506     return;
507   }
508   case ISD::SRL: {
509     // We don't need this transform if zext.h is supported.
510     if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
511       break;
512     // Optimize (srl (and X, 0xffff), C) ->
513     //          (srli (slli X, (XLen-16)), (XLen-16) + C)
514     // Taking into account that the 0xffff may have had lower bits unset by
515     // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
516     // This pattern occurs when type legalizing i16 right shifts.
517     // FIXME: This could be extended to other AND masks.
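    // For instance, on RV64 (srl (and X, 0xffff), 4) becomes
    // (srli (slli X, 48), 52): the slli discards everything above bit 15 and
    // the combined right shift produces the same zero-extended result.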
518     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
519     if (N1C) {
520       uint64_t ShAmt = N1C->getZExtValue();
521       SDValue N0 = Node->getOperand(0);
522       if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
523           isa<ConstantSDNode>(N0.getOperand(1))) {
524         uint64_t Mask = N0.getConstantOperandVal(1);
525         Mask |= maskTrailingOnes<uint64_t>(ShAmt);
526         if (Mask == 0xffff) {
527           unsigned LShAmt = Subtarget->getXLen() - 16;
528           SDNode *SLLI =
529               CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
530                                      CurDAG->getTargetConstant(LShAmt, DL, VT));
531           SDNode *SRLI = CurDAG->getMachineNode(
532               RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
533               CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
534           ReplaceNode(Node, SRLI);
535           return;
536         }
537       }
538     }
539 
540     break;
541   }
542   case ISD::AND: {
543     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
544     if (!N1C)
545       break;
546 
547     SDValue N0 = Node->getOperand(0);
548 
549     bool LeftShift = N0.getOpcode() == ISD::SHL;
550     if (!LeftShift && N0.getOpcode() != ISD::SRL)
551       break;
552 
553     auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
554     if (!C)
555       break;
556     uint64_t C2 = C->getZExtValue();
557     unsigned XLen = Subtarget->getXLen();
558     if (!C2 || C2 >= XLen)
559       break;
560 
561     uint64_t C1 = N1C->getZExtValue();
562 
563     // Keep track of whether this is an andi, zext.h, or zext.w.
564     bool ZExtOrANDI = isInt<12>(N1C->getSExtValue());
565     if (C1 == UINT64_C(0xFFFF) &&
566         (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()))
567       ZExtOrANDI = true;
568     if (C1 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba())
569       ZExtOrANDI = true;
570 
571     // Clear irrelevant bits in the mask.
572     if (LeftShift)
573       C1 &= maskTrailingZeros<uint64_t>(C2);
574     else
575       C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
576 
577     // Some transforms should only be done if the shift has a single use or
578     // the AND would become (srli (slli X, 32), 32)
579     bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
580 
581     SDValue X = N0.getOperand(0);
582 
583     // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
584     // with c3 leading zeros.
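    // e.g. with c2 = 2 and c1 = 0x00FFFFFFFFFFFFFF (c3 = 8 leading zeros),
    // (and (srl x, 2), c1) becomes (srli (slli x, 6), 8).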
585     if (!LeftShift && isMask_64(C1)) {
586       uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
587       if (C2 < C3) {
588         // If the number of leading zeros is C2+32 this can be SRLIW.
589         if (C2 + 32 == C3) {
590           SDNode *SRLIW =
591               CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
592                                      CurDAG->getTargetConstant(C2, DL, XLenVT));
593           ReplaceNode(Node, SRLIW);
594           return;
595         }
596 
597         // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
598         // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
599         //
600         // This pattern occurs when (i32 (srl (sra Y, 31), c3 - 32)) is type
601         // legalized and goes through DAG combine.
602         SDValue Y;
603         if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
604             selectSExti32(X, Y)) {
605           SDNode *SRAIW =
606               CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, Y,
607                                      CurDAG->getTargetConstant(31, DL, XLenVT));
608           SDNode *SRLIW = CurDAG->getMachineNode(
609               RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
610               CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
611           ReplaceNode(Node, SRLIW);
612           return;
613         }
614 
615         // (srli (slli x, c3-c2), c3).
616         if (OneUseOrZExtW && !ZExtOrANDI) {
617           SDNode *SLLI = CurDAG->getMachineNode(
618               RISCV::SLLI, DL, XLenVT, X,
619               CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
620           SDNode *SRLI =
621               CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
622                                      CurDAG->getTargetConstant(C3, DL, XLenVT));
623           ReplaceNode(Node, SRLI);
624           return;
625         }
626       }
627     }
628 
629     // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
630     // shifted by c2 bits with c3 leading zeros.
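    // e.g. with Zba, c2 = 4 and c1 = 0x0000000FFFFFFFF0 (32 ones shifted left
    // by 4) selects a single slli.uw x, 4, which zero-extends the low 32 bits
    // of x before shifting.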
631     if (LeftShift && isShiftedMask_64(C1)) {
632       uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
633 
634       if (C2 + C3 < XLen &&
635           C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
636         // Use slli.uw when possible.
637         if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
638           SDNode *SLLIUW =
639               CurDAG->getMachineNode(RISCV::SLLIUW, DL, XLenVT, X,
640                                      CurDAG->getTargetConstant(C2, DL, XLenVT));
641           ReplaceNode(Node, SLLIUW);
642           return;
643         }
644 
645         // (srli (slli x, c2+c3), c3)
646         if (OneUseOrZExtW && !ZExtOrANDI) {
647           SDNode *SLLI = CurDAG->getMachineNode(
648               RISCV::SLLI, DL, XLenVT, X,
649               CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
650           SDNode *SRLI =
651               CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
652                                      CurDAG->getTargetConstant(C3, DL, XLenVT));
653           ReplaceNode(Node, SRLI);
654           return;
655         }
656       }
657     }
658 
659     // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
660     // shifted mask with c2 leading zeros and c3 trailing zeros.
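    // e.g. c2 = 8 with c1 = 0x00FFFFFFFFFFFFF0 (8 leading and 4 trailing
    // zeros) becomes (slli (srli x, 12), 4).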
661     if (!LeftShift && isShiftedMask_64(C1)) {
662       uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
663       uint64_t C3 = countTrailingZeros(C1);
664       if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !ZExtOrANDI) {
665         SDNode *SRLI = CurDAG->getMachineNode(
666             RISCV::SRLI, DL, XLenVT, X,
667             CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
668         SDNode *SLLI =
669             CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
670                                    CurDAG->getTargetConstant(C3, DL, XLenVT));
671         ReplaceNode(Node, SLLI);
672         return;
673       }
674       // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
675       if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
676           OneUseOrZExtW && !ZExtOrANDI) {
677         SDNode *SRLIW = CurDAG->getMachineNode(
678             RISCV::SRLIW, DL, XLenVT, X,
679             CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
680         SDNode *SLLI =
681             CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
682                                    CurDAG->getTargetConstant(C3, DL, XLenVT));
683         ReplaceNode(Node, SLLI);
684         return;
685       }
686     }
687 
688     // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
689     // shifted mask with no leading zeros and c3 trailing zeros.
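    // e.g. c2 = 2 with c1 = 0xFFFFFFFFFFFFFFF0 (no leading zeros, c3 = 4
    // trailing zeros) becomes (slli (srli x, 2), 4).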
690     if (LeftShift && isShiftedMask_64(C1)) {
691       uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
692       uint64_t C3 = countTrailingZeros(C1);
693       if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !ZExtOrANDI) {
694         SDNode *SRLI = CurDAG->getMachineNode(
695             RISCV::SRLI, DL, XLenVT, X,
696             CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
697         SDNode *SLLI =
698             CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
699                                    CurDAG->getTargetConstant(C3, DL, XLenVT));
700         ReplaceNode(Node, SLLI);
701         return;
702       }
703       // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
704       if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !ZExtOrANDI) {
705         SDNode *SRLIW = CurDAG->getMachineNode(
706             RISCV::SRLIW, DL, XLenVT, X,
707             CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
708         SDNode *SLLI =
709             CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
710                                    CurDAG->getTargetConstant(C3, DL, XLenVT));
711         ReplaceNode(Node, SLLI);
712         return;
713       }
714     }
715 
716     break;
717   }
718   case ISD::INTRINSIC_WO_CHAIN: {
719     unsigned IntNo = Node->getConstantOperandVal(0);
720     switch (IntNo) {
721       // By default we do not custom select any intrinsic.
722     default:
723       break;
724     case Intrinsic::riscv_vmsgeu:
725     case Intrinsic::riscv_vmsge: {
726       SDValue Src1 = Node->getOperand(1);
727       SDValue Src2 = Node->getOperand(2);
728       // Only custom select scalar second operand.
729       if (Src2.getValueType() != XLenVT)
730         break;
731       // Small constants are handled with patterns.
732       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
733         int64_t CVal = C->getSExtValue();
734         if (CVal >= -15 && CVal <= 16)
735           break;
736       }
737       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
738       MVT Src1VT = Src1.getSimpleValueType();
739       unsigned VMSLTOpcode, VMNANDOpcode;
740       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
741       default:
742         llvm_unreachable("Unexpected LMUL!");
743       case RISCVII::VLMUL::LMUL_F8:
744         VMSLTOpcode =
745             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
746         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
747         break;
748       case RISCVII::VLMUL::LMUL_F4:
749         VMSLTOpcode =
750             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
751         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
752         break;
753       case RISCVII::VLMUL::LMUL_F2:
754         VMSLTOpcode =
755             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
756         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
757         break;
758       case RISCVII::VLMUL::LMUL_1:
759         VMSLTOpcode =
760             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
761         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
762         break;
763       case RISCVII::VLMUL::LMUL_2:
764         VMSLTOpcode =
765             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
766         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
767         break;
768       case RISCVII::VLMUL::LMUL_4:
769         VMSLTOpcode =
770             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
771         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
772         break;
773       case RISCVII::VLMUL::LMUL_8:
774         VMSLTOpcode =
775             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
776         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
777         break;
778       }
779       SDValue SEW = CurDAG->getTargetConstant(
780           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
781       SDValue VL;
782       selectVLOp(Node->getOperand(3), VL);
783 
784       // Expand to
785       // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
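      // There is no vmsge{u}.vx encoding, so "a >= x" is computed as the
      // complement of "a < x"; vmnand with both operands equal to the compare
      // result acts as a mask-register NOT.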
786       SDValue Cmp = SDValue(
787           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
788           0);
789       ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
790                                                {Cmp, Cmp, VL, SEW}));
791       return;
792     }
793     case Intrinsic::riscv_vmsgeu_mask:
794     case Intrinsic::riscv_vmsge_mask: {
795       SDValue Src1 = Node->getOperand(2);
796       SDValue Src2 = Node->getOperand(3);
797       // Only custom select scalar second operand.
798       if (Src2.getValueType() != XLenVT)
799         break;
800       // Small constants are handled with patterns.
801       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
802         int64_t CVal = C->getSExtValue();
803         if (CVal >= -15 && CVal <= 16)
804           break;
805       }
806       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
807       MVT Src1VT = Src1.getSimpleValueType();
808       unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
809       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
810       default:
811         llvm_unreachable("Unexpected LMUL!");
812       case RISCVII::VLMUL::LMUL_F8:
813         VMSLTOpcode =
814             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
815         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
816                                      : RISCV::PseudoVMSLT_VX_MF8_MASK;
817         break;
818       case RISCVII::VLMUL::LMUL_F4:
819         VMSLTOpcode =
820             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
821         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
822                                      : RISCV::PseudoVMSLT_VX_MF4_MASK;
823         break;
824       case RISCVII::VLMUL::LMUL_F2:
825         VMSLTOpcode =
826             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
827         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
828                                      : RISCV::PseudoVMSLT_VX_MF2_MASK;
829         break;
830       case RISCVII::VLMUL::LMUL_1:
831         VMSLTOpcode =
832             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
833         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
834                                      : RISCV::PseudoVMSLT_VX_M1_MASK;
835         break;
836       case RISCVII::VLMUL::LMUL_2:
837         VMSLTOpcode =
838             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
839         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
840                                      : RISCV::PseudoVMSLT_VX_M2_MASK;
841         break;
842       case RISCVII::VLMUL::LMUL_4:
843         VMSLTOpcode =
844             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
845         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
846                                      : RISCV::PseudoVMSLT_VX_M4_MASK;
847         break;
848       case RISCVII::VLMUL::LMUL_8:
849         VMSLTOpcode =
850             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
851         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
852                                      : RISCV::PseudoVMSLT_VX_M8_MASK;
853         break;
854       }
855       // Mask operations use the LMUL from the mask type.
856       switch (RISCVTargetLowering::getLMUL(VT)) {
857       default:
858         llvm_unreachable("Unexpected LMUL!");
859       case RISCVII::VLMUL::LMUL_F8:
860         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
861         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
862         break;
863       case RISCVII::VLMUL::LMUL_F4:
864         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
865         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
866         break;
867       case RISCVII::VLMUL::LMUL_F2:
868         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
869         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
870         break;
871       case RISCVII::VLMUL::LMUL_1:
872         VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
873         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
874         break;
875       case RISCVII::VLMUL::LMUL_2:
876         VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
877         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
878         break;
879       case RISCVII::VLMUL::LMUL_4:
880         VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
881         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
882         break;
883       case RISCVII::VLMUL::LMUL_8:
884         VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
885         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
886         break;
887       }
888       SDValue SEW = CurDAG->getTargetConstant(
889           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
890       SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
891       SDValue VL;
892       selectVLOp(Node->getOperand(5), VL);
893       SDValue MaskedOff = Node->getOperand(1);
894       SDValue Mask = Node->getOperand(4);
895       // If the MaskedOff value and the Mask are the same value, use
896       // vmslt{u}.vx vt, va, x;  vmandnot.mm vd, vd, vt
897       // This avoids needing to copy v0 to vd before starting the next sequence.
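      // In the active lanes vmandnot yields Mask & ~(Src1 < Src2), i.e. the
      // ">=" result; inactive lanes come out 0, matching the corresponding
      // MaskedOff (== Mask) bits.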
898       if (Mask == MaskedOff) {
899         SDValue Cmp = SDValue(
900             CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
901             0);
902         ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
903                                                  {Mask, Cmp, VL, MaskSEW}));
904         return;
905       }
906 
907       // Mask needs to be copied to V0.
908       SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
909                                            RISCV::V0, Mask, SDValue());
910       SDValue Glue = Chain.getValue(1);
911       SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
912 
913       // Otherwise use
914       // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
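      // The masked vmslt writes "<" into the active lanes and keeps MaskedOff
      // elsewhere; XORing with the mask then flips only the active lanes,
      // turning "<" into ">=" while leaving inactive lanes untouched.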
915       SDValue Cmp = SDValue(
916           CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
917                                  {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
918           0);
919       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
920                                                {Cmp, Mask, VL, MaskSEW}));
921       return;
922     }
923     }
924     break;
925   }
926   case ISD::INTRINSIC_W_CHAIN: {
927     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
928     switch (IntNo) {
929       // By default we do not custom select any intrinsic.
930     default:
931       break;
932 
933     case Intrinsic::riscv_vsetvli:
934     case Intrinsic::riscv_vsetvlimax: {
935       if (!Subtarget->hasStdExtV())
936         break;
937 
938       bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
939       unsigned Offset = VLMax ? 2 : 3;
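      // riscv.vsetvlimax carries no AVL operand, so SEW/LMUL start at operand
      // 2; for riscv.vsetvli the AVL is operand 2 and SEW/LMUL follow at 3.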
940 
941       assert(Node->getNumOperands() == Offset + 2 &&
942              "Unexpected number of operands");
943 
944       unsigned SEW =
945           RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
946       RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
947           Node->getConstantOperandVal(Offset + 1) & 0x7);
948 
949       unsigned VTypeI = RISCVVType::encodeVTYPE(
950           VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
951       SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
952 
953       SDValue VLOperand;
954       unsigned Opcode = RISCV::PseudoVSETVLI;
955       if (VLMax) {
956         VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
957         Opcode = RISCV::PseudoVSETVLIX0;
958       } else {
959         VLOperand = Node->getOperand(2);
960 
961         if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
962           uint64_t AVL = C->getZExtValue();
963           if (isUInt<5>(AVL)) {
964             SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
965             ReplaceNode(
966                 Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
967                                              MVT::Other, VLImm, VTypeIOp,
968                                              /* Chain */ Node->getOperand(0)));
969             return;
970           }
971         }
972       }
973 
974       ReplaceNode(Node,
975                   CurDAG->getMachineNode(Opcode, DL, XLenVT,
976                                          MVT::Other, VLOperand, VTypeIOp,
977                                          /* Chain */ Node->getOperand(0)));
978       return;
979     }
980     case Intrinsic::riscv_vlseg2:
981     case Intrinsic::riscv_vlseg3:
982     case Intrinsic::riscv_vlseg4:
983     case Intrinsic::riscv_vlseg5:
984     case Intrinsic::riscv_vlseg6:
985     case Intrinsic::riscv_vlseg7:
986     case Intrinsic::riscv_vlseg8: {
987       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
988       return;
989     }
990     case Intrinsic::riscv_vlseg2_mask:
991     case Intrinsic::riscv_vlseg3_mask:
992     case Intrinsic::riscv_vlseg4_mask:
993     case Intrinsic::riscv_vlseg5_mask:
994     case Intrinsic::riscv_vlseg6_mask:
995     case Intrinsic::riscv_vlseg7_mask:
996     case Intrinsic::riscv_vlseg8_mask: {
997       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
998       return;
999     }
1000     case Intrinsic::riscv_vlsseg2:
1001     case Intrinsic::riscv_vlsseg3:
1002     case Intrinsic::riscv_vlsseg4:
1003     case Intrinsic::riscv_vlsseg5:
1004     case Intrinsic::riscv_vlsseg6:
1005     case Intrinsic::riscv_vlsseg7:
1006     case Intrinsic::riscv_vlsseg8: {
1007       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1008       return;
1009     }
1010     case Intrinsic::riscv_vlsseg2_mask:
1011     case Intrinsic::riscv_vlsseg3_mask:
1012     case Intrinsic::riscv_vlsseg4_mask:
1013     case Intrinsic::riscv_vlsseg5_mask:
1014     case Intrinsic::riscv_vlsseg6_mask:
1015     case Intrinsic::riscv_vlsseg7_mask:
1016     case Intrinsic::riscv_vlsseg8_mask: {
1017       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1018       return;
1019     }
1020     case Intrinsic::riscv_vloxseg2:
1021     case Intrinsic::riscv_vloxseg3:
1022     case Intrinsic::riscv_vloxseg4:
1023     case Intrinsic::riscv_vloxseg5:
1024     case Intrinsic::riscv_vloxseg6:
1025     case Intrinsic::riscv_vloxseg7:
1026     case Intrinsic::riscv_vloxseg8:
1027       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1028       return;
1029     case Intrinsic::riscv_vluxseg2:
1030     case Intrinsic::riscv_vluxseg3:
1031     case Intrinsic::riscv_vluxseg4:
1032     case Intrinsic::riscv_vluxseg5:
1033     case Intrinsic::riscv_vluxseg6:
1034     case Intrinsic::riscv_vluxseg7:
1035     case Intrinsic::riscv_vluxseg8:
1036       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1037       return;
1038     case Intrinsic::riscv_vloxseg2_mask:
1039     case Intrinsic::riscv_vloxseg3_mask:
1040     case Intrinsic::riscv_vloxseg4_mask:
1041     case Intrinsic::riscv_vloxseg5_mask:
1042     case Intrinsic::riscv_vloxseg6_mask:
1043     case Intrinsic::riscv_vloxseg7_mask:
1044     case Intrinsic::riscv_vloxseg8_mask:
1045       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1046       return;
1047     case Intrinsic::riscv_vluxseg2_mask:
1048     case Intrinsic::riscv_vluxseg3_mask:
1049     case Intrinsic::riscv_vluxseg4_mask:
1050     case Intrinsic::riscv_vluxseg5_mask:
1051     case Intrinsic::riscv_vluxseg6_mask:
1052     case Intrinsic::riscv_vluxseg7_mask:
1053     case Intrinsic::riscv_vluxseg8_mask:
1054       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1055       return;
1056     case Intrinsic::riscv_vlseg8ff:
1057     case Intrinsic::riscv_vlseg7ff:
1058     case Intrinsic::riscv_vlseg6ff:
1059     case Intrinsic::riscv_vlseg5ff:
1060     case Intrinsic::riscv_vlseg4ff:
1061     case Intrinsic::riscv_vlseg3ff:
1062     case Intrinsic::riscv_vlseg2ff: {
1063       selectVLSEGFF(Node, /*IsMasked*/ false);
1064       return;
1065     }
1066     case Intrinsic::riscv_vlseg8ff_mask:
1067     case Intrinsic::riscv_vlseg7ff_mask:
1068     case Intrinsic::riscv_vlseg6ff_mask:
1069     case Intrinsic::riscv_vlseg5ff_mask:
1070     case Intrinsic::riscv_vlseg4ff_mask:
1071     case Intrinsic::riscv_vlseg3ff_mask:
1072     case Intrinsic::riscv_vlseg2ff_mask: {
1073       selectVLSEGFF(Node, /*IsMasked*/ true);
1074       return;
1075     }
1076     case Intrinsic::riscv_vloxei:
1077     case Intrinsic::riscv_vloxei_mask:
1078     case Intrinsic::riscv_vluxei:
1079     case Intrinsic::riscv_vluxei_mask: {
1080       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1081                       IntNo == Intrinsic::riscv_vluxei_mask;
1082       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1083                        IntNo == Intrinsic::riscv_vloxei_mask;
1084 
1085       MVT VT = Node->getSimpleValueType(0);
1086       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1087 
1088       unsigned CurOp = 2;
1089       SmallVector<SDValue, 8> Operands;
1090       if (IsMasked)
1091         Operands.push_back(Node->getOperand(CurOp++));
1092 
1093       MVT IndexVT;
1094       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1095                                  /*IsStridedOrIndexed*/ true, Operands,
1096                                  /*IsLoad=*/true, &IndexVT);
1097 
1098       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1099              "Element count mismatch");
1100 
1101       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1102       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1103       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1104       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
1105           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1106           static_cast<unsigned>(IndexLMUL));
1107       MachineSDNode *Load =
1108           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1109 
1110       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1111         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1112 
1113       ReplaceNode(Node, Load);
1114       return;
1115     }
1116     case Intrinsic::riscv_vlm:
1117     case Intrinsic::riscv_vle:
1118     case Intrinsic::riscv_vle_mask:
1119     case Intrinsic::riscv_vlse:
1120     case Intrinsic::riscv_vlse_mask: {
1121       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1122                       IntNo == Intrinsic::riscv_vlse_mask;
1123       bool IsStrided =
1124           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1125 
1126       MVT VT = Node->getSimpleValueType(0);
1127       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1128 
1129       unsigned CurOp = 2;
1130       SmallVector<SDValue, 8> Operands;
1131       if (IsMasked)
1132         Operands.push_back(Node->getOperand(CurOp++));
1133 
1134       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1135                                  Operands, /*IsLoad=*/true);
1136 
1137       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1138       const RISCV::VLEPseudo *P =
1139           RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
1140                               static_cast<unsigned>(LMUL));
1141       MachineSDNode *Load =
1142           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1143 
1144       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1145         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1146 
1147       ReplaceNode(Node, Load);
1148       return;
1149     }
1150     case Intrinsic::riscv_vleff:
1151     case Intrinsic::riscv_vleff_mask: {
1152       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1153 
1154       MVT VT = Node->getSimpleValueType(0);
1155       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1156 
1157       unsigned CurOp = 2;
1158       SmallVector<SDValue, 7> Operands;
1159       if (IsMasked)
1160         Operands.push_back(Node->getOperand(CurOp++));
1161 
1162       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1163                                  /*IsStridedOrIndexed*/ false, Operands,
1164                                  /*IsLoad=*/true);
1165 
1166       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1167       const RISCV::VLEPseudo *P =
1168           RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
1169                               static_cast<unsigned>(LMUL));
1170       MachineSDNode *Load =
1171           CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
1172                                  MVT::Other, MVT::Glue, Operands);
1173       SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
1174                                               /*Glue*/ SDValue(Load, 2));
1175 
1176       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1177         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1178 
1179       ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
1180       ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
1181       ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
1182       CurDAG->RemoveDeadNode(Node);
1183       return;
1184     }
1185     }
1186     break;
1187   }
1188   case ISD::INTRINSIC_VOID: {
1189     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1190     switch (IntNo) {
1191     case Intrinsic::riscv_vsseg2:
1192     case Intrinsic::riscv_vsseg3:
1193     case Intrinsic::riscv_vsseg4:
1194     case Intrinsic::riscv_vsseg5:
1195     case Intrinsic::riscv_vsseg6:
1196     case Intrinsic::riscv_vsseg7:
1197     case Intrinsic::riscv_vsseg8: {
1198       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1199       return;
1200     }
1201     case Intrinsic::riscv_vsseg2_mask:
1202     case Intrinsic::riscv_vsseg3_mask:
1203     case Intrinsic::riscv_vsseg4_mask:
1204     case Intrinsic::riscv_vsseg5_mask:
1205     case Intrinsic::riscv_vsseg6_mask:
1206     case Intrinsic::riscv_vsseg7_mask:
1207     case Intrinsic::riscv_vsseg8_mask: {
1208       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1209       return;
1210     }
1211     case Intrinsic::riscv_vssseg2:
1212     case Intrinsic::riscv_vssseg3:
1213     case Intrinsic::riscv_vssseg4:
1214     case Intrinsic::riscv_vssseg5:
1215     case Intrinsic::riscv_vssseg6:
1216     case Intrinsic::riscv_vssseg7:
1217     case Intrinsic::riscv_vssseg8: {
1218       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1219       return;
1220     }
1221     case Intrinsic::riscv_vssseg2_mask:
1222     case Intrinsic::riscv_vssseg3_mask:
1223     case Intrinsic::riscv_vssseg4_mask:
1224     case Intrinsic::riscv_vssseg5_mask:
1225     case Intrinsic::riscv_vssseg6_mask:
1226     case Intrinsic::riscv_vssseg7_mask:
1227     case Intrinsic::riscv_vssseg8_mask: {
1228       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1229       return;
1230     }
1231     case Intrinsic::riscv_vsoxseg2:
1232     case Intrinsic::riscv_vsoxseg3:
1233     case Intrinsic::riscv_vsoxseg4:
1234     case Intrinsic::riscv_vsoxseg5:
1235     case Intrinsic::riscv_vsoxseg6:
1236     case Intrinsic::riscv_vsoxseg7:
1237     case Intrinsic::riscv_vsoxseg8:
1238       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1239       return;
1240     case Intrinsic::riscv_vsuxseg2:
1241     case Intrinsic::riscv_vsuxseg3:
1242     case Intrinsic::riscv_vsuxseg4:
1243     case Intrinsic::riscv_vsuxseg5:
1244     case Intrinsic::riscv_vsuxseg6:
1245     case Intrinsic::riscv_vsuxseg7:
1246     case Intrinsic::riscv_vsuxseg8:
1247       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1248       return;
1249     case Intrinsic::riscv_vsoxseg2_mask:
1250     case Intrinsic::riscv_vsoxseg3_mask:
1251     case Intrinsic::riscv_vsoxseg4_mask:
1252     case Intrinsic::riscv_vsoxseg5_mask:
1253     case Intrinsic::riscv_vsoxseg6_mask:
1254     case Intrinsic::riscv_vsoxseg7_mask:
1255     case Intrinsic::riscv_vsoxseg8_mask:
1256       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1257       return;
1258     case Intrinsic::riscv_vsuxseg2_mask:
1259     case Intrinsic::riscv_vsuxseg3_mask:
1260     case Intrinsic::riscv_vsuxseg4_mask:
1261     case Intrinsic::riscv_vsuxseg5_mask:
1262     case Intrinsic::riscv_vsuxseg6_mask:
1263     case Intrinsic::riscv_vsuxseg7_mask:
1264     case Intrinsic::riscv_vsuxseg8_mask:
1265       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1266       return;
1267     case Intrinsic::riscv_vsoxei:
1268     case Intrinsic::riscv_vsoxei_mask:
1269     case Intrinsic::riscv_vsuxei:
1270     case Intrinsic::riscv_vsuxei_mask: {
1271       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1272                       IntNo == Intrinsic::riscv_vsuxei_mask;
1273       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1274                        IntNo == Intrinsic::riscv_vsoxei_mask;
1275 
1276       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1277       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1278 
1279       unsigned CurOp = 2;
1280       SmallVector<SDValue, 8> Operands;
1281       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1282 
1283       MVT IndexVT;
1284       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1285                                  /*IsStridedOrIndexed*/ true, Operands,
1286                                  /*IsLoad=*/false, &IndexVT);
1287 
1288       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1289              "Element count mismatch");
1290 
1291       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1292       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1293       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1294       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1295           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1296           static_cast<unsigned>(IndexLMUL));
1297       MachineSDNode *Store =
1298           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1299 
1300       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1301         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1302 
1303       ReplaceNode(Node, Store);
1304       return;
1305     }
1306     case Intrinsic::riscv_vsm:
1307     case Intrinsic::riscv_vse:
1308     case Intrinsic::riscv_vse_mask:
1309     case Intrinsic::riscv_vsse:
1310     case Intrinsic::riscv_vsse_mask: {
1311       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1312                       IntNo == Intrinsic::riscv_vsse_mask;
1313       bool IsStrided =
1314           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1315 
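      // riscv_vsm (mask store) is selected as a plain unmasked unit-stride
      // store; the *_mask forms add a mask operand, and vsse/vsse_mask add a
      // stride operand.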
1316       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1317       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1318 
1319       unsigned CurOp = 2;
1320       SmallVector<SDValue, 8> Operands;
1321       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1322 
1323       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1324                                  Operands);
1325 
1326       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1327       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1328           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1329       MachineSDNode *Store =
1330           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1331       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1332         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1333 
1334       ReplaceNode(Node, Store);
1335       return;
1336     }
1337     }
1338     break;
1339   }
1340   case ISD::BITCAST: {
1341     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1342     // Just drop bitcasts between vectors if both are fixed or both are
1343     // scalable.
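    // Such bitcasts are no-ops at the register level: the source value already
    // lives in a vector register with the required bit pattern.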
1344     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1345         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1346       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1347       CurDAG->RemoveDeadNode(Node);
1348       return;
1349     }
1350     break;
1351   }
1352   case ISD::INSERT_SUBVECTOR: {
1353     SDValue V = Node->getOperand(0);
1354     SDValue SubV = Node->getOperand(1);
1355     SDLoc DL(SubV);
1356     auto Idx = Node->getConstantOperandVal(2);
1357     MVT SubVecVT = SubV.getSimpleValueType();
1358 
1359     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1360     MVT SubVecContainerVT = SubVecVT;
1361     // Establish the correct scalable-vector types for any fixed-length type.
1362     if (SubVecVT.isFixedLengthVector())
1363       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1364     if (VT.isFixedLengthVector())
1365       VT = TLI.getContainerForFixedLengthVector(VT);
1366 
1367     const auto *TRI = Subtarget->getRegisterInfo();
1368     unsigned SubRegIdx;
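    // Decompose the insert into a vector sub-register index plus a residual
    // element index. For example (illustrative), inserting an LMUL=1 subvector
    // at the start of the second register of an LMUL=2 group resolves to that
    // register's sub-register index with a residual Idx of 0.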
1369     std::tie(SubRegIdx, Idx) =
1370         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1371             VT, SubVecContainerVT, Idx, TRI);
1372 
1373     // If the Idx hasn't been completely eliminated then this is a subvector
1374     // insert which doesn't naturally align to a vector register. These must
1375     // be handled using instructions to manipulate the vector registers.
1376     if (Idx != 0)
1377       break;
1378 
1379     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1380     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1381                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1382                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1383     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1384     assert((!IsSubVecPartReg || V.isUndef()) &&
1385            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1386            "the subvector is smaller than a full-sized register");
1387 
1388     // If we haven't set a SubRegIdx, then we must be going between
1389     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1390     if (SubRegIdx == RISCV::NoSubRegister) {
1391       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1392       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1393                  InRegClassID &&
1394              "Unexpected subvector insertion");
1395       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1396       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1397                                                DL, VT, SubV, RC);
1398       ReplaceNode(Node, NewNode);
1399       return;
1400     }
1401 
1402     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1403     ReplaceNode(Node, Insert.getNode());
1404     return;
1405   }
1406   case ISD::EXTRACT_SUBVECTOR: {
1407     SDValue V = Node->getOperand(0);
1408     auto Idx = Node->getConstantOperandVal(1);
1409     MVT InVT = V.getSimpleValueType();
1410     SDLoc DL(V);
1411 
1412     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1413     MVT SubVecContainerVT = VT;
1414     // Establish the correct scalable-vector types for any fixed-length type.
1415     if (VT.isFixedLengthVector())
1416       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1417     if (InVT.isFixedLengthVector())
1418       InVT = TLI.getContainerForFixedLengthVector(InVT);
1419 
1420     const auto *TRI = Subtarget->getRegisterInfo();
1421     unsigned SubRegIdx;
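    // As for INSERT_SUBVECTOR above: resolve the extract to a sub-register
    // index where possible, leaving any residual element index in Idx.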
1422     std::tie(SubRegIdx, Idx) =
1423         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1424             InVT, SubVecContainerVT, Idx, TRI);
1425 
1426     // If the Idx hasn't been completely eliminated then this is a subvector
1427     // extract which doesn't naturally align to a vector register. These must
1428     // be handled using instructions to manipulate the vector registers.
1429     if (Idx != 0)
1430       break;
1431 
1432     // If we haven't set a SubRegIdx, then we must be going between
1433     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1434     if (SubRegIdx == RISCV::NoSubRegister) {
1435       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1436       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1437                  InRegClassID &&
1438              "Unexpected subvector extraction");
1439       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1440       SDNode *NewNode =
1441           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1442       ReplaceNode(Node, NewNode);
1443       return;
1444     }
1445 
1446     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1447     ReplaceNode(Node, Extract.getNode());
1448     return;
1449   }
1450   case RISCVISD::VMV_V_X_VL:
1451   case RISCVISD::VFMV_V_F_VL: {
1452     // Try to match splat of a scalar load to a strided load with stride of x0.
1453     SDValue Src = Node->getOperand(0);
1454     auto *Ld = dyn_cast<LoadSDNode>(Src);
1455     if (!Ld)
1456       break;
1457     EVT MemVT = Ld->getMemoryVT();
1458     // The memory VT should be the same size as the element type.
1459     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1460       break;
1461     if (!IsProfitableToFold(Src, Node, Node) ||
1462         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1463       break;
1464 
1465     SDValue VL;
1466     selectVLOp(Node->getOperand(1), VL);
1467 
1468     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1469     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1470 
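    // Use x0 as the stride register: a stride of zero re-reads the same scalar
    // address for every element, which produces the splat.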
1471     SDValue Operands[] = {Ld->getBasePtr(),
1472                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1473                           Ld->getChain()};
1474 
1475     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1476     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1477         /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
1478         static_cast<unsigned>(LMUL));
1479     MachineSDNode *Load =
1480         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1481 
1482     if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1483       CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1484 
1485     ReplaceNode(Node, Load);
1486     return;
1487   }
1488   }
1489 
1490   // Select the default instruction.
1491   SelectCode(Node);
1492 }
1493 
1494 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1495     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1496   switch (ConstraintID) {
1497   case InlineAsm::Constraint_m:
1498     // We just support simple memory operands that have a single address
1499     // operand and need no special handling.
1500     OutOps.push_back(Op);
1501     return false;
1502   case InlineAsm::Constraint_A:
1503     OutOps.push_back(Op);
1504     return false;
1505   default:
1506     break;
1507   }
1508 
1509   return true;
1510 }
1511 
1512 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1513   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1514     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1515     return true;
1516   }
1517   return false;
1518 }
1519 
1520 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1521   // If this is a FrameIndex, select it directly. Otherwise just let it get
1522   // selected to a register independently.
1523   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1524     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1525   else
1526     Base = Addr;
1527   return true;
1528 }
1529 
1530 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1531                                         SDValue &ShAmt) {
1532   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1533   // amount. If there is an AND on the shift amount, we can bypass it if it
1534   // doesn't affect any of those bits.
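  // For example, (sll X, (and Y, 63)) on RV64 can use Y directly as the shift
  // amount, since SLL only reads bits [5:0] of it anyway.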
1535   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1536     const APInt &AndMask = N->getConstantOperandAPInt(1);
1537 
1538     // Since the max shift amount is a power of 2 we can subtract 1 to make a
1539     // mask that covers the bits needed to represent all shift amounts.
1540     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1541     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1542 
1543     if (ShMask.isSubsetOf(AndMask)) {
1544       ShAmt = N.getOperand(0);
1545       return true;
1546     }
1547 
1548     // SimplifyDemandedBits may have optimized the mask so try restoring any
1549     // bits that are known zero.
1550     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1551     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1552       ShAmt = N.getOperand(0);
1553       return true;
1554     }
1555   }
1556 
1557   ShAmt = N;
1558   return true;
1559 }
1560 
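// Match a value that is already sign extended from i32: either an explicit
// sign_extend_inreg from i32, or any value whose upper bits are known to be
// copies of bit 31.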
1561 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1562   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1563       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1564     Val = N.getOperand(0);
1565     return true;
1566   }
1567   MVT VT = N.getSimpleValueType();
1568   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1569     Val = N;
1570     return true;
1571   }
1572 
1573   return false;
1574 }
1575 
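// Match a value that is zero extended from i32: either an AND with 0xffffffff,
// or any value whose upper bits are known to be zero.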
1576 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1577   if (N.getOpcode() == ISD::AND) {
1578     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1579     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1580       Val = N.getOperand(0);
1581       return true;
1582     }
1583   }
1584   MVT VT = N.getSimpleValueType();
1585   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1586   if (CurDAG->MaskedValueIsZero(N, Mask)) {
1587     Val = N;
1588     return true;
1589   }
1590 
1591   return false;
1592 }
1593 
1594 // Return true if all users of this SDNode* only consume the lower \p Bits.
1595 // This can be used to form W instructions for add/sub/mul/shl even when the
1596 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
1597 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
1598 // don't. The sext_inreg+add/sub/mul/shl gets selected, but the plain
1599 // add/sub/mul/shl still becomes a non-W instruction. By checking the users we
1600 // may be able to use a W instruction here too and CSE with the other
1601 // instruction if this has happened. We could try to detect that the CSE
1602 // opportunity exists before doing this, but that would be more complicated.
1603 // TODO: Does this need to look through AND/OR/XOR to their users to find more
1604 // opportunities?
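// For example, if every user of an ADD is an ADDW or an SW, only the low 32
// bits of the ADD's result are observed, so the ADD can itself be selected as
// ADDW.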
1605 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
1606   assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
1607           Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
1608           Node->getOpcode() == ISD::SRL ||
1609           Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
1610           isa<ConstantSDNode>(Node)) &&
1611          "Unexpected opcode");
1612 
1613   for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
1614     SDNode *User = *UI;
1615     // Users of this node should have already been instruction selected
1616     if (!User->isMachineOpcode())
1617       return false;
1618 
1619     // TODO: Add more opcodes?
1620     switch (User->getMachineOpcode()) {
1621     default:
1622       return false;
1623     case RISCV::ADDW:
1624     case RISCV::ADDIW:
1625     case RISCV::SUBW:
1626     case RISCV::MULW:
1627     case RISCV::SLLW:
1628     case RISCV::SLLIW:
1629     case RISCV::SRAW:
1630     case RISCV::SRAIW:
1631     case RISCV::SRLW:
1632     case RISCV::SRLIW:
1633     case RISCV::DIVW:
1634     case RISCV::DIVUW:
1635     case RISCV::REMW:
1636     case RISCV::REMUW:
1637     case RISCV::ROLW:
1638     case RISCV::RORW:
1639     case RISCV::RORIW:
1640     case RISCV::CLZW:
1641     case RISCV::CTZW:
1642     case RISCV::CPOPW:
1643     case RISCV::SLLIUW:
1644     case RISCV::FCVT_H_W:
1645     case RISCV::FCVT_H_WU:
1646     case RISCV::FCVT_S_W:
1647     case RISCV::FCVT_S_WU:
1648     case RISCV::FCVT_D_W:
1649     case RISCV::FCVT_D_WU:
1650       if (Bits < 32)
1651         return false;
1652       break;
1653     case RISCV::SLLI:
1654       // SLLI only uses the lower (XLen - ShAmt) bits.
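      // e.g. on RV64, (slli X, 40) only reads bits [23:0] of X; the remaining
      // bits are shifted out past bit 63.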
1655       if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
1656         return false;
1657       break;
1658     case RISCV::ADDUW:
1659     case RISCV::SH1ADDUW:
1660     case RISCV::SH2ADDUW:
1661     case RISCV::SH3ADDUW:
1662       // The first operand to add.uw/shXadd.uw is implicitly zero extended from
1663       // 32 bits.
1664       if (UI.getOperandNo() != 0 || Bits < 32)
1665         return false;
1666       break;
1667     case RISCV::SB:
1668       if (UI.getOperandNo() != 0 || Bits < 8)
1669         return false;
1670       break;
1671     case RISCV::SH:
1672       if (UI.getOperandNo() != 0 || Bits < 16)
1673         return false;
1674       break;
1675     case RISCV::SW:
1676       if (UI.getOperandNo() != 0 || Bits < 32)
1677         return false;
1678       break;
1679     }
1680   }
1681 
1682   return true;
1683 }
1684 
1685 // Select VL as a 5-bit immediate or a value that will become a register. This
1686 // allows us to choose between VSETIVLI and VSETVLI later.
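// VSETIVLI can only encode the AVL as a 5-bit unsigned immediate, hence the
// isUInt<5> check below.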
1687 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1688   auto *C = dyn_cast<ConstantSDNode>(N);
1689   if (C && isUInt<5>(C->getZExtValue()))
1690     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1691                                    N->getValueType(0));
1692   else
1693     VL = N;
1694 
1695   return true;
1696 }
1697 
1698 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1699   if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1700       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1701       N.getOpcode() != RISCVISD::VMV_V_X_VL)
1702     return false;
1703   SplatVal = N.getOperand(0);
1704   return true;
1705 }
1706 
1707 using ValidateFn = bool (*)(int64_t);
1708 
1709 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1710                                    SelectionDAG &DAG,
1711                                    const RISCVSubtarget &Subtarget,
1712                                    ValidateFn ValidateImm) {
1713   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1714        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1715        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1716       !isa<ConstantSDNode>(N.getOperand(0)))
1717     return false;
1718 
1719   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1720 
1721   // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1722   // share semantics when the operand type is wider than the resulting vector
1723   // element type: an implicit truncation first takes place. Therefore, perform
1724   // a manual truncation/sign-extension in order to ignore any truncated bits
1725   // and catch any zero-extended immediate.
1726   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1727   // sign-extending to (XLenVT -1).
1728   MVT XLenVT = Subtarget.getXLenVT();
1729   assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1730          "Unexpected splat operand type");
1731   MVT EltVT = N.getSimpleValueType().getVectorElementType();
1732   if (EltVT.bitsLT(XLenVT))
1733     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1734 
1735   if (!ValidateImm(SplatImm))
1736     return false;
1737 
1738   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1739   return true;
1740 }
1741 
1742 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
1743   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1744                                 [](int64_t Imm) { return isInt<5>(Imm); });
1745 }
1746 
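// Simm5Plus1 accepts immediates in the range [-15, 16]: the patterns that use
// this predicate encode Imm - 1, which then fits a plain simm5 field.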
1747 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
1748   return selectVSplatSimmHelper(
1749       N, SplatVal, *CurDAG, *Subtarget,
1750       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1751 }
1752 
1753 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
1754                                                       SDValue &SplatVal) {
1755   return selectVSplatSimmHelper(
1756       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1757         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1758       });
1759 }
1760 
1761 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
1762   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1763        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1764        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1765       !isa<ConstantSDNode>(N.getOperand(0)))
1766     return false;
1767 
1768   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1769 
1770   if (!isUInt<5>(SplatImm))
1771     return false;
1772 
1773   SplatVal =
1774       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1775 
1776   return true;
1777 }
1778 
1779 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
1780                                        SDValue &Imm) {
1781   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1782     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1783 
1784     if (!isInt<5>(ImmVal))
1785       return false;
1786 
1787     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1788     return true;
1789   }
1790 
1791   return false;
1792 }
1793 
1794 // Merge an ADDI into the offset of a load/store instruction where possible.
1795 // (load (addi base, off1), off2) -> (load base, off1+off2)
1796 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
1797 // This is possible when off1+off2 fits a 12-bit immediate.
1798 bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
1799   int OffsetOpIdx;
1800   int BaseOpIdx;
1801 
1802   // Only attempt this optimisation for I-type loads and S-type stores.
1803   switch (N->getMachineOpcode()) {
1804   default:
1805     return false;
1806   case RISCV::LB:
1807   case RISCV::LH:
1808   case RISCV::LW:
1809   case RISCV::LBU:
1810   case RISCV::LHU:
1811   case RISCV::LWU:
1812   case RISCV::LD:
1813   case RISCV::FLH:
1814   case RISCV::FLW:
1815   case RISCV::FLD:
1816     BaseOpIdx = 0;
1817     OffsetOpIdx = 1;
1818     break;
1819   case RISCV::SB:
1820   case RISCV::SH:
1821   case RISCV::SW:
1822   case RISCV::SD:
1823   case RISCV::FSH:
1824   case RISCV::FSW:
1825   case RISCV::FSD:
1826     BaseOpIdx = 1;
1827     OffsetOpIdx = 2;
1828     break;
1829   }
1830 
1831   if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
1832     return false;
1833 
1834   SDValue Base = N->getOperand(BaseOpIdx);
1835 
1836   // If the base is an ADDI, we can merge it in to the load/store.
1837   if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
1838     return false;
1839 
1840   SDValue ImmOperand = Base.getOperand(1);
1841   uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
1842 
1843   if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
1844     int64_t Offset1 = Const->getSExtValue();
1845     int64_t CombinedOffset = Offset1 + Offset2;
1846     if (!isInt<12>(CombinedOffset))
1847       return false;
1848     ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
1849                                            ImmOperand.getValueType());
1850   } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
1851     // If the off1 in (addi base, off1) is a global variable's address (its
1852     // low part, really), then we can rely on the alignment of that variable
1853     // to provide a margin of safety before off1 can overflow the 12 bits.
1854     // Check if off2 falls within that margin; if so off1+off2 can't overflow.
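    // For example, if the global is 8-byte aligned, its low offset is a
    // multiple of 8, so adding an off2 of 1..7 cannot carry out of the low 12
    // bits.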
1855     const DataLayout &DL = CurDAG->getDataLayout();
1856     Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
1857     if (Offset2 != 0 && Alignment <= Offset2)
1858       return false;
1859     int64_t Offset1 = GA->getOffset();
1860     int64_t CombinedOffset = Offset1 + Offset2;
1861     ImmOperand = CurDAG->getTargetGlobalAddress(
1862         GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
1863         CombinedOffset, GA->getTargetFlags());
1864   } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
1865     // Ditto.
1866     Align Alignment = CP->getAlign();
1867     if (Offset2 != 0 && Alignment <= Offset2)
1868       return false;
1869     int64_t Offset1 = CP->getOffset();
1870     int64_t CombinedOffset = Offset1 + Offset2;
1871     ImmOperand = CurDAG->getTargetConstantPool(
1872         CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
1873         CombinedOffset, CP->getTargetFlags());
1874   } else {
1875     return false;
1876   }
1877 
1878   LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
1879   LLVM_DEBUG(Base->dump(CurDAG));
1880   LLVM_DEBUG(dbgs() << "\nN: ");
1881   LLVM_DEBUG(N->dump(CurDAG));
1882   LLVM_DEBUG(dbgs() << "\n");
1883 
1884   // Modify the offset operand of the load/store.
1885   if (BaseOpIdx == 0) // Load
1886     CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
1887                                N->getOperand(2));
1888   else // Store
1889     CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
1890                                ImmOperand, N->getOperand(3));
1891 
1892   return true;
1893 }
1894 
1895 // Try to remove sext.w if the input is a W instruction or can be made into
1896 // a W instruction cheaply.
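// For example, (addiw (add X, Y), 0) can be replaced by a single (addw X, Y).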
1897 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
1898   // Look for the sext.w pattern, addiw rd, rs1, 0.
1899   if (N->getMachineOpcode() != RISCV::ADDIW ||
1900       !isNullConstant(N->getOperand(1)))
1901     return false;
1902 
1903   SDValue N0 = N->getOperand(0);
1904   if (!N0.isMachineOpcode())
1905     return false;
1906 
1907   switch (N0.getMachineOpcode()) {
1908   default:
1909     break;
1910   case RISCV::ADD:
1911   case RISCV::ADDI:
1912   case RISCV::SUB:
1913   case RISCV::MUL:
1914   case RISCV::SLLI: {
1915     // Convert sext.w+add/addi/sub/mul/slli to the corresponding W instruction.
1916     // This creates a new, independent instruction, which improves latency.
1917     unsigned Opc;
1918     switch (N0.getMachineOpcode()) {
1919     default:
1920       llvm_unreachable("Unexpected opcode!");
1921     case RISCV::ADD:  Opc = RISCV::ADDW;  break;
1922     case RISCV::ADDI: Opc = RISCV::ADDIW; break;
1923     case RISCV::SUB:  Opc = RISCV::SUBW;  break;
1924     case RISCV::MUL:  Opc = RISCV::MULW;  break;
1925     case RISCV::SLLI: Opc = RISCV::SLLIW; break;
1926     }
1927 
1928     SDValue N00 = N0.getOperand(0);
1929     SDValue N01 = N0.getOperand(1);
1930 
1931     // Shift amount needs to be uimm5.
1932     if (N0.getMachineOpcode() == RISCV::SLLI &&
1933         !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
1934       break;
1935 
1936     SDNode *Result =
1937         CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
1938                                N00, N01);
1939     ReplaceUses(N, Result);
1940     return true;
1941   }
1942   case RISCV::ADDW:
1943   case RISCV::ADDIW:
1944   case RISCV::SUBW:
1945   case RISCV::MULW:
1946   case RISCV::SLLIW:
1947     // Result is already sign extended; just remove the sext.w.
1948     // NOTE: We only handle the nodes that are selected with hasAllWUsers.
1949     ReplaceUses(N, N0.getNode());
1950     return true;
1951   }
1952 
1953   return false;
1954 }
1955 
1956 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
1957 // for instruction scheduling.
1958 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
1959   return new RISCVDAGToDAGISel(TM);
1960 }
1961