//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and VL patterns to
/// support code generation for the standard 'V' (Vector) extension,
/// version 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the VL patterns.
//===----------------------------------------------------------------------===//

// Operands: (passthru/merge, src, mask, vl).
def SDT_RISCVIntUnOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisVec<0>, SDTCisInt<0>,
                                               SDTCVecEltisVT<3, i1>,
                                               SDTCisSameNumEltsAs<0, 3>,
                                               SDTCisVT<4, XLenVT>]>;

// Operands: (lhs, rhs, merge, mask, vl).
def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<0, 2>,
                                                SDTCisVec<0>, SDTCisInt<0>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCVecEltisVT<4, i1>,
                                                SDTCisSameNumEltsAs<0, 4>,
                                                SDTCisVT<5, XLenVT>]>;

def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                              SDTCisVec<0>, SDTCisFP<0>,
                                              SDTCVecEltisVT<2, i1>,
                                              SDTCisSameNumEltsAs<0, 2>,
                                              SDTCisVT<3, XLenVT>]>;
def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisVec<0>, SDTCisFP<0>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCVecEltisVT<4, i1>,
                                               SDTCisSameNumEltsAs<0, 4>,
                                               SDTCisVT<5, XLenVT>]>;

def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<0, 2>,
                                                SDTCisVec<0>,
                                                SDTCisFP<0>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCVecEltisVT<4, i1>,
                                                SDTCisSameNumEltsAs<0, 4>,
                                                SDTCisVT<5, XLenVT>]>;

def riscv_vmv_v_v_vl : SDNode<"RISCVISD::VMV_V_V_VL",
                              SDTypeProfile<1, 3, [SDTCisVec<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisSameAs<0, 2>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL",
                              SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisVT<2, XLenVT>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL",
                               SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
                                                    SDTCisSameAs<0, 1>,
                                                    SDTCisEltOfVec<2, 0>,
                                                    SDTCisVT<3, XLenVT>]>>;
def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL",
                              SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                   SDTCisInt<0>,
                                                   SDTCisVT<2, XLenVT>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL",
                               SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                    SDTCisFP<0>,
                                                    SDTCisEltOfVec<2, 0>,
                                                    SDTCisVT<3, XLenVT>]>>;

// VL-predicated integer arithmetic/logical nodes.
def riscv_add_vl   : SDNode<"RISCVISD::ADD_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sub_vl   : SDNode<"RISCVISD::SUB_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_mul_vl   : SDNode<"RISCVISD::MUL_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_and_vl   : SDNode<"RISCVISD::AND_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_or_vl    : SDNode<"RISCVISD::OR_VL",    SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_xor_vl   : SDNode<"RISCVISD::XOR_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sdiv_vl  : SDNode<"RISCVISD::SDIV_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_srem_vl  : SDNode<"RISCVISD::SREM_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_udiv_vl  : SDNode<"RISCVISD::UDIV_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_urem_vl  : SDNode<"RISCVISD::UREM_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_shl_vl   : SDNode<"RISCVISD::SHL_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_sra_vl   : SDNode<"RISCVISD::SRA_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_srl_vl   : SDNode<"RISCVISD::SRL_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_rotl_vl  : SDNode<"RISCVISD::ROTL_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_rotr_vl  : SDNode<"RISCVISD::ROTR_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_smin_vl  : SDNode<"RISCVISD::SMIN_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_smax_vl  : SDNode<"RISCVISD::SMAX_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_umin_vl  : SDNode<"RISCVISD::UMIN_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_umax_vl  : SDNode<"RISCVISD::UMAX_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;

def riscv_bitreverse_vl : SDNode<"RISCVISD::BITREVERSE_VL", SDT_RISCVIntUnOp_VL>;
def riscv_bswap_vl      : SDNode<"RISCVISD::BSWAP_VL",      SDT_RISCVIntUnOp_VL>;
def riscv_ctlz_vl       : SDNode<"RISCVISD::CTLZ_VL",       SDT_RISCVIntUnOp_VL>;
def riscv_cttz_vl       : SDNode<"RISCVISD::CTTZ_VL",       SDT_RISCVIntUnOp_VL>;
def riscv_ctpop_vl      : SDNode<"RISCVISD::CTPOP_VL",      SDT_RISCVIntUnOp_VL>;

def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>;

// VL-predicated floating-point nodes.
def riscv_fadd_vl      : SDNode<"RISCVISD::FADD_VL",      SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fsub_vl      : SDNode<"RISCVISD::FSUB_VL",      SDT_RISCVFPBinOp_VL>;
def riscv_fmul_vl      : SDNode<"RISCVISD::FMUL_VL",      SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fdiv_vl      : SDNode<"RISCVISD::FDIV_VL",      SDT_RISCVFPBinOp_VL>;
def riscv_fneg_vl      : SDNode<"RISCVISD::FNEG_VL",      SDT_RISCVFPUnOp_VL>;
def riscv_fabs_vl      : SDNode<"RISCVISD::FABS_VL",      SDT_RISCVFPUnOp_VL>;
def riscv_fsqrt_vl     : SDNode<"RISCVISD::FSQRT_VL",     SDT_RISCVFPUnOp_VL>;
def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>;
def riscv_vfmin_vl     : SDNode<"RISCVISD::VFMIN_VL",     SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_vfmax_vl     : SDNode<"RISCVISD::VFMAX_VL",     SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;

// Strict (constrained) FP variants carry a chain.
def riscv_strict_fadd_vl  : SDNode<"RISCVISD::STRICT_FADD_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_fsub_vl  : SDNode<"RISCVISD::STRICT_FSUB_VL",  SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
def riscv_strict_fmul_vl  : SDNode<"RISCVISD::STRICT_FMUL_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_fdiv_vl  : SDNode<"RISCVISD::STRICT_FDIV_VL",  SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;

// Fragments matching either the normal or the strict form of an FP op.
def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                 [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                 [(riscv_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                 [(riscv_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                 [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                  [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
                                   (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;

def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL",
                             SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>,
                                                  SDTCisFP<1>, SDTCisVec<1>,
                                                  SDTCisSameSizeAs<0, 1>,
                                                  SDTCisSameNumEltsAs<0, 1>,
                                                  SDTCVecEltisVT<2, i1>,
                                                  SDTCisSameNumEltsAs<0, 2>,
                                                  SDTCisVT<3, XLenVT>]>>;

// Operands: (rs1, rs2, rs3, mask, vl) — same-width fused multiply-add.
def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                              SDTCisSameAs<0, 2>,
                                              SDTCisSameAs<0, 3>,
                                              SDTCisVec<0>, SDTCisFP<0>,
                                              SDTCVecEltisVT<4, i1>,
                                              SDTCisSameNumEltsAs<0, 4>,
                                              SDTCisVT<5, XLenVT>]>;
def riscv_vfmadd_vl  : SDNode<"RISCVISD::VFMADD_VL",  SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
def riscv_vfmsub_vl  : SDNode<"RISCVISD::VFMSUB_VL",  SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;

// Widening FMA: multiplicands are the narrow type, addend/result the wide one.
def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
                                               SDTCisVec<1>, SDTCisFP<1>,
                                               SDTCisOpSmallerThanOp<1, 0>,
                                               SDTCisSameNumEltsAs<0, 1>,
                                               SDTCisSameAs<1, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCVecEltisVT<4, i1>,
                                               SDTCisSameNumEltsAs<0, 4>,
                                               SDTCisVT<5, XLenVT>]>;
def riscv_vfwmadd_vl  : SDNode<"RISCVISD::VFWMADD_VL",  SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
def riscv_vfwnmadd_vl : SDNode<"RISCVISD::VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
def riscv_vfwmsub_vl  : SDNode<"RISCVISD::VFWMSUB_VL",  SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
def riscv_vfwnmsub_vl : SDNode<"RISCVISD::VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;

def riscv_strict_vfmadd_vl  : SDNode<"RISCVISD::STRICT_VFMADD_VL",  SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_vfnmadd_vl : SDNode<"RISCVISD::STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_vfmsub_vl  : SDNode<"RISCVISD::STRICT_VFMSUB_VL",  SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_vfnmsub_vl : SDNode<"RISCVISD::STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;

// Fragments matching either the normal or the strict form of an FMA.
def any_riscv_vfmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                   [(riscv_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    (riscv_strict_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
def any_riscv_vfnmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    [(riscv_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                     (riscv_strict_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
def any_riscv_vfmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                   [(riscv_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    (riscv_strict_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
def any_riscv_vfnmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    [(riscv_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                     (riscv_strict_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;

def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;

def riscv_fpround_vl          : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>;
def riscv_strict_fpround_vl   : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;
def riscv_fpextend_vl         : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>;
def riscv_strict_fpextend_vl  : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>;
def riscv_fncvt_rod_vl        : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>;
def riscv_strict_fncvt_rod_vl : SDNode<"RISCVISD::STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;

def any_riscv_fpround_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                    [(riscv_fpround_vl node:$src, node:$mask, node:$vl),
                                     (riscv_strict_fpround_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_fpextend_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                     [(riscv_fpextend_vl node:$src, node:$mask, node:$vl),
                                      (riscv_strict_fpextend_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_fncvt_rod_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                      [(riscv_fncvt_rod_vl node:$src, node:$mask, node:$vl),
                                       (riscv_strict_fncvt_rod_vl node:$src, node:$mask, node:$vl)]>;

def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  SDTCisVT<4, XLenVT> // Rounding mode
]>;

def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  SDTCisVT<4, XLenVT> // Rounding mode
]>;

// Operands: (lhs, rhs, cc, merge, mask, vl).
def SDT_RISCVSETCCOP_VL : SDTypeProfile<1, 6, [
  SDTCVecEltisVT<0, i1>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>, SDTCisSameAs<0, 4>,
  SDTCisSameAs<0, 5>, SDTCisVT<6, XLenVT>]>;

// Float -> Int
def riscv_vfcvt_xu_f_vl    : SDNode<"RISCVISD::VFCVT_XU_F_VL",    SDT_RISCVFP2IOp_VL>;
def riscv_vfcvt_x_f_vl     : SDNode<"RISCVISD::VFCVT_X_F_VL",     SDT_RISCVFP2IOp_VL>;
def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>;
def riscv_vfcvt_rm_x_f_vl  : SDNode<"RISCVISD::VFCVT_RM_X_F_VL",  SDT_RISCVFP2IOp_RM_VL>;

def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>;
def riscv_vfcvt_rtz_x_f_vl  : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL",  SDT_RISCVFP2IOp_VL>;

def riscv_strict_vfcvt_rm_x_f_vl   : SDNode<"RISCVISD::STRICT_VFCVT_RM_X_F_VL",   SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>;
def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;
def riscv_strict_vfcvt_rtz_x_f_vl  : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL",  SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;

def any_riscv_vfcvt_rm_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm),
                                         [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm),
                                          (riscv_strict_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm)]>;
def any_riscv_vfcvt_rtz_xu_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                           [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl),
                                            (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_vfcvt_rtz_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                          [(riscv_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl),
                                           (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>;

// Int -> Float
def riscv_sint_to_fp_vl    : SDNode<"RISCVISD::SINT_TO_FP_VL",    SDT_RISCVI2FPOp_VL>;
def riscv_uint_to_fp_vl    : SDNode<"RISCVISD::UINT_TO_FP_VL",    SDT_RISCVI2FPOp_VL>;
def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>;
def riscv_vfcvt_rm_f_x_vl  : SDNode<"RISCVISD::VFCVT_RM_F_X_VL",  SDT_RISCVI2FPOp_RM_VL>;

def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;
def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;

def any_riscv_sint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                       [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl),
                                        (riscv_strict_sint_to_fp_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                       [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl),
                                        (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>;

def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>;
def riscv_strict_vfround_noexcept_vl: SDNode<"RISCVISD::STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;

def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                             [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl),
                                              (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>;

def riscv_setcc_vl          : SDNode<"RISCVISD::SETCC_VL",          SDT_RISCVSETCCOP_VL>;
def riscv_strict_fsetcc_vl  : SDNode<"RISCVISD::STRICT_FSETCC_VL",  SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
                                   [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
                                    (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>;
def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
                                    [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
                                     (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>;

def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
                                  SDTypeProfile<1, 5, [SDTCisVec<0>,
                                                       SDTCisSameAs<0, 1>,
                                                       SDTCisVT<2, XLenVT>,
                                                       SDTCisSameAs<0, 3>,
                                                       SDTCVecEltisVT<4, i1>,
                                                       SDTCisSameNumEltsAs<0, 4>,
                                                       SDTCisVT<5, XLenVT>]>>;
def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL",
                                  SDTypeProfile<1, 5, [SDTCisVec<0>,
                                                       SDTCisSameAs<0, 1>,
                                                       SDTCisInt<2>,
                                                       SDTCisSameNumEltsAs<0, 2>,
                                                       SDTCisSameSizeAs<0, 2>,
                                                       SDTCisSameAs<0, 3>,
                                                       SDTCVecEltisVT<4, i1>,
                                                       SDTCisSameNumEltsAs<0, 4>,
                                                       SDTCisVT<5, XLenVT>]>>;
def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL",
                                      SDTypeProfile<1, 5, [SDTCisVec<0>,
                                                           SDTCisSameAs<0, 1>,
                                                           SDTCisInt<2>,
                                                           SDTCVecEltisVT<2, i16>,
                                                           SDTCisSameNumEltsAs<0, 2>,
                                                           SDTCisSameAs<0, 3>,
                                                           SDTCVecEltisVT<4, i1>,
                                                           SDTCisSameNumEltsAs<0, 4>,
                                                           SDTCisVT<5, XLenVT>]>>;

// Operands: (mask, true, false, vl).
def SDT_RISCVSelect_VL : SDTypeProfile<1, 4, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>,
  SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisVT<4, XLenVT>
]>;

def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL", SDT_RISCVSelect_VL>;

// Operands: (mask, true, false, passthru, vl).
def SDT_RISCVVMERGE_VL : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>,
  SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisSameAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_vmerge_vl : SDNode<"RISCVISD::VMERGE_VL", SDT_RISCVVMERGE_VL>;

def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>,
                                                SDTCisVT<1, XLenVT>]>;
def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>;
def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>;

def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                 SDTCisSameAs<0, 2>,
                                                 SDTCVecEltisVT<0, i1>,
                                                 SDTCisVT<3, XLenVT>]>;
def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmor_vl  : SDNode<"RISCVISD::VMOR_VL",  SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;

def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>;

// Mask-not is modelled as xor with an all-ones mask.
def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl),
                             (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>;

def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL",
                            SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
                                                 SDTCisVec<1>, SDTCisInt<1>,
                                                 SDTCVecEltisVT<2, i1>,
                                                 SDTCisSameNumEltsAs<1, 2>,
                                                 SDTCisVT<3, XLenVT>]>>;

def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL",
                             SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
                                                  SDTCisVec<1>, SDTCisInt<1>,
                                                  SDTCVecEltisVT<2, i1>,
                                                  SDTCisSameNumEltsAs<1, 2>,
                                                  SDTCisVT<3, XLenVT>]>>;

def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>,
                                               SDTCisSameNumEltsAs<0, 1>,
                                               SDTCisSameNumEltsAs<1, 2>,
                                               SDTCVecEltisVT<2, i1>,
                                               SDTCisVT<3, XLenVT>]>;
def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>;
def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>;

def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
                                   SDTypeProfile<1, 3, [SDTCisVec<0>,
                                                        SDTCisSameNumEltsAs<0, 1>,
                                                        SDTCisSameNumEltsAs<0, 2>,
                                                        SDTCVecEltisVT<2, i1>,
                                                        SDTCisVT<3, XLenVT>]>>;

// Widening binop: narrow sources (ops 1,2), wide result/merge (ops 0,3).
def SDT_RISCVVWIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                  SDTCisInt<1>,
                                                  SDTCisSameNumEltsAs<0, 1>,
                                                  SDTCisOpSmallerThanOp<1, 0>,
                                                  SDTCisSameAs<1, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisSameNumEltsAs<1, 4>,
                                                  SDTCVecEltisVT<4, i1>,
                                                  SDTCisVT<5, XLenVT>]>;
def riscv_vwmul_vl   : SDNode<"RISCVISD::VWMUL_VL",   SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwmulu_vl  : SDNode<"RISCVISD::VWMULU_VL",  SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>;
def riscv_vwadd_vl   : SDNode<"RISCVISD::VWADD_VL",   SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwaddu_vl  : SDNode<"RISCVISD::VWADDU_VL",  SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwsub_vl   : SDNode<"RISCVISD::VWSUB_VL",   SDT_RISCVVWIntBinOp_VL, []>;
def riscv_vwsubu_vl  : SDNode<"RISCVISD::VWSUBU_VL",  SDT_RISCVVWIntBinOp_VL, []>;
def riscv_vwsll_vl   : SDNode<"RISCVISD::VWSLL_VL",   SDT_RISCVVWIntBinOp_VL, []>;

def SDT_RISCVVWIntTernOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisInt<1>,
                                                   SDTCisSameNumEltsAs<0, 1>,
                                                   SDTCisOpSmallerThanOp<1, 0>,
                                                   SDTCisSameAs<1, 2>,
                                                   SDTCisSameAs<0, 3>,
                                                   SDTCisSameNumEltsAs<1, 4>,
                                                   SDTCVecEltisVT<4, i1>,
                                                   SDTCisVT<5, XLenVT>]>;
def riscv_vwmacc_vl   : SDNode<"RISCVISD::VWMACC_VL",   SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
def riscv_vwmaccu_vl  : SDNode<"RISCVISD::VWMACCU_VL",  SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
def riscv_vwmaccsu_vl : SDNode<"RISCVISD::VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>;

def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
                                                 SDTCisFP<1>,
                                                 SDTCisSameNumEltsAs<0, 1>,
                                                 SDTCisOpSmallerThanOp<1, 0>,
                                                 SDTCisSameAs<1, 2>,
                                                 SDTCisSameAs<0, 3>,
                                                 SDTCisSameNumEltsAs<1, 4>,
                                                 SDTCVecEltisVT<4, i1>,
                                                 SDTCisVT<5, XLenVT>]>;
def riscv_vfwmul_vl : SDNode<"RISCVISD::VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>;
def riscv_vfwadd_vl : SDNode<"RISCVISD::VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>;
def riscv_vfwsub_vl : SDNode<"RISCVISD::VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>;

// Narrowing binop: narrow result (op 0), wide first source (op 1).
def SDT_RISCVVNIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                  SDTCisInt<1>,
                                                  SDTCisSameNumEltsAs<0, 1>,
                                                  SDTCisOpSmallerThanOp<0, 1>,
                                                  SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisSameNumEltsAs<0, 4>,
                                                  SDTCVecEltisVT<4, i1>,
                                                  SDTCisVT<5, XLenVT>]>;
def riscv_vnsrl_vl : SDNode<"RISCVISD::VNSRL_VL", SDT_RISCVVNIntBinOp_VL>;

// Widening binop with a wide first operand (the ".w" instruction forms).
def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisInt<2>,
                                                   SDTCisSameNumEltsAs<1, 2>,
                                                   SDTCisOpSmallerThanOp<2, 1>,
                                                   SDTCisSameAs<0, 3>,
                                                   SDTCisSameNumEltsAs<1, 4>,
                                                   SDTCVecEltisVT<4, i1>,
                                                   SDTCisVT<5, XLenVT>]>;
def riscv_vwadd_w_vl  : SDNode<"RISCVISD::VWADD_W_VL",  SDT_RISCVVWIntBinOpW_VL>;
def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>;
def riscv_vwsub_w_vl  : SDNode<"RISCVISD::VWSUB_W_VL",  SDT_RISCVVWIntBinOpW_VL>;
def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>;

def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
                                                  SDTCisSameAs<0, 1>,
                                                  SDTCisFP<2>,
                                                  SDTCisSameNumEltsAs<1, 2>,
                                                  SDTCisOpSmallerThanOp<2, 1>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisSameNumEltsAs<1, 4>,
                                                  SDTCVecEltisVT<4, i1>,
                                                  SDTCisVT<5, XLenVT>]>;

def riscv_vfwadd_w_vl : SDNode<"RISCVISD::VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>;
def riscv_vfwsub_w_vl : SDNode<"RISCVISD::VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>;

def SDTRVVVecReduce : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;

// "oneuse" fragments restrict combines to nodes with a single user.
def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                       node:$E),
                                  (riscv_add_vl node:$A, node:$B, node:$C,
                                                node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                       node:$E),
                                  (riscv_sub_vl node:$A, node:$B, node:$C,
                                                node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                       node:$E),
                                  (riscv_mul_vl node:$A, node:$B, node:$C,
                                                node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                         node:$E),
                                    (riscv_vwmul_vl node:$A, node:$B, node:$C,
                                                    node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                          node:$E),
                                     (riscv_vwmulu_vl node:$A, node:$B, node:$C,
                                                      node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                           node:$E),
                                      (riscv_vwmulsu_vl node:$A, node:$B, node:$C,
                                                        node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                   (riscv_sext_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                   (riscv_zext_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                       (riscv_fpextend_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                          node:$E),
                                     (riscv_vfmadd_vl node:$A, node:$B,
                                                      node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                           node:$E),
                                      (riscv_vfnmadd_vl node:$A, node:$B,
                                                        node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                          node:$E),
                                     (riscv_vfmsub_vl node:$A, node:$B,
                                                      node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                           node:$E),
                                      (riscv_vfnmsub_vl node:$A, node:$B,
                                                        node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
                "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
  def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;

// Give explicit Complexity to prefer simm5/uimm5.
def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 3>;
def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<5>", [], [], 3>;
def SplatPat_uimm6 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<6>", [], [], 3>;
def SplatPat_simm5_plus1
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 3>;
def SplatPat_simm5_plus1_nonzero
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>;

// Selects extends or truncates of splats where we only care about the lowest 8
// bits of each element.
def Low8BitsSplatPat
    : ComplexPattern<vAny, 1, "selectLow8BitsVSplat", [], [], 2>;

// Ignore the vl operand on vmv_v_f, and vmv_s_f.
def SplatFPOp : PatFrags<(ops node:$op),
                         [(riscv_vfmv_v_f_vl undef, node:$op, srcvalue),
                          (riscv_vfmv_s_f_vl undef, node:$op, srcvalue)]>;

def sew8simm5  : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>",  []>;
def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>;
def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>;
def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>;

// Match a masked vector-vector VL op and select the corresponding _MASK
// pseudo (optionally the SEW-aware "_E<sew>" variant).
class VPatBinaryVL_V<SDPatternOperator vop,
                     string instruction_name,
                     string suffix,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int log2sew,
                     LMULInfo vlmul,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     VReg op2_reg_class,
                     bit isSEWAware = 0>
    : Pat<(result_type (vop
                        (op1_type op1_reg_class:$rs1),
                        (op2_type op2_reg_class:$rs2),
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
                !if(isSEWAware,
                    instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                    instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
               result_reg_class:$merge,
               op1_reg_class:$rs1,
               op2_reg_class:$rs2,
               (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// As VPatBinaryVL_V, but for pseudos taking a rounding-mode operand.
class VPatBinaryVL_V_RM<SDPatternOperator vop,
                        string instruction_name,
                        string suffix,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int log2sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        VReg op1_reg_class,
                        VReg op2_reg_class,
                        bit isSEWAware = 0>
    : Pat<(result_type (vop
                        (op1_type op1_reg_class:$rs1),
                        (op2_type op2_reg_class:$rs2),
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
                !if(isSEWAware,
                    instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                    instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
               result_reg_class:$merge,
               op1_reg_class:$rs1,
               op2_reg_class:$rs2,
               (mask_type V0),
               // Value to indicate no rounding mode change in
               // RISCVInsertReadWriteCSR
               FRM_DYN,
               GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// Unmasked "wide op= narrow" patterns selecting the _TIED pseudo, where the
// destination register is also the first source.
multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop,
                                    string instruction_name,
                                    string suffix,
                                    ValueType result_type,
                                    ValueType op2_type,
                                    int sew,
                                    LMULInfo vlmul,
                                    VReg result_reg_class,
                                    VReg op2_reg_class> {
  def : Pat<(result_type (vop
                          (result_type result_reg_class:$rs1),
                          (op2_type op2_reg_class:$rs2),
                          srcvalue,
                          true_mask,
                          VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_TIED")
                 result_reg_class:$rs1,
                 op2_reg_class:$rs2,
                 GPR:$vl, sew, TAIL_AGNOSTIC)>;
  // Tail undisturbed
  def : Pat<(riscv_vmerge_vl true_mask,
                             (result_type (vop
                                           result_reg_class:$rs1,
                                           (op2_type op2_reg_class:$rs2),
                                           srcvalue,
                                           true_mask,
                                           VLOpFrag)),
                             result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_TIED")
                 result_reg_class:$rs1,
                 op2_reg_class:$rs2,
                 GPR:$vl, sew, TU_MU)>;
}

// As VPatTiedBinaryNoMaskVL_V, but for pseudos taking a rounding-mode operand.
multiclass VPatTiedBinaryNoMaskVL_V_RM<SDNode vop,
                                       string instruction_name,
                                       string suffix,
                                       ValueType result_type,
                                       ValueType op2_type,
                                       int sew,
                                       LMULInfo vlmul,
                                       VReg result_reg_class,
                                       VReg op2_reg_class> {
  def : Pat<(result_type (vop
                          (result_type result_reg_class:$rs1),
                          (op2_type op2_reg_class:$rs2),
                          srcvalue,
                          true_mask,
                          VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_TIED")
                 result_reg_class:$rs1,
                 op2_reg_class:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, sew, TAIL_AGNOSTIC)>;
  // Tail undisturbed
  def : Pat<(riscv_vmerge_vl true_mask,
                             (result_type (vop
                                           result_reg_class:$rs1,
                                           (op2_type op2_reg_class:$rs2),
                                           srcvalue,
                                           true_mask,
                                           VLOpFrag)),
                             result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"#vlmul.MX#"_TIED")
                 result_reg_class:$rs1,
                 op2_reg_class:$rs2,
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, sew, TU_MU)>;
}

// Vector-scalar/vector-immediate form: the second operand is a splat of a
// GPR or immediate, matched through SplatPatKind.
class VPatBinaryVL_XI<SDPatternOperator vop,
                      string instruction_name,
                      string suffix,
                      ValueType result_type,
                      ValueType vop1_type,
                      ValueType vop2_type,
                      ValueType mask_type,
                      int log2sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg vop_reg_class,
                      ComplexPattern SplatPatKind,
                      DAGOperand xop_kind,
                      bit isSEWAware = 0>
    : Pat<(result_type (vop
                        (vop1_type vop_reg_class:$rs1),
                        (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))),
                        (result_type result_reg_class:$merge),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
                !if(isSEWAware,
                    instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                    instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
               result_reg_class:$merge,
               vop_reg_class:$rs1,
               xop_kind:$rs2,
               (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name,
                              list<VTypeInfo> vtilist = AllIntegerVectors,
                              bit isSEWAware = 0> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinaryVL_V<vop, instruction_name, "VV",
                           vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                           vti.RegClass, isSEWAware>;
      def : VPatBinaryVL_XI<vop, instruction_name, "VX",
                            vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                            SplatPat, GPR, isSEWAware>;
    }
  }
}

multiclass VPatBinaryVL_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                 Operand ImmType = simm5>
    : VPatBinaryVL_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : VPatBinaryVL_XI<vop, instruction_name, "VI",
                          vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                          vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                          !cast<ComplexPattern>(SplatPat#_#ImmType),
                          ImmType>;
  }
}

multiclass VPatBinaryWVL_VV_VX<SDPatternOperator vop, string instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : VPatBinaryVL_V<vop, instruction_name, "VV",
                           wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_XI<vop, instruction_name, "VX",
                            wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                            SplatPat, GPR>;
    }
  }
}

multiclass VPatBinaryWVL_VV_VX_WV_WX<SDPatternOperator vop, SDNode vop_w,
                                     string instruction_name>
    : VPatBinaryWVL_VV_VX<vop, instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV",
                                      wti.Vector, vti.Vector, vti.Log2SEW,
                                      vti.LMul, wti.RegClass, vti.RegClass>;
      def : VPatBinaryVL_V<vop_w, instruction_name, "WV",
                           wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_XI<vop_w, instruction_name, "WX",
                            wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                            SplatPat, GPR>;
    }
  }
}

multiclass
VPatBinaryNVL_WV_WX_WI<SDPatternOperator vop, string instruction_name> { 840 foreach VtiToWti = AllWidenableIntVectors in { 841 defvar vti = VtiToWti.Vti; 842 defvar wti = VtiToWti.Wti; 843 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 844 GetVTypePredicates<wti>.Predicates) in { 845 def : VPatBinaryVL_V<vop, instruction_name, "WV", 846 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 847 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 848 vti.RegClass>; 849 def : VPatBinaryVL_XI<vop, instruction_name, "WX", 850 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 851 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 852 SplatPat, GPR>; 853 def : VPatBinaryVL_XI<vop, instruction_name, "WI", 854 vti.Vector, wti.Vector, vti.Vector, vti.Mask, 855 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass, 856 !cast<ComplexPattern>(SplatPat#_#uimm5), 857 uimm5>; 858 } 859 } 860} 861 862class VPatBinaryVL_VF<SDPatternOperator vop, 863 string instruction_name, 864 ValueType result_type, 865 ValueType vop1_type, 866 ValueType vop2_type, 867 ValueType mask_type, 868 int log2sew, 869 LMULInfo vlmul, 870 VReg result_reg_class, 871 VReg vop_reg_class, 872 RegisterClass scalar_reg_class, 873 bit isSEWAware = 0> 874 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 875 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 876 (result_type result_reg_class:$merge), 877 (mask_type V0), 878 VLOpFrag)), 879 (!cast<Instruction>( 880 !if(isSEWAware, 881 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 882 instruction_name#"_"#vlmul.MX#"_MASK")) 883 result_reg_class:$merge, 884 vop_reg_class:$rs1, 885 scalar_reg_class:$rs2, 886 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 887 888class VPatBinaryVL_VF_RM<SDPatternOperator vop, 889 string instruction_name, 890 ValueType result_type, 891 ValueType vop1_type, 892 ValueType vop2_type, 893 ValueType mask_type, 894 int log2sew, 895 LMULInfo vlmul, 896 VReg result_reg_class, 897 VReg vop_reg_class, 898 RegisterClass 
scalar_reg_class, 899 bit isSEWAware = 0> 900 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 901 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 902 (result_type result_reg_class:$merge), 903 (mask_type V0), 904 VLOpFrag)), 905 (!cast<Instruction>( 906 !if(isSEWAware, 907 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 908 instruction_name#"_"#vlmul.MX#"_MASK")) 909 result_reg_class:$merge, 910 vop_reg_class:$rs1, 911 scalar_reg_class:$rs2, 912 (mask_type V0), 913 // Value to indicate no rounding mode change in 914 // RISCVInsertReadWriteCSR 915 FRM_DYN, 916 GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 917 918multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name, 919 bit isSEWAware = 0> { 920 foreach vti = AllFloatVectors in { 921 let Predicates = GetVTypePredicates<vti>.Predicates in { 922 def : VPatBinaryVL_V<vop, instruction_name, "VV", 923 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 924 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 925 vti.RegClass, isSEWAware>; 926 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 927 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 928 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 929 vti.ScalarRegClass, isSEWAware>; 930 } 931 } 932} 933 934multiclass VPatBinaryFPVL_VV_VF_RM<SDPatternOperator vop, string instruction_name, 935 bit isSEWAware = 0> { 936 foreach vti = AllFloatVectors in { 937 let Predicates = GetVTypePredicates<vti>.Predicates in { 938 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV", 939 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 940 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 941 vti.RegClass, isSEWAware>; 942 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 943 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 944 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 945 vti.ScalarRegClass, isSEWAware>; 946 } 947 } 948} 949 950multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name, 951 bit 
isSEWAware = 0> { 952 foreach fvti = AllFloatVectors in { 953 let Predicates = GetVTypePredicates<fvti>.Predicates in 954 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 955 fvti.RegClass:$rs1, 956 (fvti.Vector fvti.RegClass:$merge), 957 (fvti.Mask V0), 958 VLOpFrag)), 959 (!cast<Instruction>( 960 !if(isSEWAware, 961 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 962 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 963 fvti.RegClass:$merge, 964 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 965 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 966 } 967} 968 969multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name, 970 bit isSEWAware = 0> { 971 foreach fvti = AllFloatVectors in { 972 let Predicates = GetVTypePredicates<fvti>.Predicates in 973 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 974 fvti.RegClass:$rs1, 975 (fvti.Vector fvti.RegClass:$merge), 976 (fvti.Mask V0), 977 VLOpFrag)), 978 (!cast<Instruction>( 979 !if(isSEWAware, 980 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 981 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 982 fvti.RegClass:$merge, 983 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 984 (fvti.Mask V0), 985 // Value to indicate no rounding mode change in 986 // RISCVInsertReadWriteCSR 987 FRM_DYN, 988 GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 989 } 990} 991 992multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name, 993 CondCode cc> { 994 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 995 vti.RegClass:$rs2, cc, 996 VR:$merge, 997 (vti.Mask V0), 998 VLOpFrag)), 999 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 1000 VR:$merge, 1001 vti.RegClass:$rs1, 1002 vti.RegClass:$rs2, 1003 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1004} 1005 1006// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped. 
1007multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name, 1008 CondCode cc, CondCode invcc> 1009 : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> { 1010 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2), 1011 vti.RegClass:$rs1, invcc, 1012 VR:$merge, 1013 (vti.Mask V0), 1014 VLOpFrag)), 1015 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 1016 VR:$merge, vti.RegClass:$rs1, 1017 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1018} 1019 1020multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name, 1021 CondCode cc, CondCode invcc> { 1022 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK"); 1023 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1024 (SplatPat (XLenVT GPR:$rs2)), cc, 1025 VR:$merge, 1026 (vti.Mask V0), 1027 VLOpFrag)), 1028 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1029 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1030 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)), 1031 (vti.Vector vti.RegClass:$rs1), invcc, 1032 VR:$merge, 1033 (vti.Mask V0), 1034 VLOpFrag)), 1035 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1036 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1037} 1038 1039multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name, 1040 CondCode cc, CondCode invcc> { 1041 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1042 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1043 (SplatPat_simm5 simm5:$rs2), cc, 1044 VR:$merge, 1045 (vti.Mask V0), 1046 VLOpFrag)), 1047 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1048 XLenVT:$rs2, (vti.Mask V0), GPR:$vl, 1049 vti.Log2SEW)>; 1050 1051 // FIXME: Can do some canonicalization to remove these patterns. 
1052 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2), 1053 (vti.Vector vti.RegClass:$rs1), invcc, 1054 VR:$merge, 1055 (vti.Mask V0), 1056 VLOpFrag)), 1057 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1058 simm5:$rs2, (vti.Mask V0), GPR:$vl, 1059 vti.Log2SEW)>; 1060} 1061 1062multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti, 1063 string instruction_name, 1064 CondCode cc, CondCode invcc, 1065 ComplexPattern splatpat_kind> { 1066 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1067 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1068 (splatpat_kind simm5:$rs2), cc, 1069 VR:$merge, 1070 (vti.Mask V0), 1071 VLOpFrag)), 1072 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1073 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1074 vti.Log2SEW)>; 1075 1076 // FIXME: Can do some canonicalization to remove these patterns. 1077 def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2), 1078 (vti.Vector vti.RegClass:$rs1), invcc, 1079 VR:$merge, 1080 (vti.Mask V0), 1081 VLOpFrag)), 1082 (instruction_masked VR:$merge, vti.RegClass:$rs1, 1083 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1084 vti.Log2SEW)>; 1085} 1086 1087multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc, 1088 string inst_name, 1089 string swapped_op_inst_name> { 1090 foreach fvti = AllFloatVectors in { 1091 let Predicates = GetVTypePredicates<fvti>.Predicates in { 1092 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1093 fvti.RegClass:$rs2, 1094 cc, 1095 VR:$merge, 1096 (fvti.Mask V0), 1097 VLOpFrag)), 1098 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK") 1099 VR:$merge, fvti.RegClass:$rs1, 1100 fvti.RegClass:$rs2, (fvti.Mask V0), 1101 GPR:$vl, fvti.Log2SEW)>; 1102 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1103 (SplatFPOp fvti.ScalarRegClass:$rs2), 1104 cc, 1105 VR:$merge, 1106 (fvti.Mask V0), 1107 VLOpFrag)), 1108 
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1109 VR:$merge, fvti.RegClass:$rs1, 1110 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1111 GPR:$vl, fvti.Log2SEW)>; 1112 def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 1113 (fvti.Vector fvti.RegClass:$rs1), 1114 cc, 1115 VR:$merge, 1116 (fvti.Mask V0), 1117 VLOpFrag)), 1118 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1119 VR:$merge, fvti.RegClass:$rs1, 1120 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1121 GPR:$vl, fvti.Log2SEW)>; 1122 } 1123 } 1124} 1125 1126multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix, 1127 list <VTypeInfoToFraction> fraction_list> { 1128 foreach vtiTofti = fraction_list in { 1129 defvar vti = vtiTofti.Vti; 1130 defvar fti = vtiTofti.Fti; 1131 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1132 GetVTypePredicates<fti>.Predicates) in 1133 def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2), 1134 (fti.Mask V0), VLOpFrag)), 1135 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK") 1136 (vti.Vector (IMPLICIT_DEF)), 1137 fti.RegClass:$rs2, 1138 (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1139 } 1140} 1141 1142// Single width converting 1143 1144multiclass VPatConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1145 foreach fvti = AllFloatVectors in { 1146 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1147 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1148 GetVTypePredicates<ivti>.Predicates) in 1149 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1150 (fvti.Mask V0), 1151 VLOpFrag)), 1152 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1153 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1154 (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>; 1155 } 1156} 1157 1158multiclass VPatConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> { 1159 foreach fvti = AllFloatVectors in { 1160 defvar 
ivti = GetIntVTypeInfo<fvti>.Vti; 1161 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1162 GetVTypePredicates<ivti>.Predicates) in 1163 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1164 (fvti.Mask V0), 1165 VLOpFrag)), 1166 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1167 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1168 (fvti.Mask V0), 1169 // Value to indicate no rounding mode change in 1170 // RISCVInsertReadWriteCSR 1171 FRM_DYN, 1172 GPR:$vl, ivti.Log2SEW, TA_MA)>; 1173 } 1174} 1175 1176 1177multiclass VPatConvertFP2I_RM_VL_V<SDPatternOperator vop, string instruction_name> { 1178 foreach fvti = AllFloatVectors in { 1179 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1180 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1181 GetVTypePredicates<ivti>.Predicates) in 1182 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1183 (fvti.Mask V0), (XLenVT timm:$frm), 1184 VLOpFrag)), 1185 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1186 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1187 (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW, 1188 TA_MA)>; 1189 } 1190} 1191 1192multiclass VPatConvertI2FPVL_V_RM<SDPatternOperator vop, string instruction_name> { 1193 foreach fvti = AllFloatVectors in { 1194 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1195 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1196 GetVTypePredicates<ivti>.Predicates) in 1197 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1198 (ivti.Mask V0), 1199 VLOpFrag)), 1200 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1201 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1202 (ivti.Mask V0), 1203 // Value to indicate no rounding mode change in 1204 // RISCVInsertReadWriteCSR 1205 FRM_DYN, 1206 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1207 } 1208} 1209 1210multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> { 1211 foreach fvti = AllFloatVectors in { 
1212 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1213 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1214 GetVTypePredicates<ivti>.Predicates) in 1215 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1216 (ivti.Mask V0), (XLenVT timm:$frm), 1217 VLOpFrag)), 1218 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1219 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1220 (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1221 } 1222} 1223 1224// Widening converting 1225 1226multiclass VPatWConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1227 foreach fvtiToFWti = AllWidenableFloatVectors in { 1228 defvar fvti = fvtiToFWti.Vti; 1229 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1230 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1231 GetVTypePredicates<iwti>.Predicates) in 1232 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1233 (fvti.Mask V0), 1234 VLOpFrag)), 1235 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1236 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1237 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 1238 } 1239} 1240 1241multiclass VPatWConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> { 1242 foreach fvtiToFWti = AllWidenableFloatVectors in { 1243 defvar fvti = fvtiToFWti.Vti; 1244 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1245 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1246 GetVTypePredicates<iwti>.Predicates) in 1247 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1248 (fvti.Mask V0), 1249 VLOpFrag)), 1250 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1251 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1252 (fvti.Mask V0), 1253 // Value to indicate no rounding mode change in 1254 // RISCVInsertReadWriteCSR 1255 FRM_DYN, 1256 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1257 } 1258} 1259 1260 1261multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string 
instruction_name> { 1262 foreach fvtiToFWti = AllWidenableFloatVectors in { 1263 defvar fvti = fvtiToFWti.Vti; 1264 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1265 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1266 GetVTypePredicates<iwti>.Predicates) in 1267 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1268 (fvti.Mask V0), (XLenVT timm:$frm), 1269 VLOpFrag)), 1270 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1271 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1272 (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1273 } 1274} 1275 1276multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop, 1277 string instruction_name> { 1278 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1279 defvar ivti = vtiToWti.Vti; 1280 defvar fwti = vtiToWti.Wti; 1281 let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates, 1282 GetVTypePredicates<fwti>.Predicates) in 1283 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1284 (ivti.Mask V0), 1285 VLOpFrag)), 1286 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1287 (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1288 (ivti.Mask V0), 1289 GPR:$vl, ivti.Log2SEW, TA_MA)>; 1290 } 1291} 1292 1293// Narrowing converting 1294 1295multiclass VPatNConvertFP2IVL_W<SDPatternOperator vop, 1296 string instruction_name> { 1297 // Reuse the same list of types used in the widening nodes, but just swap the 1298 // direction of types around so we're converting from Wti -> Vti 1299 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1300 defvar vti = vtiToWti.Vti; 1301 defvar fwti = vtiToWti.Wti; 1302 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1303 GetVTypePredicates<fwti>.Predicates) in 1304 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1305 (fwti.Mask V0), 1306 VLOpFrag)), 1307 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1308 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1309 
(fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1310 } 1311} 1312 1313multiclass VPatNConvertFP2IVL_W_RM<SDPatternOperator vop, 1314 string instruction_name> { 1315 // Reuse the same list of types used in the widening nodes, but just swap the 1316 // direction of types around so we're converting from Wti -> Vti 1317 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1318 defvar vti = vtiToWti.Vti; 1319 defvar fwti = vtiToWti.Wti; 1320 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1321 GetVTypePredicates<fwti>.Predicates) in 1322 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1323 (fwti.Mask V0), 1324 VLOpFrag)), 1325 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1326 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1327 (fwti.Mask V0), 1328 // Value to indicate no rounding mode change in 1329 // RISCVInsertReadWriteCSR 1330 FRM_DYN, 1331 GPR:$vl, vti.Log2SEW, TA_MA)>; 1332 } 1333} 1334 1335multiclass VPatNConvertFP2I_RM_VL_W<SDNode vop, string instruction_name> { 1336 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1337 defvar vti = vtiToWti.Vti; 1338 defvar fwti = vtiToWti.Wti; 1339 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1340 GetVTypePredicates<fwti>.Predicates) in 1341 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1342 (fwti.Mask V0), (XLenVT timm:$frm), 1343 VLOpFrag)), 1344 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1345 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1346 (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>; 1347 } 1348} 1349 1350multiclass VPatNConvertI2FPVL_W_RM<SDPatternOperator vop, 1351 string instruction_name> { 1352 foreach fvtiToFWti = AllWidenableFloatVectors in { 1353 defvar fvti = fvtiToFWti.Vti; 1354 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1355 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1356 GetVTypePredicates<iwti>.Predicates) in 1357 def : Pat<(fvti.Vector (vop (iwti.Vector 
iwti.RegClass:$rs1), 1358 (iwti.Mask V0), 1359 VLOpFrag)), 1360 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1361 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1362 (iwti.Mask V0), 1363 // Value to indicate no rounding mode change in 1364 // RISCVInsertReadWriteCSR 1365 FRM_DYN, 1366 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1367 } 1368} 1369 1370multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> { 1371 foreach fvtiToFWti = AllWidenableFloatVectors in { 1372 defvar fvti = fvtiToFWti.Vti; 1373 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1374 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1375 GetVTypePredicates<iwti>.Predicates) in 1376 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 1377 (iwti.Mask V0), (XLenVT timm:$frm), 1378 VLOpFrag)), 1379 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1380 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1381 (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1382 } 1383} 1384 1385multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> { 1386 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1387 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 1388 let Predicates = GetVTypePredicates<vti>.Predicates in { 1389 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1390 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1391 (vti.Mask V0), VLOpFrag, 1392 (XLenVT timm:$policy))), 1393 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1394 (vti_m1.Vector VR:$merge), 1395 (vti.Vector vti.RegClass:$rs1), 1396 (vti_m1.Vector VR:$rs2), 1397 (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1398 } 1399 } 1400} 1401 1402multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float> { 1403 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1404 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # 
vti.SEW # "M1"); 1405 let Predicates = GetVTypePredicates<vti>.Predicates in { 1406 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), 1407 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1408 (vti.Mask V0), VLOpFrag, 1409 (XLenVT timm:$policy))), 1410 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1411 (vti_m1.Vector VR:$merge), 1412 (vti.Vector vti.RegClass:$rs1), 1413 (vti_m1.Vector VR:$rs2), 1414 (vti.Mask V0), 1415 // Value to indicate no rounding mode change in 1416 // RISCVInsertReadWriteCSR 1417 FRM_DYN, 1418 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1419 } 1420 } 1421} 1422 1423multiclass VPatBinaryVL_WV_WX_WI<SDNode op, string instruction_name> { 1424 foreach vtiToWti = AllWidenableIntVectors in { 1425 defvar vti = vtiToWti.Vti; 1426 defvar wti = vtiToWti.Wti; 1427 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1428 GetVTypePredicates<wti>.Predicates) in { 1429 def : Pat< 1430 (vti.Vector 1431 (riscv_trunc_vector_vl 1432 (op (wti.Vector wti.RegClass:$rs2), 1433 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))), 1434 (vti.Mask true_mask), 1435 VLOpFrag)), 1436 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX) 1437 (vti.Vector (IMPLICIT_DEF)), 1438 wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1439 1440 def : Pat< 1441 (vti.Vector 1442 (riscv_trunc_vector_vl 1443 (op (wti.Vector wti.RegClass:$rs2), 1444 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))), 1445 (vti.Mask true_mask), 1446 VLOpFrag)), 1447 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1448 (vti.Vector (IMPLICIT_DEF)), 1449 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1450 1451 def : Pat< 1452 (vti.Vector 1453 (riscv_trunc_vector_vl 1454 (op (wti.Vector wti.RegClass:$rs2), 1455 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask), 1456 VLOpFrag)), 1457 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 1458 (vti.Vector (IMPLICIT_DEF)), 1459 wti.RegClass:$rs2, 
uimm5:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1460 } 1461 } 1462} 1463 1464multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1465 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1466 defvar vti = vtiToWti.Vti; 1467 defvar wti = vtiToWti.Wti; 1468 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1469 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1470 GetVTypePredicates<wti>.Predicates) in { 1471 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1472 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1473 VR:$rs2, (vti.Mask V0), VLOpFrag, 1474 (XLenVT timm:$policy))), 1475 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1476 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1477 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1478 (XLenVT timm:$policy))>; 1479 } 1480 } 1481} 1482 1483multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1484 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1485 defvar vti = vtiToWti.Vti; 1486 defvar wti = vtiToWti.Wti; 1487 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1488 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1489 GetVTypePredicates<wti>.Predicates) in { 1490 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1491 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1492 VR:$rs2, (vti.Mask V0), VLOpFrag, 1493 (XLenVT timm:$policy))), 1494 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1495 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1496 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1497 // Value to indicate no rounding mode change in 1498 // RISCVInsertReadWriteCSR 1499 FRM_DYN, 1500 GPR:$vl, vti.Log2SEW, 1501 (XLenVT timm:$policy))>; 1502 } 1503 } 
1504} 1505 1506multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1507 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1508 defvar vti = vtiToWti.Vti; 1509 defvar wti = vtiToWti.Wti; 1510 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1511 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1512 GetVTypePredicates<wti>.Predicates) in { 1513 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1514 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1515 VR:$rs2, (vti.Mask V0), VLOpFrag, 1516 (XLenVT timm:$policy))), 1517 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1518 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1519 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1520 (XLenVT timm:$policy))>; 1521 } 1522 } 1523} 1524 1525multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1526 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1527 defvar vti = vtiToWti.Vti; 1528 defvar wti = vtiToWti.Wti; 1529 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1530 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1531 GetVTypePredicates<wti>.Predicates) in { 1532 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge), 1533 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1534 VR:$rs2, (vti.Mask V0), VLOpFrag, 1535 (XLenVT timm:$policy))), 1536 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1537 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), 1538 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1539 // Value to indicate no rounding mode change in 1540 // RISCVInsertReadWriteCSR 1541 FRM_DYN, 1542 GPR:$vl, vti.Log2SEW, 1543 (XLenVT 
timm:$policy))>; 1544 } 1545 } 1546} 1547 1548multiclass VPatBinaryFPWVL_VV_VF<SDNode vop, string instruction_name> { 1549 foreach fvtiToFWti = AllWidenableFloatVectors in { 1550 defvar vti = fvtiToFWti.Vti; 1551 defvar wti = fvtiToFWti.Wti; 1552 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1553 GetVTypePredicates<wti>.Predicates) in { 1554 def : VPatBinaryVL_V<vop, instruction_name, "VV", 1555 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1556 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1557 vti.RegClass>; 1558 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 1559 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1560 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1561 vti.ScalarRegClass>; 1562 } 1563 } 1564} 1565 1566multiclass VPatBinaryFPWVL_VV_VF_RM<SDNode vop, string instruction_name> { 1567 foreach fvtiToFWti = AllWidenableFloatVectors in { 1568 defvar vti = fvtiToFWti.Vti; 1569 defvar wti = fvtiToFWti.Wti; 1570 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1571 GetVTypePredicates<wti>.Predicates) in { 1572 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV", 1573 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1574 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1575 vti.RegClass>; 1576 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 1577 wti.Vector, vti.Vector, vti.Vector, vti.Mask, 1578 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass, 1579 vti.ScalarRegClass>; 1580 } 1581 } 1582} 1583 1584multiclass VPatBinaryFPWVL_VV_VF_WV_WF<SDNode vop, SDNode vop_w, string instruction_name> 1585 : VPatBinaryFPWVL_VV_VF<vop, instruction_name> { 1586 foreach fvtiToFWti = AllWidenableFloatVectors in { 1587 defvar vti = fvtiToFWti.Vti; 1588 defvar wti = fvtiToFWti.Wti; 1589 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1590 GetVTypePredicates<wti>.Predicates) in { 1591 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV", 1592 wti.Vector, vti.Vector, 
                                      vti.Log2SEW,
                                      vti.LMul, wti.RegClass, vti.RegClass>;
      def : VPatBinaryVL_V<vop_w, instruction_name, "WV",
                           wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_VF<vop_w, instruction_name#"_W"#vti.ScalarSuffix,
                            wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                            vti.ScalarRegClass>;
    }
  }
}

// Rounding-mode-aware variant of VPatBinaryFPWVL_VV_VF_WV_WF.
multiclass VPatBinaryFPWVL_VV_VF_WV_WF_RM<SDNode vop, SDNode vop_w, string instruction_name>
    : VPatBinaryFPWVL_VV_VF_RM<vop, instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar vti = fvtiToFWti.Vti;
    defvar wti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      defm : VPatTiedBinaryNoMaskVL_V_RM<vop_w, instruction_name, "WV",
                                         wti.Vector, vti.Vector, vti.Log2SEW,
                                         vti.LMul, wti.RegClass, vti.RegClass>;
      def : VPatBinaryVL_V_RM<vop_w, instruction_name, "WV",
                              wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                              vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                              vti.RegClass>;
      def : VPatBinaryVL_VF_RM<vop_w, instruction_name#"_W"#vti.ScalarSuffix,
                               wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                               vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                               vti.ScalarRegClass>;
    }
  }
}

// Narrowing shift where the shift amount is a splatted GPR extended from the
// narrow type; matched only under all-ones masks and selected to the
// unmasked "_WX" pseudo.
multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (vti.Vector
        (riscv_trunc_vector_vl
          (op (wti.Vector wti.RegClass:$rs2),
              (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))),
                                 (vti.Mask true_mask), VLOpFrag)),
              srcvalue, (wti.Mask true_mask), VLOpFrag),
          (vti.Mask true_mask), VLOpFrag)),
      (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
        (vti.Vector (IMPLICIT_DEF)),
        wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Narrowing shift with a vector shift amount that is extended from the
// narrow type; the mask (V0) on the truncate is honored via the "_MASK"
// pseudo.
multiclass VPatNarrowShiftExtVL_WV<SDNode op, PatFrags extop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (vti.Vector
        (riscv_trunc_vector_vl
          (op (wti.Vector wti.RegClass:$rs2),
              (wti.Vector (extop (vti.Vector vti.RegClass:$rs1),
                                 (vti.Mask true_mask), VLOpFrag)),
              srcvalue, (vti.Mask true_mask), VLOpFrag),
          (vti.Mask V0), VLOpFrag)),
      (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_MASK")
        (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1,
        (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Instantiate the WV narrowing-shift pattern for both sign- and
// zero-extended shift amounts.
multiclass VPatNarrowShiftVL_WV<SDNode op, string instruction_name> {
  defm : VPatNarrowShiftExtVL_WV<op, riscv_sext_vl_oneuse, instruction_name>;
  defm : VPatNarrowShiftExtVL_WV<op, riscv_zext_vl_oneuse, instruction_name>;
}

// Integer multiply-add matched under an all-ones mask; the multiply must
// have one use so it can be folded into the fused pseudo.
multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      // NOTE: We choose VMADD because it has the most commuting freedom. So it
      // works best with how TwoAddressInstructionPass tries commuting.
      def : Pat<(vti.Vector
                 (op vti.RegClass:$rs2,
                     (riscv_mul_vl_oneuse vti.RegClass:$rs1,
                                          vti.RegClass:$rd,
                                          srcvalue, (vti.Mask true_mask), VLOpFrag),
                     srcvalue, (vti.Mask true_mask), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
      // commutable.
      def : Pat<(vti.Vector
                 (op vti.RegClass:$rs2,
                     (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1),
                                          vti.RegClass:$rd,
                                          srcvalue, (vti.Mask true_mask), VLOpFrag),
                     srcvalue, (vti.Mask true_mask), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VX_" # suffix)
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Integer multiply-accumulate whose result is combined with $rd under a
// mask: the vmerge/vselect folds into the masked pseudo.  The vmerge forms
// preserve unselected lanes of $rd (TU_MU); the vselect forms are
// tail-agnostic.
multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (op vti.RegClass:$rd,
                                 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
                                                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                                 srcvalue, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (op vti.RegClass:$rd,
                                 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
                                                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                                 srcvalue, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vselect_vl (vti.Mask V0),
                 (vti.Vector (op vti.RegClass:$rd,
                                 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
                                                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                                 srcvalue, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(riscv_vselect_vl (vti.Mask V0),
                 (vti.Vector (op vti.RegClass:$rd,
                                 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
                                                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                                 srcvalue, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Widening integer multiply-add: narrow sources accumulate into the wide
// destination register $rd.
multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> {
  foreach vtiTowti = AllWidenableIntVectors in {
    defvar vti = vtiTowti.Vti;
    defvar wti = vtiTowti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vwmacc_op (vti.Vector vti.RegClass:$rs1),
                           (vti.Vector vti.RegClass:$rs2),
                           (wti.Vector wti.RegClass:$rd),
                           (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK")
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1),
                           (vti.Vector vti.RegClass:$rs2),
                           (wti.Vector wti.RegClass:$rd),
                           (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>(instr_name#"_VX_"#vti.LMul.MX#"_MASK")
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1,
                    vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                    TAIL_AGNOSTIC)>;
    }
  }
}

// Narrowing shift by a splatted GPR or uimm5 amount, unmasked forms only.
multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> {
  foreach vtiTowti = AllWidenableIntVectors in {
    defvar vti = vtiTowti.Vti;
    defvar wti = vtiTowti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vti.Vector (riscv_trunc_vector_vl
                 (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2),
                                 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
      def : Pat<(vti.Vector (riscv_trunc_vector_vl
                 (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
                                 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
                    (vti.Vector (IMPLICIT_DEF)),
                    wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Masked FP fused multiply-add in VL form (operand layout: $rs1 * $rd + $rs2).
multiclass VPatFPMulAddVL_VV_VF<SDPatternOperator vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
                                 vti.RegClass:$rs2, (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;

      def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
                                 vti.RegClass:$rd, vti.RegClass:$rs2,
                                 (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Rounding-mode-aware variant of VPatFPMulAddVL_VV_VF.
multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
                                 vti.RegClass:$rs2, (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0),
                     // Value to indicate no rounding mode change in
                     // RISCVInsertReadWriteCSR
                     FRM_DYN,
                     GPR:$vl, vti.Log2SEW, TA_MA)>;

      def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
                                 vti.RegClass:$rd, vti.RegClass:$rs2,
                                 (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0),
                     // Value to indicate no rounding mode change in
                     // RISCVInsertReadWriteCSR
                     FRM_DYN,
                     GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// FP multiply-accumulate combined with $rd under a mask; the vmerge/vselect
// folds into the masked pseudo (vmerge -> TU_MU, vselect -> tail-agnostic).
multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vselect_vl (vti.Mask V0),
                 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(riscv_vselect_vl (vti.Mask V0),
                 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Rounding-mode-aware variant of VPatFPMulAccVL_VV_VF.
multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0),
                     // Value to indicate no rounding mode change in
                     // RISCVInsertReadWriteCSR
                     FRM_DYN,
                     GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0),
                     // Value to indicate no rounding mode change in
                     // RISCVInsertReadWriteCSR
                     FRM_DYN,
                     GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vselect_vl (vti.Mask V0),
                 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                     vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0),
                     // Value to indicate no rounding mode change in
                     // RISCVInsertReadWriteCSR
                     FRM_DYN,
                     GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(riscv_vselect_vl (vti.Mask V0),
                 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                     vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask V0),
                     // Value to indicate no rounding mode change in
                     // RISCVInsertReadWriteCSR
                     FRM_DYN,
                     GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Widening FP multiply-accumulate: narrow FP sources into the wide $rd.
multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK")
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
      def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK")
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Rounding-mode-aware variant of VPatWidenFPMulAccVL_VV_VF.
multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK")
                    wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    GPR:$vl, vti.Log2SEW, TA_MA)>;
      def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK")
                    wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                    (vti.Mask V0),
                    // Value to indicate no rounding mode change in
                    // RISCVInsertReadWriteCSR
                    FRM_DYN,
                    GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// vslideup/vslidedown with an immediate or GPR offset, masked, with an
// explicit tail/mask policy operand.
multiclass VPatSlideVL_VX_VI<SDNode vop, string instruction_name> {
  foreach vti = AllVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
                                 (vti.Vector
                                  vti.RegClass:$rs1),
                                 uimm5:$rs2, (vti.Mask V0),
                                 VLOpFrag, (XLenVT timm:$policy))),
                (!cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK")
                    vti.RegClass:$rd, vti.RegClass:$rs1, uimm5:$rs2,
                    (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                    (XLenVT timm:$policy))>;

      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
                                 (vti.Vector vti.RegClass:$rs1),
                                 GPR:$rs2, (vti.Mask V0),
                                 VLOpFrag, (XLenVT timm:$policy))),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
                    vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2,
                    (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                    (XLenVT timm:$policy))>;
    }
  }
}

// vslide1up/vslide1down with a GPR scalar, integer element types.
multiclass VPatSlide1VL_VX<SDNode vop, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
                                 (vti.Vector vti.RegClass:$rs1),
                                 GPR:$rs2, (vti.Mask V0), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
                    vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
                    (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
    }
  }
}

// vfslide1up/vfslide1down with an FP scalar operand.
multiclass VPatSlide1VL_VF<SDNode vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
                                 (vti.Vector vti.RegClass:$rs1),
                                 vti.Scalar:$rs2, (vti.Mask V0), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_MASK")
                    vti.RegClass:$rs3, vti.RegClass:$rs1, vti.Scalar:$rs2,
                    (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
    }
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                            (vti.Vector vti.RegClass:$rs1),
                            vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                            (vti.Vector vti.RegClass:$rs1),
                            vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 11.2. Vector Widening Integer Add/Subtract
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">;

// shl_vl (ext_vl v, splat 1) is a special case of widening add.
foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    // shl (sext v), splat 1 -> vwadd v, v
    def : Pat<(riscv_shl_vl (wti.Vector (riscv_sext_vl_oneuse
                                (vti.Vector vti.RegClass:$rs1),
                                (vti.Mask V0), VLOpFrag)),
                            (wti.Vector (riscv_vmv_v_x_vl
                                (wti.Vector undef), 1, VLOpFrag)),
                            wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK")
                  wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
                  (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    // shl (zext v), splat 1 -> vwaddu v, v
    def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
                                (vti.Vector vti.RegClass:$rs1),
                                (vti.Mask V0), VLOpFrag)),
                            (wti.Vector (riscv_vmv_v_x_vl
                                (wti.Vector undef), 1, VLOpFrag)),
                            wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK")
                  wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
                  (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 11.3. Vector Integer Extension
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF2",
                      AllFractionableVF2IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF2",
                      AllFractionableVF2IntVectors>;
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF4",
                      AllFractionableVF4IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF4",
                      AllFractionableVF4IntVectors>;
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF8",
                      AllFractionableVF8IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF8",
                      AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">;
defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">;
defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1),
                          (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)),
                          srcvalue, (vti.Mask true_mask), VLOpFrag),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
}

// 11.7. Vector Narrowing Integer Right Shift Instructions
defm : VPatBinaryVL_WV_WX_WI<srl, "PseudoVNSRL">;
defm : VPatBinaryVL_WV_WX_WI<sra, "PseudoVNSRA">;

defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">;
defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">;

defm : VPatNarrowShiftVL_WV<riscv_srl_vl, "PseudoVNSRL">;
defm : VPatNarrowShiftVL_WV<riscv_sra_vl, "PseudoVNSRA">;

defm : VPatBinaryNVL_WV_WX_WI<riscv_vnsrl_vl, "PseudoVNSRL">;

// A masked truncate is a narrowing logical shift-right by 0.
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in
  def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
                                               (vti.Mask V0),
                                               VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK")
                (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
}

// 11.8. Vector Integer Comparison Instructions
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>;
    defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>;

    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;

    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
    // There is no VMSGE(U)_VX instruction

    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;

    // x < (c+1) <-> x <= c, etc.; the +1 is folded into the immediate.
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT,
                                                SplatPat_simm5_plus1>;
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT,
                                                SplatPat_simm5_plus1_nonzero>;
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE,
                                                SplatPat_simm5_plus1>;
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE,
                                                SplatPat_simm5_plus1_nonzero>;
  }
} // foreach vti = AllIntegerVectors

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">;
defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">;
defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">;
defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">;
defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", IntegerVectorsExceptI64>;
defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", IntegerVectorsExceptI64>;
// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*.
let Predicates = [HasVInstructionsFullMultiply] in {
  defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", I64IntegerVectors>;
  defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", I64IntegerVectors>;
}

// 11.11. Vector Integer Divide Instructions
defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM", isSEWAware=1>;

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">;

// 11.13 Vector Single-Width Integer Multiply-Add Instructions
defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">;
defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">;
defm : VPatMultiplyAccVL_VV_VX<riscv_add_vl_oneuse, "PseudoVMACC">;
defm : VPatMultiplyAccVL_VV_VX<riscv_sub_vl_oneuse, "PseudoVNMSAC">;

// 11.14. Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmacc_vl, "PseudoVWMACC">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccu_vl, "PseudoVWMACCU">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccsu_vl, "PseudoVWMACCSU">;
// vwmaccsu with the splatted scalar as the signed (first) operand selects to
// vwmaccus.vx with the two source operands swapped.
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in
  def : Pat<(riscv_vwmaccsu_vl (vti.Vector vti.RegClass:$rs1),
                               (SplatPat XLenVT:$rs2),
                               (wti.Vector wti.RegClass:$rd),
                               (vti.Mask V0), VLOpFrag),
            (!cast<Instruction>("PseudoVWMACCUS_VX_"#vti.LMul.MX#"_MASK")
                wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}

// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                            vti.RegClass:$rs1,
                                            vti.RegClass:$rs2,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                   GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                            (SplatPat XLenVT:$rs1),
                                            vti.RegClass:$rs2,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                            (SplatPat_simm5 simm5:$rs1),
                                            vti.RegClass:$rs2,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
                                           vti.RegClass:$rs1,
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$merge,
                                           VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                   vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
                                           (SplatPat XLenVT:$rs1),
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$merge,
                                           VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                   vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
                                           (SplatPat_simm5 simm5:$rs1),
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$merge,
                                           VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                   vti.RegClass:$merge, vti.RegClass:$rs2, simm5:$rs1,
                   (vti.Mask V0), GPR:$vl,
vti.Log2SEW)>; 2309 } 2310} 2311 2312// 11.16. Vector Integer Move Instructions 2313foreach vti = AllVectors in { 2314 let Predicates = GetVTypePredicates<vti>.Predicates in { 2315 def : Pat<(vti.Vector (riscv_vmv_v_v_vl vti.RegClass:$passthru, 2316 vti.RegClass:$rs2, VLOpFrag)), 2317 (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX) 2318 vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2319} 2320 2321foreach vti = AllIntegerVectors in { 2322 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, GPR:$rs2, VLOpFrag)), 2323 (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX) 2324 vti.RegClass:$passthru, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; 2325 defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5"); 2326 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, (ImmPat simm5:$imm5), 2327 VLOpFrag)), 2328 (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX) 2329 vti.RegClass:$passthru, simm5:$imm5, GPR:$vl, vti.Log2SEW, TU_MU)>; 2330 } 2331} 2332 2333// 12. Vector Fixed-Point Arithmetic Instructions 2334 2335// 12.1. Vector Single-Width Saturating Add and Subtract 2336defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">; 2337defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">; 2338defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">; 2339defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">; 2340 2341// 12.5. 
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
// These helpers recognize a truncate of a clamped (min/max or umin) wide
// value and select the narrowing clip pseudos (vnclip/vnclipu) with a shift
// amount of 0, instead of a separate clamp + truncate sequence.

// Truncate of smin/smax (in either order) of a wide value clamped to the
// narrow type's signed range.
class VPatTruncSatClipMaxMinBase<string inst,
                                 VTypeInfo vti,
                                 VTypeInfo wti,
                                 SDPatternOperator op1,
                                 int op1_value,
                                 SDPatternOperator op2,
                                 int op2_value> :
  Pat<(vti.Vector (riscv_trunc_vector_vl
       (wti.Vector (op1
        (wti.Vector (op2
          (wti.Vector wti.RegClass:$rs1),
          (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), op2_value, (XLenVT srcvalue))),
          (wti.Vector undef), (wti.Mask V0), VLOpFrag)),
        (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), op1_value, (XLenVT srcvalue))),
        (wti.Vector undef), (wti.Mask V0), VLOpFrag)),
       (vti.Mask V0), VLOpFrag)),
      (!cast<Instruction>(inst#"_WI_"#vti.LMul.MX#"_MASK")
          (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
          (vti.Mask V0), 0, GPR:$vl, vti.Log2SEW, TA_MA)>;

// Truncate of umin of a wide value clamped to the narrow type's unsigned max.
class VPatTruncSatClipUMin<VTypeInfo vti,
                           VTypeInfo wti,
                           int uminval> :
  Pat<(vti.Vector (riscv_trunc_vector_vl
       (wti.Vector (riscv_umin_vl
        (wti.Vector wti.RegClass:$rs1),
        (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), uminval, (XLenVT srcvalue))),
        (wti.Vector undef), (wti.Mask V0), VLOpFrag)),
       (vti.Mask V0), VLOpFrag)),
      (!cast<Instruction>("PseudoVNCLIPU_WI_"#vti.LMul.MX#"_MASK")
          (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
          (vti.Mask V0), 0, GPR:$vl, vti.Log2SEW, TA_MA)>;

// Emit both operand orders since max(min(x)) and min(max(x)) clamp to the
// same range.
multiclass VPatTruncSatClipMaxMin<string inst, VTypeInfo vti, VTypeInfo wti,
                                  SDPatternOperator max, int maxval,
                                  SDPatternOperator min, int minval> {
  def : VPatTruncSatClipMaxMinBase<inst, vti, wti, max, maxval, min, minval>;
  def : VPatTruncSatClipMaxMinBase<inst, vti, wti, min, minval, max, maxval>;
}

multiclass VPatTruncSatClip<VTypeInfo vti, VTypeInfo wti> {
  defvar sew = vti.SEW;
  // Clamp bounds of the narrow element type: 2^sew-1, 2^(sew-1)-1, -2^(sew-1).
  defvar uminval = !sub(!shl(1, sew), 1);
  defvar sminval = !sub(!shl(1, !sub(sew, 1)), 1);
  defvar smaxval = !sub(0, !shl(1, !sub(sew, 1)));

  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    defm : VPatTruncSatClipMaxMin<"PseudoVNCLIP", vti, wti, riscv_smin_vl,
                                  sminval, riscv_smax_vl, smaxval>;
    def : VPatTruncSatClipUMin<vti, wti, uminval>;
  }

}

foreach vtiToWti = AllWidenableIntVectors in
  defm : VPatTruncSatClip<vtiToWti.Vti, vtiToWti.Wti>;

// 13. Vector Floating-Point Instructions

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fadd_vl, "PseudoVFADD">;
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fsub_vl, "PseudoVFSUB">;
defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fsub_vl, "PseudoVFRSUB">;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwadd_vl, riscv_vfwadd_w_vl, "PseudoVFWADD">;
defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwsub_vl, riscv_vfwsub_w_vl, "PseudoVFWSUB">;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fmul_vl, "PseudoVFMUL">;
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fdiv_vl, "PseudoVFDIV", isSEWAware=1>;
defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fdiv_vl, "PseudoVFRDIV", isSEWAware=1>;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatBinaryFPWVL_VV_VF_RM<riscv_vfwmul_vl, "PseudoVFWMUL">;
// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
// The *MulAdd forms overwrite the addend register; the *MulAcc (oneuse)
// forms overwrite the accumulator.
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmadd_vl, "PseudoVFMADD">;
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmsub_vl, "PseudoVFMSUB">;
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmadd_vl, "PseudoVFNMADD">;
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmsub_vl, "PseudoVFNMSUB">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmadd_vl_oneuse, "PseudoVFMACC">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmsub_vl_oneuse, "PseudoVFMSAC">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmadd_vl_oneuse, "PseudoVFNMACC">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmsub_vl_oneuse, "PseudoVFNMSAC">;

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmadd_vl, "PseudoVFWMACC">;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmadd_vl, "PseudoVFWNMACC">;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmsub_vl, "PseudoVFWMSAC">;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmsub_vl, "PseudoVFWNMSAC">;

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_vfmin_vl, "PseudoVFMIN">;
defm : VPatBinaryFPVL_VV_VF<riscv_vfmax_vl, "PseudoVFMAX">;
// 13.13. Vector Floating-Point Compare Instructions
// Quiet (fsetcc) vs. signaling (fsetccs) compares; the second pseudo name is
// the swapped-operand form used for the scalar-on-the-left (FV) case.
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETEQ,
                              "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETOEQ,
                              "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETNE,
                              "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETUNE,
                              "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLT,
                              "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLT,
                              "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLE,
                              "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLE,
                              "PseudoVMFLE", "PseudoVMFGE">;

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // 13.8. Vector Floating-Point Square-Root Instruction
    def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TA_MA)>;

    // 13.12. Vector Floating-Point Sign-Injection Instructions
    // fabs is vfsgnjx with the same input for both operands.
    def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                             VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TA_MA)>;
    // Handle fneg with VFSGNJN using the same input for both operands.
    def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                             VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TA_MA)>;

    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (vti.Vector vti.RegClass:$rs2),
                                  vti.RegClass:$merge,
                                  (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1,
                   vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;

    // copysign(x, -y) folds to vfsgnjn when both nodes are unmasked.
    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (riscv_fneg_vl vti.RegClass:$rs2,
                                                 (vti.Mask true_mask),
                                                 VLOpFrag),
                                  srcvalue,
                                  (vti.Mask true_mask),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;

    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (SplatFPOp vti.ScalarRegClass:$rs2),
                                  vti.RegClass:$merge,
                                  (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs1,
                   vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;

    // Rounding without exception to implement nearbyint.
    def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
                                             (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;

    // 13.14. Vector Floating-Point Classify Instruction
    def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
                               (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

foreach fvti = AllFloatVectors in {
  // Floating-point vselects:
  // 11.15. Vector Integer Merge Instructions
  // 13.15. Vector Floating-Point Merge Instruction
  // These first patterns only need the equivalent integer type's predicates,
  // since vmerge.v?m moves bits without interpreting them as FP values.
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                             fvti.RegClass:$rs1,
                                             fvti.RegClass:$rs2,
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                   GPR:$vl, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                             (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
                                             fvti.RegClass:$rs2,
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs2,
                   GPR:$imm,
                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                             (SplatFPOp (fvti.Scalar fpimm0)),
                                             fvti.RegClass:$rs2,
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            fvti.RegClass:$rs1,
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$merge,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                   fvti.RegClass:$merge, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                   GPR:$vl, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            (SplatFPOp (fvti.Scalar fpimm0)),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$merge,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                   fvti.RegClass:$merge, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
                   GPR:$vl, fvti.Log2SEW)>;
  }

  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                             (SplatFPOp fvti.ScalarRegClass:$rs1),
                                             fvti.RegClass:$rs2,
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                   (fvti.Vector (IMPLICIT_DEF)),
                   fvti.RegClass:$rs2,
                   (fvti.Scalar fvti.ScalarRegClass:$rs1),
                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            (SplatFPOp fvti.ScalarRegClass:$rs1),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$merge,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                   fvti.RegClass:$merge, fvti.RegClass:$rs2,
                   (fvti.Scalar fvti.ScalarRegClass:$rs1),
                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;

    // 13.16. Vector Floating-Point Move Instruction
    // If we're splatting fpimm0, use vmv.v.x vd, x0.
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                           fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                   $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>;
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                           fvti.Vector:$passthru, (fvti.Scalar (SelectFPImm (XLenVT GPR:$imm))), VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
                   $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>;

    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                           fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
              (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                  fvti.LMul.MX)
                   $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   GPR:$vl, fvti.Log2SEW, TU_MU)>;
  }
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFCVT_XU_F_V">;
defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFCVT_X_F_V">;
defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_RM_XU_F_V">;
defm : VPatConvertFP2I_RM_VL_V<any_riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_RM_X_F_V">;

defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">;

defm : VPatConvertI2FPVL_V_RM<any_riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
defm : VPatConvertI2FPVL_V_RM<any_riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;

defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_RM_F_XU_V">;
defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_RM_F_X_V">;
// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFWCVT_XU_F_V">;
defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFWCVT_X_F_V">;
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_RM_XU_F_V">;
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_RM_X_F_V">;

defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;

defm : VPatWConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
defm : VPatWConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;

// vfwcvt.f.f.v only needs Zvfhmin (not full Zvfh) for an f16 source.
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fwti.Vector (any_riscv_fpextend_vl
                             (fvti.Vector fvti.RegClass:$rs1),
                             (fvti.Mask V0),
                             VLOpFrag)),
            (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK")
                (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                (fvti.Mask V0),
                GPR:$vl, fvti.Log2SEW, TA_MA)>;
}

// 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_xu_f_vl, "PseudoVFNCVT_XU_F_W">;
defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_x_f_vl, "PseudoVFNCVT_X_F_W">;
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_RM_XU_F_W">;
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_RM_X_F_W">;

defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;

defm : VPatNConvertI2FPVL_W_RM<any_riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
defm : VPatNConvertI2FPVL_W_RM<any_riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;

defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_RM_F_XU_W">;
defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_RM_F_X_W">;

foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  // Define vfncvt.f.f.w for f16 when Zvfhmin is enabled.
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in {
    def : Pat<(fvti.Vector (any_riscv_fpround_vl
                               (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask V0), VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
                  (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                  (fwti.Mask V0),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, fvti.Log2SEW, TA_MA)>;

    // vfncvt.rod.f.f.w (round-toward-odd) requires the full predicates even
    // for f16, so the inner let overrides the Zvfhmin relaxation above.
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl
                               (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask V0), VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_MASK")
                  (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                  (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// 14. Vector Reduction Operations
// 14.1. Vector Single-Width Integer Reduction Instructions
defm : VPatReductionVL<rvv_vecreduce_ADD_vl,  "PseudoVREDSUM", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_AND_vl,  "PseudoVREDAND", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_OR_vl,   "PseudoVREDOR", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_XOR_vl,  "PseudoVREDXOR", is_float=0>;

// 14.2. Vector Widening Integer Reduction Instructions
// Both plain DAG extends (anyext/zext/sext) and the VL-aware extend nodes are
// matched; any-extend conservatively uses the unsigned widening sum.
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", is_float=0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", is_float=0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", is_float=0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", is_float=0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", is_float=0>;

// 14.3. Vector Single-Width Floating-Point Reduction Instructions
// SEQ_FADD (ordered) uses vfredosum; reassociable FADD uses vfredusum.
defm : VPatReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", is_float=1>;
defm : VPatReductionVL_RM<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", is_float=1>;
defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", is_float=1>;
defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", is_float=1>;
// 14.4. Vector Widening Floating-Point Reduction Instructions
defm : VPatWidenReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse,
                               "PseudoVFWREDOSUM", is_float=1>;
defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_SEQ_FADD_vl,
                                      riscv_fpextend_vl_oneuse,
                                      "PseudoVFWREDOSUM", is_float=1>;
defm : VPatWidenReductionVL_RM<rvv_vecreduce_FADD_vl, fpext_oneuse,
                               "PseudoVFWREDUSUM", is_float=1>;
defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_FADD_vl,
                                      riscv_fpextend_vl_oneuse,
                                      "PseudoVFWREDUSUM", is_float=1>;

// 15. Vector Mask Instructions

foreach mti = AllMasks in {
  let Predicates = [HasVInstructions] in {
    // 15.1 Vector Mask-Register Logical Instructions
    def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
              (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
              (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;

    def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // and/or with an inverted operand fold to the *N (andnot/ornot) forms.
    def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
                                        (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
                                       (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    // XOR is associative so we need 2 patterns for VMXNOR.
    def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
                                                        VLOpFrag),
                                        VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // Inverted results of and/or/xor fold to the vmnand/vmnor/vmxnor forms.
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
                                                       VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // Match the not idiom to the vmnot.m pseudo.
    def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                   VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;

    // 15.2 Vector count population in mask vcpop.m
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

    // 15.3 vfirst find-first-set mask bit
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}

// 16. Vector Permutation Instructions

// 16.1. Integer Scalar Move Instructions
// 16.4. Vector Register Gather Instruction
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
                                            vti.ScalarRegClass:$rs1,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                   vti.RegClass:$merge,
                   (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                                vti.RegClass:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                                uimm5:$imm,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }

  // vrgatherei16 uses a 16-bit index vector whose EMUL differs from the data
  // LMUL: emul = lmul * 16 / sew. Skip combinations where EMUL leaves the
  // representable range [1/8, 8] (octuple 1..64).
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$merge,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 16.2. Floating-Point Scalar Move Instructions
foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // Splatting +0.0 or an FP immediate already materialized in a GPR can use
    // the integer vmv.s.x form.
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             (vti.Scalar (fpimm0)),
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                   vti.RegClass:$merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             (vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                   vti.RegClass:$merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             vti.ScalarRegClass:$rs1,
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   vti.RegClass:$merge,
                   (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
  }
  defvar ivti = GetIntVTypeInfo<vti>.Vti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<ivti>.Predicates) in {
    def : Pat<(vti.Vector
               (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                     (ivti.Vector vti.RegClass:$rs1),
                                     vti.RegClass:$merge,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector
               (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                     uimm5:$imm,
                                     vti.RegClass:$merge,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }

  // Same EMUL computation as the integer vrgatherei16 patterns above.
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$merge,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Miscellaneous RISCVISD SDNodes
//===----------------------------------------------------------------------===//

def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
                          [SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;

// vslideup/vslidedown: result, merge, src, offset, mask, vl, policy.
def SDTRVVSlide : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;
// vslide1up/down: integer scalar inserted at the vacated element.
def SDTRVVSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
  SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;
// vfslide1up/down: FP scalar (element type of the result vector) inserted.
def SDTRVVFSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisFP<0>,
  SDTCisEltOfVec<3, 0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_slideup_vl     : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
def riscv_slide1up_vl    : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>;
def riscv_slidedown_vl   : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>;
def riscv_slide1down_vl  : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>;
def riscv_fslide1up_vl   : SDNode<"RISCVISD::VFSLIDE1UP_VL", SDTRVVFSlide1, []>;
def riscv_fslide1down_vl : SDNode<"RISCVISD::VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>;

foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX#"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
  }
}

defm : VPatSlideVL_VX_VI<riscv_slideup_vl, "PseudoVSLIDEUP">;
defm : VPatSlideVL_VX_VI<riscv_slidedown_vl, "PseudoVSLIDEDOWN">;
defm : VPatSlide1VL_VX<riscv_slide1up_vl, "PseudoVSLIDE1UP">;
defm : VPatSlide1VL_VF<riscv_fslide1up_vl, "PseudoVFSLIDE1UP">;
defm : VPatSlide1VL_VX<riscv_slide1down_vl, "PseudoVSLIDE1DOWN">;
defm : VPatSlide1VL_VF<riscv_fslide1down_vl, "PseudoVFSLIDE1DOWN">;