//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and SDNode patterns to
/// support code generation for the standard 'V' (Vector) extension, version
/// 0.10. This version is still experimental as the 'V' extension hasn't been
/// ratified yet.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the SDNode patterns.
//===----------------------------------------------------------------------===//

// Splat of an i32 scalar into a vector of i64 elements; only meaningful on
// RV32, where an i64 splat value cannot live in a single GPR.
def SDTSplatI64 : SDTypeProfile<1, 1, [
  SDTCVecEltisVT<0, i64>, SDTCisVT<1, i32>
]>;

def rv32_splat_i64 : SDNode<"RISCVISD::SPLAT_VECTOR_I64", SDTSplatI64>;

// All-zeros / all-ones mask producers taking an explicit VL operand.
def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>,
                                                SDTCisVT<1, XLenVT>]>;
def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>;
def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>;

// Logical NOT of a mask, expressed as XOR with an all-ones mask.
def rvv_vnot : PatFrag<(ops node:$in),
                       (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>;

// Give explicit Complexity to prefer simm5/uimm5.
def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat", [splat_vector, rv32_splat_i64], [], 1>;
def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [splat_vector, rv32_splat_i64], [], 2>;
def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [splat_vector, rv32_splat_i64], [], 2>;
def SplatPat_simm5_plus1
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1",
                     [splat_vector, rv32_splat_i64], [], 2>;
def SplatPat_simm5_plus1_nonzero
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero",
                     [splat_vector, rv32_splat_i64], [], 2>;

// Concatenates Prefix, A, B, Suffix, swapping A and B when `swap` is set.
// Used below to emit setcc patterns with commuted operands.
class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
   dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
}

// Unit-stride load/store of `type` selected to the PseudoVLE<sew>/PseudoVSE<sew>
// pseudo for the given LMUL.
multiclass VPatUSLoadStoreSDNode<ValueType type,
                                 int log2sew,
                                 LMULInfo vlmul,
                                 OutPatFrag avl,
                                 VReg reg_class,
                                 int sew = !shl(1, log2sew)>
{
  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
  // Load
  def : Pat<(type (load BaseAddr:$rs1)),
            (load_instr BaseAddr:$rs1, avl, log2sew)>;
  // Store
  def : Pat<(store type:$rs2, BaseAddr:$rs1),
            (store_instr reg_class:$rs2, BaseAddr:$rs1, avl, log2sew)>;
}

// Unit-stride load/store selected to the whole-register VL<n>RE<sew>/VS<n>R
// instructions, which take no AVL/SEW operands.
multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
                                        int log2sew,
                                        LMULInfo vlmul,
                                        VReg reg_class,
                                        int sew = !shl(1, log2sew)>
{
  defvar load_instr =
    !cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
  defvar store_instr =
    !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V");

  // Load
  def : Pat<(type (load BaseAddr:$rs1)),
            (load_instr BaseAddr:$rs1)>;
  // Store
  def : Pat<(store type:$rs2, BaseAddr:$rs1),
            (store_instr reg_class:$rs2, BaseAddr:$rs1)>;
}

// Unit-stride mask load/store selected to PseudoVLE1/PseudoVSE1.
multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m>
{
  defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#m.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#m.BX);
  // Load
  def : Pat<(m.Mask (load BaseAddr:$rs1)),
            (load_instr BaseAddr:$rs1, m.AVL, m.Log2SEW)>;
  // Store
  def : Pat<(store m.Mask:$rs2, BaseAddr:$rs1),
            (store_instr VR:$rs2, BaseAddr:$rs1, m.AVL, m.Log2SEW)>;
}

// Binary vector-vector op `vop` selected to <instruction_name>_VV_<LMUL>.
class VPatBinarySDNode_VV<SDNode vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType op_type,
                          ValueType mask_type,
                          int sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg RetClass,
                          VReg op_reg_class> :
    Pat<(result_type (vop
                      (op_type op_reg_class:$rs1),
                      (op_type op_reg_class:$rs2))),
        (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
                     op_reg_class:$rs1,
                     op_reg_class:$rs2,
                     avl, sew)>;

// Binary op with a splatted scalar/immediate second operand, selected to
// <instruction_name>_<suffix>_<LMUL> (suffix is "VX" or "VI").
class VPatBinarySDNode_XI<SDNode vop,
                          string instruction_name,
                          string suffix,
                          ValueType result_type,
                          ValueType vop_type,
                          ValueType mask_type,
                          int sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg RetClass,
                          VReg vop_reg_class,
                          ComplexPattern SplatPatKind,
                          DAGOperand xop_kind> :
    Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (SplatPatKind xop_kind:$rs2)))),
        (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
                     vop_reg_class:$rs1,
                     xop_kind:$rs2,
                     avl, sew)>;

// VV and VX forms of a binary integer op over all integer vector types.
multiclass VPatBinarySDNode_VV_VX<SDNode vop, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    def : VPatBinarySDNode_VV<vop, instruction_name,
                              vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
    def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
                              vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
                              SplatPat, GPR>;
  }
}

// VV, VX and VI forms; ImmType selects the immediate splat pattern
// (SplatPat_simm5 by default, SplatPat_uimm5 for shifts).
multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
                                     Operand ImmType = simm5>
    : VPatBinarySDNode_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
                              vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
                              !cast<ComplexPattern>(SplatPat#_#ImmType),
                              ImmType>;
  }
}

// FP binary op with a splatted scalar FP second operand, selected to
// <instruction_name>_<LMUL>.
class VPatBinarySDNode_VF<SDNode vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType vop_type,
                          ValueType xop_type,
                          ValueType mask_type,
                          int sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg RetClass,
                          VReg vop_reg_class,
                          DAGOperand xop_kind> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (splat_vector xop_kind:$rs2)))),
        (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
                     vop_reg_class:$rs1,
                     (xop_type xop_kind:$rs2),
                     avl, sew)>;

// VV and VF forms of a binary FP op over all float vector types.
multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    def : VPatBinarySDNode_VV<vop, instruction_name,
                              vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
    def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                              vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                              vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
                              vti.ScalarRegClass>;
  }
}

// Reversed-operand VF form (scalar splat on the LHS), e.g. for VFRSUB/VFRDIV.
multiclass VPatBinaryFPSDNode_R_VF<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in
    def : Pat<(fvti.Vector (vop (fvti.Vector (splat_vector fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                           fvti.RegClass:$rs1,
                           (fvti.Scalar fvti.ScalarRegClass:$rs2),
                           fvti.AVL, fvti.Log2SEW)>;
}

// Integer setcc with two vector operands; `swap` commutes the operands in the
// output instruction (used to express GT/GE via LT/LE pseudos).
multiclass VPatIntegerSetCCSDNode_VV<CondCode cc,
                                     string instruction_name,
                                     bit swap = 0> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector vti.RegClass:$rs2), cc)),
              SwapHelper<(instruction),
                         (instruction vti.RegClass:$rs1),
                         (instruction vti.RegClass:$rs2),
                         (instruction vti.AVL, vti.Log2SEW),
                         swap>.Value>;
  }
}

// Integer setcc with a splatted scalar/immediate second operand.
multiclass VPatIntegerSetCCSDNode_XI<CondCode cc,
                                     string instruction_name,
                                     string kind,
                                     ComplexPattern SplatPatKind,
                                     DAGOperand xop_kind,
                                     bit swap = 0> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
              SwapHelper<(instruction),
                         (instruction vti.RegClass:$rs1),
                         (instruction xop_kind:$rs2),
                         (instruction vti.AVL, vti.Log2SEW),
                         swap>.Value>;
  }
}

multiclass VPatIntegerSetCCSDNode_VV_VX_VI<CondCode cc,
                                           string instruction_name,
                                           bit swap = 0> {
  defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name, swap>;
  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
                                   SplatPat, GPR, swap>;
  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI",
                                   SplatPat_simm5, simm5, swap>;
}

multiclass VPatIntegerSetCCSDNode_VV_VX<CondCode cc,
                                        string instruction_name,
                                        bit swap = 0> {
  defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name, swap>;
  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
                                   SplatPat, GPR, swap>;
}

multiclass VPatIntegerSetCCSDNode_VX_VI<CondCode cc,
                                        string instruction_name,
                                        bit swap = 0> {
  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
                                   SplatPat, GPR, swap>;
  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI",
                                   SplatPat_simm5, simm5, swap>;
}

// Integer setcc against a splat of (imm+1), selected by decrementing the
// immediate (DecImm); expresses e.g. x < (c+1) as x <= c.
multiclass VPatIntegerSetCCSDNode_VIPlus1<CondCode cc, string instruction_name,
                                          ComplexPattern splatpat_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (splatpat_kind simm5:$rs2)),
                               cc)),
              (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                           vti.AVL, vti.Log2SEW)>;
  }
}

// FP setcc: VV form, VF form, and the operand-swapped FV form which uses
// `swapped_op_inst_name` (the opposite-comparison pseudo).
multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
                                      string inst_name,
                                      string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Vector fvti.RegClass:$rs2),
                                cc)),
              (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                (splat_vector fvti.ScalarRegClass:$rs2),
                                cc)),
              (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                  fvti.AVL, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (setcc (splat_vector fvti.ScalarRegClass:$rs2),
                                (fvti.Vector fvti.RegClass:$rs1),
                                cc)),
              (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                  fvti.AVL, fvti.Log2SEW)>;
  }
}

// Integer extension (sext/zext/anyext) from a fractional-width source type to
// its wider counterpart via the VSEXT/VZEXT VF2/VF4/VF8 pseudos.
multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
                              list <VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    foreach op = ops in
      def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
                (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                    fti.RegClass:$rs2, fti.AVL, vti.Log2SEW)>;
  }
}

// Same-width int -> FP conversion.
multiclass VPatConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  ivti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
  }
}

// Same-width FP -> int conversion.
multiclass VPatConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
  }
}

// Widening int -> FP conversion.
multiclass VPatWConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  ivti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
  }
}

// Widening FP -> int conversion.
multiclass VPatWConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
  }
}

// Narrowing int -> FP conversion.
multiclass VPatNConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  iwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
  }
}

// Narrowing FP -> int conversion.
multiclass VPatNConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                  fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV] in {

// 7.4. Vector Unit-Stride Instructions
foreach vti = !listconcat(FractionalGroupIntegerVectors,
                          FractionalGroupFloatVectors) in
  defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                               vti.AVL, vti.RegClass>;
foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach mti = AllMasks in
  defm : VPatUSLoadStoreMaskSDNode<mti>;

// 12.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands
foreach vti = AllIntegerVectors in {
  def : Pat<(sub (vti.Vector (SplatPat GPR:$rs2)),
                 (vti.Vector vti.RegClass:$rs1)),
            (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                 (vti.Vector vti.RegClass:$rs1)),
            (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                 vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.Log2SEW)>;
}

// 12.3. Vector Integer Extension
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8",
                          AllFractionableVF8IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8",
                          AllFractionableVF8IntVectors>;

// 12.5. Vector Bitwise Logical Instructions
defm : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">;
defm : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">;
defm : VPatBinarySDNode_VV_VX_VI<xor, "PseudoVXOR">;

// 12.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;

// 12.8. Vector Integer Comparison Instructions
defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETEQ, "PseudoVMSEQ">;
defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETNE, "PseudoVMSNE">;

defm : VPatIntegerSetCCSDNode_VV_VX<SETLT, "PseudoVMSLT">;
defm : VPatIntegerSetCCSDNode_VV_VX<SETULT, "PseudoVMSLTU">;
// x < (c+1)  ==>  x <= c  (immediate decremented via DecImm).
defm : VPatIntegerSetCCSDNode_VIPlus1<SETLT, "PseudoVMSLE",
                                      SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1<SETULT, "PseudoVMSLEU",
                                      SplatPat_simm5_plus1_nonzero>;

defm : VPatIntegerSetCCSDNode_VV<SETGT, "PseudoVMSLT", /*swap*/1>;
defm : VPatIntegerSetCCSDNode_VV<SETUGT, "PseudoVMSLTU", /*swap*/1>;
defm : VPatIntegerSetCCSDNode_VX_VI<SETGT, "PseudoVMSGT">;
defm : VPatIntegerSetCCSDNode_VX_VI<SETUGT, "PseudoVMSGTU">;

defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETLE, "PseudoVMSLE">;
defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETULE, "PseudoVMSLEU">;

defm : VPatIntegerSetCCSDNode_VV<SETGE, "PseudoVMSLE", /*swap*/1>;
defm : VPatIntegerSetCCSDNode_VV<SETUGE, "PseudoVMSLEU", /*swap*/1>;
// x >= (c+1)  ==>  x > c.
defm : VPatIntegerSetCCSDNode_VIPlus1<SETGE, "PseudoVMSGT",
                                      SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1<SETUGE, "PseudoVMSGTU",
                                      SplatPat_simm5_plus1_nonzero>;

// 12.9. Vector Integer Min/Max Instructions
defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
defm : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
defm : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">;
defm : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">;

// 12.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinarySDNode_VV_VX<mul, "PseudoVMUL">;
defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH">;
defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU">;

// 12.11. Vector Integer Divide Instructions
defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU">;
defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV">;
defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU">;
defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM">;

// 12.15. Vector Integer Merge Instructions
// Note: vselect's true operand becomes the merge pseudo's second source.
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), vti.RegClass:$rs1,
                                                      vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
                 vti.AVL, vti.Log2SEW)>;

  def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1),
                                                      vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>;

  def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1),
                                                      vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>;
}

// 16.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
  def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
            (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
            (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
            (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

  def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
            (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
            (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
            (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

  def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
            (!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
            (!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX)
                 VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;

  // Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
  def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
            (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
                 VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
}

} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {

// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPSDNode_VV_VF<fadd, "PseudoVFADD">;
defm : VPatBinaryFPSDNode_VV_VF<fsub, "PseudoVFSUB">;
defm : VPatBinaryFPSDNode_R_VF<fsub, "PseudoVFRSUB">;

// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF<fmul, "PseudoVFMUL">;
defm : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">;
defm : VPatBinaryFPSDNode_R_VF<fdiv, "PseudoVFRDIV">;

// 14.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
foreach fvti = AllFloatVectors in {
  // NOTE: We choose VFMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = fvti.LMul.MX # "_COMMUTABLE";
  def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                              fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                              (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                              (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                              fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
                 fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;

  // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
  // commutable.
  def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
                              fvti.RegClass:$rd, fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
                              fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
                              (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
                              (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;

  // The splat might be negated.
  def : Pat<(fvti.Vector (fma (fneg (splat_vector fvti.ScalarRegClass:$rs1)),
                              fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (fma (fneg (splat_vector fvti.ScalarRegClass:$rs1)),
                              fvti.RegClass:$rd, fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
                 fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
                 fvti.AVL, fvti.Log2SEW)>;
}

foreach vti = AllFloatVectors in {
  // 14.8. Vector Floating-Point Square-Root Instruction
  def : Pat<(fsqrt (vti.Vector vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
                 vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;

  // 14.12. Vector Floating-Point Sign-Injection Instructions
  // fabs is sign-injection-XOR of a value with itself.
  def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
            (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;
  // Handle fneg with VFSGNJN using the same input for both operands.
  def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;

  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector vti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector (splat_vector vti.ScalarRegClass:$rs2)))),
            (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;

  // fcopysign of a negated sign source maps to the sign-injection-NOT form.
  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector (fneg vti.RegClass:$rs2)))),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector (fneg (splat_vector vti.ScalarRegClass:$rs2))))),
            (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;
}

// 14.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN">;
defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX">;

// 14.13.
Vector Floating-Point Compare Instructions 645*da58b97aSjoergdefm : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">; 646*da58b97aSjoergdefm : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">; 647*da58b97aSjoerg 648*da58b97aSjoergdefm : VPatFPSetCCSDNode_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">; 649*da58b97aSjoergdefm : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">; 650*da58b97aSjoerg 651*da58b97aSjoergdefm : VPatFPSetCCSDNode_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">; 652*da58b97aSjoergdefm : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">; 653*da58b97aSjoerg 654*da58b97aSjoergdefm : VPatFPSetCCSDNode_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">; 655*da58b97aSjoergdefm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">; 656*da58b97aSjoerg 657*da58b97aSjoerg// Floating-point vselects: 658*da58b97aSjoerg// 12.15. Vector Integer Merge Instructions 659*da58b97aSjoerg// 14.15. Vector Floating-Point Merge Instruction 660*da58b97aSjoergforeach fvti = AllFloatVectors in { 661*da58b97aSjoerg def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1, 662*da58b97aSjoerg fvti.RegClass:$rs2)), 663*da58b97aSjoerg (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX) 664*da58b97aSjoerg fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm, 665*da58b97aSjoerg fvti.AVL, fvti.Log2SEW)>; 666*da58b97aSjoerg 667*da58b97aSjoerg def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), 668*da58b97aSjoerg (splat_vector fvti.ScalarRegClass:$rs1), 669*da58b97aSjoerg fvti.RegClass:$rs2)), 670*da58b97aSjoerg (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) 671*da58b97aSjoerg fvti.RegClass:$rs2, 672*da58b97aSjoerg (fvti.Scalar fvti.ScalarRegClass:$rs1), 673*da58b97aSjoerg VMV0:$vm, fvti.AVL, fvti.Log2SEW)>; 674*da58b97aSjoerg 675*da58b97aSjoerg def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), 676*da58b97aSjoerg (splat_vector (fvti.Scalar fpimm0)), 
677*da58b97aSjoerg fvti.RegClass:$rs2)), 678*da58b97aSjoerg (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX) 679*da58b97aSjoerg fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.Log2SEW)>; 680*da58b97aSjoerg} 681*da58b97aSjoerg 682*da58b97aSjoerg// 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions 683*da58b97aSjoergdefm : VPatConvertFP2ISDNode_V<fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">; 684*da58b97aSjoergdefm : VPatConvertFP2ISDNode_V<fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">; 685*da58b97aSjoergdefm : VPatConvertI2FPSDNode_V<sint_to_fp, "PseudoVFCVT_F_X_V">; 686*da58b97aSjoergdefm : VPatConvertI2FPSDNode_V<uint_to_fp, "PseudoVFCVT_F_XU_V">; 687*da58b97aSjoerg 688*da58b97aSjoerg// 14.18. Widening Floating-Point/Integer Type-Convert Instructions 689*da58b97aSjoergdefm : VPatWConvertFP2ISDNode_V<fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">; 690*da58b97aSjoergdefm : VPatWConvertFP2ISDNode_V<fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">; 691*da58b97aSjoergdefm : VPatWConvertI2FPSDNode_V<sint_to_fp, "PseudoVFWCVT_F_X_V">; 692*da58b97aSjoergdefm : VPatWConvertI2FPSDNode_V<uint_to_fp, "PseudoVFWCVT_F_XU_V">; 693*da58b97aSjoergforeach fvtiToFWti = AllWidenableFloatVectors in { 694*da58b97aSjoerg defvar fvti = fvtiToFWti.Vti; 695*da58b97aSjoerg defvar fwti = fvtiToFWti.Wti; 696*da58b97aSjoerg def : Pat<(fwti.Vector (fpextend (fvti.Vector fvti.RegClass:$rs1))), 697*da58b97aSjoerg (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX) 698*da58b97aSjoerg fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>; 699*da58b97aSjoerg} 700*da58b97aSjoerg 701*da58b97aSjoerg// 14.19. 
Narrowing Floating-Point/Integer Type-Convert Instructions 702*da58b97aSjoergdefm : VPatNConvertFP2ISDNode_V<fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">; 703*da58b97aSjoergdefm : VPatNConvertFP2ISDNode_V<fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">; 704*da58b97aSjoergdefm : VPatNConvertI2FPSDNode_V<sint_to_fp, "PseudoVFNCVT_F_X_W">; 705*da58b97aSjoergdefm : VPatNConvertI2FPSDNode_V<uint_to_fp, "PseudoVFNCVT_F_XU_W">; 706*da58b97aSjoergforeach fvtiToFWti = AllWidenableFloatVectors in { 707*da58b97aSjoerg defvar fvti = fvtiToFWti.Vti; 708*da58b97aSjoerg defvar fwti = fvtiToFWti.Wti; 709*da58b97aSjoerg def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))), 710*da58b97aSjoerg (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX) 711*da58b97aSjoerg fwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>; 712*da58b97aSjoerg} 713*da58b97aSjoerg} // Predicates = [HasStdExtV, HasStdExtF] 714*da58b97aSjoerg 715*da58b97aSjoerg//===----------------------------------------------------------------------===// 716*da58b97aSjoerg// Vector Splats 717*da58b97aSjoerg//===----------------------------------------------------------------------===// 718*da58b97aSjoerg 719*da58b97aSjoerglet Predicates = [HasStdExtV] in { 720*da58b97aSjoergforeach vti = AllIntegerVectors in { 721*da58b97aSjoerg def : Pat<(vti.Vector (SplatPat GPR:$rs1)), 722*da58b97aSjoerg (!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX) 723*da58b97aSjoerg GPR:$rs1, vti.AVL, vti.Log2SEW)>; 724*da58b97aSjoerg def : Pat<(vti.Vector (SplatPat_simm5 simm5:$rs1)), 725*da58b97aSjoerg (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX) 726*da58b97aSjoerg simm5:$rs1, vti.AVL, vti.Log2SEW)>; 727*da58b97aSjoerg} 728*da58b97aSjoerg} // Predicates = [HasStdExtV] 729*da58b97aSjoerg 730*da58b97aSjoerglet Predicates = [HasStdExtV, HasStdExtF] in { 731*da58b97aSjoergforeach fvti = AllFloatVectors in { 732*da58b97aSjoerg def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)), 733*da58b97aSjoerg 
(!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) 734*da58b97aSjoerg (fvti.Scalar fvti.ScalarRegClass:$rs1), 735*da58b97aSjoerg fvti.AVL, fvti.Log2SEW)>; 736*da58b97aSjoerg 737*da58b97aSjoerg def : Pat<(fvti.Vector (splat_vector (fvti.Scalar fpimm0))), 738*da58b97aSjoerg (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX) 739*da58b97aSjoerg 0, fvti.AVL, fvti.Log2SEW)>; 740*da58b97aSjoerg} 741*da58b97aSjoerg} // Predicates = [HasStdExtV, HasStdExtF] 742*da58b97aSjoerg 743*da58b97aSjoerg//===----------------------------------------------------------------------===// 744*da58b97aSjoerg// Vector Element Extracts 745*da58b97aSjoerg//===----------------------------------------------------------------------===// 746*da58b97aSjoerglet Predicates = [HasStdExtV, HasStdExtF] in 747*da58b97aSjoergforeach vti = AllFloatVectors in { 748*da58b97aSjoerg defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_", 749*da58b97aSjoerg vti.ScalarSuffix, 750*da58b97aSjoerg "_S_", vti.LMul.MX)); 751*da58b97aSjoerg // Only pattern-match extract-element operations where the index is 0. Any 752*da58b97aSjoerg // other index will have been custom-lowered to slide the vector correctly 753*da58b97aSjoerg // into place. 754*da58b97aSjoerg def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)), 755*da58b97aSjoerg (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>; 756*da58b97aSjoerg} 757