//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure to support code generation
/// for the standard 'V' (Vector) extension, version 0.10. This version is still
/// experimental as the 'V' extension hasn't been ratified yet.
///
/// This file is included from RISCVInstrInfoV.td
///
//===----------------------------------------------------------------------===//

def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
                                                SDTCisInt<1>]>>;
def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;

// Operand that is allowed to be a register or a 5 bit immediate.
// This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
// pseudo instructions.
def AVL : RegisterOperand<GPR> {
  let OperandNamespace = "RISCVOp";
  let OperandType = "OPERAND_AVL";
}

// X0 has special meaning for vsetvl/vsetvli.
//  rd | rs1 |   AVL value | Effect on vl
//--------------------------------------------------------------
// !X0 |  X0 |       VLMAX | Set vl to VLMAX
//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;

def DecImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
                                   N->getValueType(0));
}]>;

//===----------------------------------------------------------------------===//
// Utilities.
//===----------------------------------------------------------------------===//

// This class describes information associated to the LMUL.
class LMULInfo<int lmul, VReg regclass, VReg wregclass,
               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
  VReg vrclass = regclass;
  VReg wvrclass = wregclass;
  VReg f8vrclass = f8regclass;
  VReg f4vrclass = f4regclass;
  VReg f2vrclass = f2regclass;
  string MX = mx;
}

// Associate LMUL with tablegen records of register classes.
def V_M1  : LMULInfo<0b000, VR,   VRM2, VR,   VR,   VR, "M1">;
def V_M2  : LMULInfo<0b001, VRM2, VRM4, VR,   VR,   VR, "M2">;
def V_M4  : LMULInfo<0b010, VRM4, VRM8, VRM2, VR,   VR, "M4">;
def V_M8  : LMULInfo<0b011, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;

def V_MF8 : LMULInfo<0b101, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
def V_MF4 : LMULInfo<0b110, VR, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
def V_MF2 : LMULInfo<0b111, VR, VR, VR, VR,/*NoVReg*/VR, "MF2">;

// Used to iterate over all possible LMULs.
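// For illustration, the multiclasses later in this file typically instantiate
// one pseudo per LMUL by iterating this list, along the lines of:
//   foreach m = MxList.m in
//     def "_V_" # m.MX : SomePseudoClass<m.vrclass>;
// where "SomePseudoClass" is a stand-in for one of the VPseudo* classes
// defined below.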
def MxList {
  list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
}

class FPR_Info<RegisterClass regclass, string fx> {
  RegisterClass fprclass = regclass;
  string FX = fx;
}

def SCALAR_F16 : FPR_Info<FPR16, "F16">;
def SCALAR_F32 : FPR_Info<FPR32, "F32">;
def SCALAR_F64 : FPR_Info<FPR64, "F64">;

def FPList {
  list<FPR_Info> fpinfo = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
}

class MxSet<int eew> {
  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}

class NFSet<LMULInfo m> {
  list<int> L = !cond(!eq(m.value, V_M8.value): [],
                      !eq(m.value, V_M4.value): [2],
                      !eq(m.value, V_M2.value): [2, 3, 4],
                      true: [2, 3, 4, 5, 6, 7, 8]);
}

class shift_amount<int num> {
  int val = !if(!eq(num, 1), 0, !add(1, shift_amount<!srl(num, 1)>.val));
}

class octuple_from_str<string MX> {
  int ret = !cond(!eq(MX, "MF8") : 1,
                  !eq(MX, "MF4") : 2,
                  !eq(MX, "MF2") : 4,
                  !eq(MX, "M1") : 8,
                  !eq(MX, "M2") : 16,
                  !eq(MX, "M4") : 32,
                  !eq(MX, "M8") : 64);
}

class octuple_to_str<int octuple> {
  string ret = !if(!eq(octuple, 1), "MF8",
               !if(!eq(octuple, 2), "MF4",
               !if(!eq(octuple, 4), "MF2",
               !if(!eq(octuple, 8), "M1",
               !if(!eq(octuple, 16), "M2",
               !if(!eq(octuple, 32), "M4",
               !if(!eq(octuple, 64), "M8",
               "NoDef")))))));
}

def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;

// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
def VLMax : OutPatFrag<(ops), (XLenVT X0)>;

// List of EEW.
defvar EEWList = [8, 16, 32, 64];

class SegRegClass<LMULInfo m, int nf> {
  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
                                           !eq(m.value, V_MF4.value): V_M1.MX,
                                           !eq(m.value, V_MF2.value): V_M1.MX,
                                           true: m.MX));
}

//===----------------------------------------------------------------------===//
// Vector register and vector group type information.
//===----------------------------------------------------------------------===//

class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR>
{
  ValueType Vector = Vec;
  ValueType Mask = Mas;
  int SEW = Sew;
  int Log2SEW = shift_amount<Sew>.val;
  VReg RegClass = Reg;
  LMULInfo LMul = M;
  ValueType Scalar = Scal;
  RegisterClass ScalarRegClass = ScalarReg;
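  // For illustration, VI32M1 further below is
  // VTypeInfo<vint32m1_t, vbool32_t, /*SEW*/32, VR, V_M1>, which gives
  // Log2SEW = shift_amount<32>.val = 5 and ScalarSuffix = "X" (XLenVT scalar).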
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this type. For scalable vectors this is VLMax.
  OutPatFrag AVL = VLMax;

  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
                              !eq(Scal, f16) : "F16",
                              !eq(Scal, f32) : "F32",
                              !eq(Scal, f64) : "F64");
}

class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
                     VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
                     RegisterClass ScalarReg = GPR>
    : VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg>
{
  ValueType VectorM1 = VecM1;
}

defset list<VTypeInfo> AllVectors = {
  defset list<VTypeInfo> AllIntegerVectors = {
    defset list<VTypeInfo> NoGroupIntegerVectors = {
      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
        def VI8MF8:  VTypeInfo<vint8mf8_t,  vbool64_t, 8,  VR, V_MF8>;
        def VI8MF4:  VTypeInfo<vint8mf4_t,  vbool32_t, 8,  VR, V_MF4>;
        def VI8MF2:  VTypeInfo<vint8mf2_t,  vbool16_t, 8,  VR, V_MF2>;
        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
      }
      def VI8M1:  VTypeInfo<vint8m1_t,  vbool8_t,  8,  VR, V_M1>;
      def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, VR, V_M1>;
      def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, VR, V_M1>;
      def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, VR, V_M1>;
    }
    defset list<GroupVTypeInfo> GroupIntegerVectors = {
      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;

      def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
      def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
      def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;

      def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
      def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
      def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;

      def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
      def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
      def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
    }
  }

  defset list<VTypeInfo> AllFloatVectors = {
    defset list<VTypeInfo> NoGroupFloatVectors = {
      defset list<VTypeInfo> FractionalGroupFloatVectors = {
        def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
        def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
        def VF32MF2: VTypeInfo<vfloat32mf2_t, vbool64_t, 32, VR, V_MF2, f32, FPR32>;
      }
      def VF16M1: VTypeInfo<vfloat16m1_t, vbool16_t, 16, VR, V_M1, f16, FPR16>;
      def VF32M1: VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1, f32, FPR32>;
      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
    }

    defset list<GroupVTypeInfo> GroupFloatVectors = {
      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
                                 VRM2, V_M2, f16, FPR16>;
      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
                                 VRM4, V_M4, f16, FPR16>;
      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
                                 VRM8, V_M8, f16, FPR16>;

      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
                                 VRM2, V_M2, f32, FPR32>;
      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t, 32,
                                 VRM4, V_M4, f32, FPR32>;
      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t, 32,
                                 VRM8, V_M8, f32, FPR32>;

      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
                                 VRM2, V_M2, f64, FPR64>;
      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
                                 VRM4, V_M4, f64, FPR64>;
      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t, 64,
                                 VRM8, V_M8, f64, FPR64>;
    }
  }
}

// This functor is used to obtain the int vector type that has the same SEW and
// multiplier as the input parameter type.
class GetIntVTypeInfo<VTypeInfo vti>
{
  // Equivalent integer vector type. E.g.
  //   VI8M1 → VI8M1 (identity)
  //   VF64M4 → VI64M4
  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
}

class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
  ValueType Mask = Mas;
  // {SEW, VLMul} values set a valid VType to deal with this mask type.
  // We assume SEW=8 and set the corresponding LMUL.
  int SEW = 8;
  int Log2SEW = 3;
  LMULInfo LMul = M;
  string BX = Bx; // Appendix of mask operations.
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this mask type. For scalable masks this is
  // VLMax.
  OutPatFrag AVL = VLMax;
}

defset list<MTypeInfo> AllMasks = {
  // vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
  def : MTypeInfo<vbool64_t, V_MF8, "B1">;
  def : MTypeInfo<vbool32_t, V_MF4, "B2">;
  def : MTypeInfo<vbool16_t, V_MF2, "B4">;
  def : MTypeInfo<vbool8_t, V_M1, "B8">;
  def : MTypeInfo<vbool4_t, V_M2, "B16">;
  def : MTypeInfo<vbool2_t, V_M4, "B32">;
  def : MTypeInfo<vbool1_t, V_M8, "B64">;
}

class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Wti = wti;
}

class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Fti = fti;
}

defset list<VTypeInfoToWide> AllWidenableIntVectors = {
  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
  def : VTypeInfoToWide<VI8M1,   VI16M2>;
  def : VTypeInfoToWide<VI8M2,   VI16M4>;
  def : VTypeInfoToWide<VI8M4,   VI16M8>;

  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
  def : VTypeInfoToWide<VI16MF2, VI32M1>;
  def : VTypeInfoToWide<VI16M1,  VI32M2>;
  def : VTypeInfoToWide<VI16M2,  VI32M4>;
  def : VTypeInfoToWide<VI16M4,  VI32M8>;

  def : VTypeInfoToWide<VI32MF2, VI64M1>;
  def : VTypeInfoToWide<VI32M1,  VI64M2>;
  def : VTypeInfoToWide<VI32M2,  VI64M4>;
  def : VTypeInfoToWide<VI32M4,  VI64M8>;
}

defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
  def : VTypeInfoToWide<VF16MF2, VF32M1>;
  def : VTypeInfoToWide<VF16M1, VF32M2>;
  def : VTypeInfoToWide<VF16M2, VF32M4>;
  def : VTypeInfoToWide<VF16M4, VF32M8>;

  def : VTypeInfoToWide<VF32MF2, VF64M1>;
  def : VTypeInfoToWide<VF32M1, VF64M2>;
  def : VTypeInfoToWide<VF32M2, VF64M4>;
  def : VTypeInfoToWide<VF32M4, VF64M8>;
}

defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
  def : VTypeInfoToFraction<VI16M2, VI8M1>;
  def : VTypeInfoToFraction<VI16M4, VI8M2>;
  def : VTypeInfoToFraction<VI16M8, VI8M4>;
  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
  def : VTypeInfoToFraction<VI32M2, VI16M1>;
  def : VTypeInfoToFraction<VI32M4, VI16M2>;
  def : VTypeInfoToFraction<VI32M8, VI16M4>;
  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
  def : VTypeInfoToFraction<VI64M2, VI32M1>;
  def : VTypeInfoToFraction<VI64M4, VI32M2>;
  def : VTypeInfoToFraction<VI64M8, VI32M4>;
}

defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
  def : VTypeInfoToFraction<VI32M4, VI8M1>;
  def : VTypeInfoToFraction<VI32M8, VI8M2>;
  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
  def : VTypeInfoToFraction<VI64M4, VI16M1>;
  def : VTypeInfoToFraction<VI64M8, VI16M2>;
}

defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
  def : VTypeInfoToFraction<VI64M8, VI8M1>;
}

defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
  def : VTypeInfoToWide<VI8MF2, VF16M1>;
  def : VTypeInfoToWide<VI8M1, VF16M2>;
  def : VTypeInfoToWide<VI8M2, VF16M4>;
  def : VTypeInfoToWide<VI8M4, VF16M8>;

  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
  def : VTypeInfoToWide<VI16MF2, VF32M1>;
  def : VTypeInfoToWide<VI16M1, VF32M2>;
  def : VTypeInfoToWide<VI16M2, VF32M4>;
  def : VTypeInfoToWide<VI16M4, VF32M8>;

  def : VTypeInfoToWide<VI32MF2, VF64M1>;
  def : VTypeInfoToWide<VI32M1, VF64M2>;
  def : VTypeInfoToWide<VI32M2, VF64M4>;
  def : VTypeInfoToWide<VI32M4, VF64M8>;
}

// This class holds the record of the RISCVVPseudoTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVBaseInfo.h.
class CONST8b<bits<8> val> {
  bits<8> V = val;
}
def InvalidIndex : CONST8b<0x80>;
class RISCVVPseudo {
  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
  Instruction BaseInstr;
}

// The actual table.
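// Note: each GenericTable below is emitted by TableGen's SearchableTables
// backend as a C++ table plus a lookup function named by PrimaryKeyName
// (roughly `const PseudoInfo *getPseudoInfo(unsigned Pseudo)` for this one),
// which C++ code can use to map a vector pseudo back to fields such as its
// BaseInstr.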
def RISCVVPseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr" ];
  let PrimaryKey = [ "Pseudo" ];
  let PrimaryKeyName = "getPseudoInfo";
  let PrimaryKeyEarlyOut = true;
}

def RISCVVIntrinsicsTable : GenericTable {
  let FilterClass = "RISCVVIntrinsic";
  let CppTypeName = "RISCVVIntrinsicInfo";
  let Fields = ["IntrinsicID", "SplatOperand"];
  let PrimaryKey = ["IntrinsicID"];
  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}

class RISCVVLE<bit M, bit Str, bit F, bits<7> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<1> FF = F;
  bits<7> SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLETable : GenericTable {
  let FilterClass = "RISCVVLE";
  let CppTypeName = "VLEPseudo";
  let Fields = ["Masked", "Strided", "FF", "SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "FF", "SEW", "LMUL"];
  let PrimaryKeyName = "getVLEPseudo";
}

class RISCVVSE<bit M, bit Str, bits<7> S, bits<3> L> {
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<7> SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSETable : GenericTable {
  let FilterClass = "RISCVVSE";
  let CppTypeName = "VSEPseudo";
  let Fields = ["Masked", "Strided", "SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Strided", "SEW", "LMUL"];
  let PrimaryKeyName = "getVSEPseudo";
}

class RISCVVLX_VSX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<7> SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

class RISCVVLX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, O, S, L, IL>;
class RISCVVSX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> :
  RISCVVLX_VSX<M, O, S, L, IL>;

class RISCVVLX_VSXTable : GenericTable {
  let CppTypeName = "VLX_VSXPseudo";
  let Fields = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
}

def RISCVVLXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVLX";
  let PrimaryKeyName = "getVLXPseudo";
}

def RISCVVSXTable : RISCVVLX_VSXTable {
  let FilterClass = "RISCVVSX";
  let PrimaryKeyName = "getVSXPseudo";
}

class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<7> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<1> FF = F;
  bits<7> SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLSEGTable : GenericTable {
  let FilterClass = "RISCVVLSEG";
  let CppTypeName = "VLSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "FF", "SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "FF", "SEW", "LMUL"];
  let PrimaryKeyName = "getVLSEGPseudo";
}

class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<7> SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVLXSEGTable : GenericTable {
  let FilterClass = "RISCVVLXSEG";
  let CppTypeName = "VLXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL",
                "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVLXSEGPseudo";
}

class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<7> S, bits<3> L> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Strided = Str;
  bits<7> SEW = S;
  bits<3> LMUL = L;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSSEGTable : GenericTable {
  let FilterClass = "RISCVVSSEG";
  let CppTypeName = "VSSEGPseudo";
  let Fields = ["NF", "Masked", "Strided", "SEW", "LMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Strided", "SEW", "LMUL"];
  let PrimaryKeyName = "getVSSEGPseudo";
}

class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
  bits<4> NF = N;
  bits<1> Masked = M;
  bits<1> Ordered = O;
  bits<7> SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

def RISCVVSXSEGTable : GenericTable {
  let FilterClass = "RISCVVSXSEG";
  let CppTypeName = "VSXSEGPseudo";
  let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getVSXSEGPseudo";
}

//===----------------------------------------------------------------------===//
// Helpers to define the different pseudo instructions.
//===----------------------------------------------------------------------===//

class PseudoToVInst<string PseudoInst> {
  string VInst = !subst("_M8", "",
                 !subst("_M4", "",
                 !subst("_M2", "",
                 !subst("_M1", "",
                 !subst("_MF2", "",
                 !subst("_MF4", "",
                 !subst("_MF8", "",
                 !subst("_B1", "",
                 !subst("_B2", "",
                 !subst("_B4", "",
                 !subst("_B8", "",
                 !subst("_B16", "",
                 !subst("_B32", "",
                 !subst("_B64", "",
                 !subst("_MASK", "",
                 !subst("_COMMUTABLE", "",
                 !subst("F16", "F",
                 !subst("F32", "F",
                 !subst("F64", "F",
                 !subst("Pseudo", "", PseudoInst))))))))))))))))))));
}

// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
// of a reduction.
class GetVRegNoV0<VReg VRegClass> {
  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
                 !eq(VRegClass, VRM2) : VRM2NoV0,
                 !eq(VRegClass, VRM4) : VRM4NoV0,
                 !eq(VRegClass, VRM8) : VRM8NoV0,
                 !eq(VRegClass, VRN2M1) : VRN2M1NoV0,
                 !eq(VRegClass, VRN2M2) : VRN2M2NoV0,
                 !eq(VRegClass, VRN2M4) : VRN2M4NoV0,
                 !eq(VRegClass, VRN3M1) : VRN3M1NoV0,
                 !eq(VRegClass, VRN3M2) : VRN3M2NoV0,
                 !eq(VRegClass, VRN4M1) : VRN4M1NoV0,
                 !eq(VRegClass, VRN4M2) : VRN4M2NoV0,
                 !eq(VRegClass, VRN5M1) : VRN5M1NoV0,
                 !eq(VRegClass, VRN6M1) : VRN6M1NoV0,
                 !eq(VRegClass, VRN7M1) : VRN7M1NoV0,
                 !eq(VRegClass, VRN8M1) : VRN8M1NoV0,
                 true : VRegClass);
}

// Join strings in list using separator and ignoring empty elements
class Join<list<string> strings, string separator> {
  string ret = !foldl(!head(strings), !tail(strings), a, b,
                      !cond(
                        !and(!empty(a), !empty(b)) : "",
                        !empty(a) : b,
                        !empty(b) : a,
                        1 : a#separator#b));
}

class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
      Pseudo<outs, ins, []>, RISCVVPseudo {
  let BaseInstr = instr;
  let VLMul = m.value;
}

class VPseudoUSLoadNoMask<VReg RetClass, bits<7> EEW, bit isFF> :
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSLoadMask<VReg RetClass, bits<7> EEW, bit isFF> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPR:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSLoadNoMask<VReg RetClass, bits<7> EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSLoadMask<VReg RetClass, bits<7> EEW>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPR:$rs1, GPR:$rs2,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
                         bit Ordered, bit EarlyClobber>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd", "");
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoILoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
                       bit Ordered, bit EarlyClobber>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  GPR:$rs1, IdxClass:$rs2,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSStoreNoMask<VReg StClass, bits<7> EEW>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/0, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSStoreMask<VReg StClass, bits<7> EEW>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/0, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSStoreNoMask<VReg StClass, bits<7> EEW>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/0, /*Strided*/1, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSStoreMask<VReg StClass, bits<7> EEW>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSE</*Masked*/1, /*Strided*/1, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Unary instruction that is never masked so HasDummyMask=0.
class VPseudoUnaryNoDummyMask<VReg RetClass,
                              DAGOperand Op2Class> :
      Pseudo<(outs RetClass:$rd),
             (ins Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoNullaryNoMask<VReg RegClass>:
      Pseudo<(outs RegClass:$rd),
             (ins AVL:$vl, ixlenimm:$sew),
             []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoNullaryMask<VReg RegClass>:
      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
             (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
              ixlenimm:$sew), []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Nullary for pseudo instructions. They are expanded in
// RISCVExpandPseudoInsts pass.
class VPseudoNullaryPseudoM<string BaseInst>
    : Pseudo<(outs VR:$rd), (ins AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  // BaseInstr is not used in RISCVExpandPseudoInsts pass.
  // Just fill a corresponding real v-inst to pass tablegen check.
  let BaseInstr = !cast<Instruction>(BaseInst);
}

// RetClass could be GPR or VReg.
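// For example, VPseudoUnaryS_M further below instantiates this class as
// VPseudoUnaryNoMask<GPR, VR> for mask operations whose result is a scalar
// (e.g. population count / find-first style operations), while most other
// users return a vector register class.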
class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
      Pseudo<(outs RetClass:$rd),
             (ins OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Constraint;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// mask unary operation without maskedoff
class VPseudoMaskUnarySOutMask:
      Pseudo<(outs GPR:$rd),
             (ins VR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Mask can be V0~V31
class VPseudoUnaryAnyMask<VReg RetClass,
                          VReg Op1Class> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge,
                  Op1Class:$rs2,
                  VR:$vm, AVL:$vl, ixlenimm:$sew),
             []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoBinaryNoMask<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          string Constraint> :
      Pseudo<(outs RetClass:$rd),
             (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Constraint;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
                          bit Ordered>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoIStoreMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
                        bit Ordered>:
      Pseudo<(outs),
             (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoBinaryMask<VReg RetClass,
                        RegisterClass Op1Class,
                        DAGOperand Op2Class,
                        string Constraint> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge,
                  Op1Class:$rs2, Op2Class:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

// Like VPseudoBinaryMask, but output can be V0.
class VPseudoBinaryMOutMask<VReg RetClass,
                            RegisterClass Op1Class,
                            DAGOperand Op2Class,
                            string Constraint> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge,
                  Op1Class:$rs2, Op2Class:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoBinaryCarryIn<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
                           LMULInfo MInfo,
                           bit CarryIn,
                           string Constraint> :
      Pseudo<(outs RetClass:$rd),
             !if(CarryIn,
                (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
                     ixlenimm:$sew),
                (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Constraint;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
  let VLMul = MInfo.value;
}

class VPseudoTernaryNoMask<VReg RetClass,
                           RegisterClass Op1Class,
                           DAGOperand Op2Class,
                           string Constraint> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                  AVL:$vl, ixlenimm:$sew),
             []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoAMOWDNoMask<VReg RetClass,
                         VReg Op1Class> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
             (ins GPR:$rs1,
                  Op1Class:$vs2,
                  GetVRegNoV0<RetClass>.R:$vd,
                  AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
  let usesCustomInserter = 1;
  let Constraints = "$vd_wd = $vd";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoAMOWDMask<VReg RetClass,
                       VReg Op1Class> :
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
             (ins GPR:$rs1,
                  Op1Class:$vs2,
                  GetVRegNoV0<RetClass>.R:$vd,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
      RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
  let usesCustomInserter = 1;
  let Constraints = "$vd_wd = $vd";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

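// Naming illustration (hypothetical defm prefix "PseudoVAMOFOO"): via
// VPseudoAMO/VPseudoAMOEI below, eew=16 with sew=32 and lmul=M1 gives an index
// EMUL of (16 * 8) >> log2(32) = 4, i.e. MF2, so the generated defs would be
// PseudoVAMOFOOEI16_WD_M1_MF2 and PseudoVAMOFOOEI16_WD_M1_MF2_MASK.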
multiclass VPseudoAMOEI<int eew> {
  // Standard scalar AMO supports 32, 64, and 128 Mem data bits,
  // and in the base vector "V" extension, only SEW up to ELEN = max(XLEN, FLEN)
  // are required to be supported. Therefore only [32, 64] is allowed here.
  foreach sew = [32, 64] in {
    foreach lmul = MxSet<sew>.m in {
      defvar octuple_lmul = octuple_from_str<lmul.MX>.ret;
      // Calculate emul = eew * lmul / sew
      defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<sew>.val);
      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
        defvar emulMX = octuple_to_str<octuple_emul>.ret;
        defvar emul = !cast<LMULInfo>("V_" # emulMX);
        let VLMul = lmul.value in {
          def "_WD_" # lmul.MX # "_" # emulMX : VPseudoAMOWDNoMask<lmul.vrclass, emul.vrclass>;
          def "_WD_" # lmul.MX # "_" # emulMX # "_MASK" : VPseudoAMOWDMask<lmul.vrclass, emul.vrclass>;
        }
      }
    }
  }
}

multiclass VPseudoAMO {
  foreach eew = EEWList in
    defm "EI" # eew : VPseudoAMOEI<eew>;
}

class VPseudoUSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
                            bits<4> NF, bit Ordered>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/0, Ordered, EEW, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
                          bits<4> NF, bit Ordered>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVLXSEG<NF, /*Masked*/1, Ordered, EEW, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR: $offset, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR: $offset,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
                             bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
                  AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/0, Ordered, EEW, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
                           bits<4> NF, bit Ordered>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
                  VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVVSXSEG<NF, /*Masked*/1, Ordered, EEW, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

multiclass VPseudoUSLoad<bit isFF> {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      defvar FFStr = !if(isFF, "FF", "");
      let VLMul = lmul.value in {
        def "E" # eew # FFStr # "_V_" # LInfo :
          VPseudoUSLoadNoMask<vreg, eew, isFF>;
        def "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
          VPseudoUSLoadMask<vreg, eew, isFF>;
      }
    }
  }
}

multiclass VPseudoLoadMask {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0>;
    }
  }
}

multiclass VPseudoSLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg, eew>;
      }
    }
  }
}

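// Worked example for the index EMUL computation used below: an indexed load
// with data SEW=32 at LMUL=M2 (octuple 16) and index EEW=8 gives
// octuple_emul = (8 * 16) >> log2(32) = 4, i.e. the index operand uses
// EMUL=MF2 (register class VR), and HasConstraint is set since sew != eew.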
multiclass VPseudoILoad<bit Ordered> {
  foreach eew = EEWList in {
    foreach sew = EEWList in {
      foreach lmul = MxSet<sew>.m in {
        defvar octuple_lmul = octuple_from_str<lmul.MX>.ret;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar LInfo = lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          defvar HasConstraint = !ne(sew, eew);
          let VLMul = lmul.value in {
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
              VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
              VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>;
          }
        }
      }
    }
  }
}

multiclass VPseudoUSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>;
      }
    }
  }
}

multiclass VPseudoStoreMask {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1>;
    }
  }
}

multiclass VPseudoSStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      defvar vreg = lmul.vrclass;
      let VLMul = lmul.value in {
        def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>;
        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>;
      }
    }
  }
}

multiclass VPseudoIStore<bit Ordered> {
  foreach eew = EEWList in {
    foreach sew = EEWList in {
      foreach lmul = MxSet<sew>.m in {
        defvar octuple_lmul = octuple_from_str<lmul.MX>.ret;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<sew>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar LInfo = lmul.MX;
          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
          defvar Vreg = lmul.vrclass;
          defvar IdxVreg = idx_lmul.vrclass;
          let VLMul = lmul.value in {
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
              VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
              VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
          }
        }
      }
    }
  }
}

multiclass VPseudoUnaryS_M {
  foreach mti = AllMasks in
  {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>;
      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask;
    }
  }
}

multiclass VPseudoUnaryM_M {
  defvar constraint = "@earlyclobber $rd";
  foreach mti = AllMasks in
  {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>;
    }
  }
}

multiclass VPseudoMaskNullaryV {
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>;
      def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>;
    }
  }
}

multiclass VPseudoNullaryPseudoM <string BaseInst> {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">;
    }
  }
}

multiclass VPseudoUnaryV_M {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>;
    }
  }
}

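// The "AnyMask" multiclass below pairs with VPseudoUnaryAnyMask above: its
// mask operand is a plain VR (any of v0-v31) rather than VMaskOp, e.g. for
// vcompress-style operations where the mask is an ordinary vector source.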
multiclass VPseudoUnaryV_V_AnyMask {
  foreach m = MxList.m in {
    let VLMul = m.value in
      def _VM # "_" # m.MX : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>;
  }
}

multiclass VPseudoBinary<VReg RetClass,
                         VReg Op1Class,
                         DAGOperand Op2Class,
                         LMULInfo MInfo,
                         string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                             Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
                                                     Constraint>;
  }
}

multiclass VPseudoBinaryM<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          LMULInfo MInfo,
                          string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                             Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
                                                         Op2Class, Constraint>;
  }
}

multiclass VPseudoBinaryEmul<VReg RetClass,
                             VReg Op1Class,
                             DAGOperand Op2Class,
                             LMULInfo lmul,
                             LMULInfo emul,
                             string Constraint = ""> {
  let VLMul = lmul.value in {
    def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                                            Constraint>;
    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
                                                                    Constraint>;
  }
}

multiclass VPseudoBinaryV_VV<string Constraint = ""> {
  foreach m = MxList.m in
    defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}

multiclass VPseudoBinaryV_VV_EEW<int eew, string Constraint = ""> {
  foreach m = MxList.m in {
    foreach sew = EEWList in {
      defvar octuple_lmul = octuple_from_str<m.MX>.ret;
      // emul = lmul * eew / sew
      defvar octuple_emul = !srl(!mul(octuple_lmul, eew), shift_amount<sew>.val);
      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
        defvar emulMX = octuple_to_str<octuple_emul>.ret;
        defvar emul = !cast<LMULInfo>("V_" # emulMX);
        defm _VV : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul, Constraint>;
      }
    }
  }
}

multiclass VPseudoBinaryV_VX<string Constraint = ""> {
  foreach m = MxList.m in
    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}

multiclass VPseudoBinaryV_VF<string Constraint = ""> {
  foreach m = MxList.m in
    foreach f = FPList.fpinfo in
      defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
                                       f.fprclass, m, Constraint>;
}

multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList.m in
    defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}

multiclass VPseudoBinaryM_MM {
  foreach m = MxList.m in
    let VLMul = m.value in {
      def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">;
    }
}

// We use earlyclobber here because the spec only allows narrowing/widening
// sources and destinations to overlap in limited ways:
// * If the destination EEW is smaller than the source EEW, the overlap is
//   legal only in the lowest-numbered part of the source register group.
//   Otherwise, it is illegal.
// * If the destination EEW is greater than the source EEW, the source EMUL is
//   at least 1, and the overlap is in the highest-numbered part of the
//   destination register group, the overlap is legal. Otherwise, it is
//   illegal.
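// For instance, with LMUL=1 operands a widening op writes an LMUL=2 group;
// vwadd.vv v0, v0, v1 (overlap in the low half of the destination) is illegal,
// while overlap confined to the high half would be allowed by the spec.
// The @earlyclobber constraint used below is conservative: it simply forbids
// any overlap between the widened destination group and its sources.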
multiclass VPseudoBinaryW_VV {
  foreach m = MxList.m[0-5] in
    defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
}

multiclass VPseudoBinaryW_VX {
  foreach m = MxList.m[0-5] in
    defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
                               "@earlyclobber $rd">;
}

multiclass VPseudoBinaryW_VF {
  foreach m = MxList.m[0-5] in
    foreach f = FPList.fpinfo[0-1] in
      defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
                                       f.fprclass, m,
                                       "@earlyclobber $rd">;
}

multiclass VPseudoBinaryW_WV {
  foreach m = MxList.m[0-5] in
    defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
}

multiclass VPseudoBinaryW_WX {
  foreach m = MxList.m[0-5] in
    defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m>;
}

multiclass VPseudoBinaryW_WF {
  foreach m = MxList.m[0-5] in
    foreach f = FPList.fpinfo[0-1] in
      defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
                                       f.fprclass, m>;
}

multiclass VPseudoBinaryV_WV {
  foreach m = MxList.m[0-5] in
    defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
}

multiclass VPseudoBinaryV_WX {
  foreach m = MxList.m[0-5] in
    defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
                             "@earlyclobber $rd">;
}

multiclass VPseudoBinaryV_WI {
  foreach m = MxList.m[0-5] in
    defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
                             "@earlyclobber $rd">;
}

// For vadc and vsbc, the instruction encoding is reserved if the destination
// vector register is v0.
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0.
multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList.m in
    def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}

multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList.m in
    def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, GPR, m, CarryIn, Constraint>;
}

multiclass VPseudoBinaryV_FM {
  foreach m = MxList.m in
    foreach f = FPList.fpinfo in
      def "_V" # f.FX # "M_" # m.MX :
        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">;
}

multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList.m in
    def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, simm5, m, CarryIn, Constraint>;
}

multiclass VPseudoUnaryV_V_X_I_NoDummyMask {
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, m.vrclass>;
      def "_X_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, GPR>;
      def "_I_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, simm5>;
    }
  }
}

multiclass VPseudoUnaryV_F_NoDummyMask {
  foreach m = MxList.m in {
foreach f = FPList.fpinfo in { 1713 let VLMul = m.value in { 1714 def "_" # f.FX # "_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, f.fprclass>; 1715 } 1716 } 1717 } 1718} 1719 1720multiclass VPseudoUnaryV_V { 1721 foreach m = MxList.m in { 1722 let VLMul = m.value in { 1723 def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>; 1724 def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>; 1725 } 1726 } 1727} 1728 1729multiclass PseudoUnaryV_VF2 { 1730 defvar constraints = "@earlyclobber $rd"; 1731 foreach m = MxList.m[1-6] in 1732 { 1733 let VLMul = m.value in { 1734 def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>; 1735 def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f2vrclass, 1736 constraints>; 1737 } 1738 } 1739} 1740 1741multiclass PseudoUnaryV_VF4 { 1742 defvar constraints = "@earlyclobber $rd"; 1743 foreach m = MxList.m[2-6] in 1744 { 1745 let VLMul = m.value in { 1746 def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>; 1747 def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f4vrclass, 1748 constraints>; 1749 } 1750 } 1751} 1752 1753multiclass PseudoUnaryV_VF8 { 1754 defvar constraints = "@earlyclobber $rd"; 1755 foreach m = MxList.m[3-6] in 1756 { 1757 let VLMul = m.value in { 1758 def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>; 1759 def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f8vrclass, 1760 constraints>; 1761 } 1762 } 1763} 1764 1765// The destination EEW is 1. 1766// The source EEW is 8, 16, 32, or 64. 1767// When the destination EEW is different from source EEW, we need to use 1768// @earlyclobber to avoid the overlap between destination and source registers. 1769multiclass VPseudoBinaryM_VV { 1770 foreach m = MxList.m in 1771 defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m, "@earlyclobber $rd">; 1772} 1773 1774multiclass VPseudoBinaryM_VX { 1775 foreach m = MxList.m in 1776 defm "_VX" : 1777 VPseudoBinaryM<VR, m.vrclass, GPR, m, "@earlyclobber $rd">; 1778} 1779 1780multiclass VPseudoBinaryM_VF { 1781 foreach m = MxList.m in 1782 foreach f = FPList.fpinfo in 1783 defm "_V" # f.FX : 1784 VPseudoBinaryM<VR, m.vrclass, f.fprclass, m, "@earlyclobber $rd">; 1785} 1786 1787multiclass VPseudoBinaryM_VI { 1788 foreach m = MxList.m in 1789 defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m, "@earlyclobber $rd">; 1790} 1791 1792multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> { 1793 defm "" : VPseudoBinaryV_VV<Constraint>; 1794 defm "" : VPseudoBinaryV_VX<Constraint>; 1795 defm "" : VPseudoBinaryV_VI<ImmType, Constraint>; 1796} 1797 1798multiclass VPseudoBinaryV_VV_VX { 1799 defm "" : VPseudoBinaryV_VV; 1800 defm "" : VPseudoBinaryV_VX; 1801} 1802 1803multiclass VPseudoBinaryV_VV_VF { 1804 defm "" : VPseudoBinaryV_VV; 1805 defm "" : VPseudoBinaryV_VF; 1806} 1807 1808multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> { 1809 defm "" : VPseudoBinaryV_VX; 1810 defm "" : VPseudoBinaryV_VI<ImmType>; 1811} 1812 1813multiclass VPseudoBinaryW_VV_VX { 1814 defm "" : VPseudoBinaryW_VV; 1815 defm "" : VPseudoBinaryW_VX; 1816} 1817 1818multiclass VPseudoBinaryW_VV_VF { 1819 defm "" : VPseudoBinaryW_VV; 1820 defm "" : VPseudoBinaryW_VF; 1821} 1822 1823multiclass VPseudoBinaryW_WV_WX { 1824 defm "" : VPseudoBinaryW_WV; 1825 defm "" : VPseudoBinaryW_WX; 1826} 1827 1828multiclass VPseudoBinaryW_WV_WF { 1829 defm "" : VPseudoBinaryW_WV; 1830 defm "" : VPseudoBinaryW_WF; 1831} 1832 1833multiclass VPseudoBinaryV_VM_XM_IM { 1834 defm "" : 
VPseudoBinaryV_VM; 1835 defm "" : VPseudoBinaryV_XM; 1836 defm "" : VPseudoBinaryV_IM; 1837} 1838 1839multiclass VPseudoBinaryV_VM_XM { 1840 defm "" : VPseudoBinaryV_VM; 1841 defm "" : VPseudoBinaryV_XM; 1842} 1843 1844multiclass VPseudoBinaryM_VM_XM_IM<string Constraint> { 1845 defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>; 1846 defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>; 1847 defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>; 1848} 1849 1850multiclass VPseudoBinaryM_VM_XM<string Constraint> { 1851 defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>; 1852 defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>; 1853} 1854 1855multiclass VPseudoBinaryM_V_X_I<string Constraint> { 1856 defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>; 1857 defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>; 1858 defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>; 1859} 1860 1861multiclass VPseudoBinaryM_V_X<string Constraint> { 1862 defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>; 1863 defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>; 1864} 1865 1866multiclass VPseudoBinaryV_WV_WX_WI { 1867 defm "" : VPseudoBinaryV_WV; 1868 defm "" : VPseudoBinaryV_WX; 1869 defm "" : VPseudoBinaryV_WI; 1870} 1871 1872multiclass VPseudoTernary<VReg RetClass, 1873 RegisterClass Op1Class, 1874 DAGOperand Op2Class, 1875 LMULInfo MInfo, 1876 string Constraint = ""> { 1877 let VLMul = MInfo.value in { 1878 def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>; 1879 def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>; 1880 } 1881} 1882 1883multiclass VPseudoTernaryV_VV<string Constraint = ""> { 1884 foreach m = MxList.m in 1885 defm _VV : VPseudoTernary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>; 1886} 1887 1888multiclass VPseudoTernaryV_VX<string Constraint = ""> { 1889 foreach m = MxList.m in 1890 defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m, Constraint>; 1891} 1892 1893multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> { 1894 foreach m = MxList.m in 1895 defm "_VX" : VPseudoTernary<m.vrclass, GPR, m.vrclass, m, Constraint>; 1896} 1897 1898multiclass VPseudoTernaryV_VF_AAXA<string Constraint = ""> { 1899 foreach m = MxList.m in 1900 foreach f = FPList.fpinfo in 1901 defm "_V" # f.FX : VPseudoTernary<m.vrclass, f.fprclass, m.vrclass, 1902 m, Constraint>; 1903} 1904 1905multiclass VPseudoTernaryW_VV { 1906 defvar constraint = "@earlyclobber $rd"; 1907 foreach m = MxList.m[0-5] in 1908 defm _VV : VPseudoTernary<m.wvrclass, m.vrclass, m.vrclass, m, constraint>; 1909} 1910 1911multiclass VPseudoTernaryW_VX { 1912 defvar constraint = "@earlyclobber $rd"; 1913 foreach m = MxList.m[0-5] in 1914 defm "_VX" : VPseudoTernary<m.wvrclass, GPR, m.vrclass, m, constraint>; 1915} 1916 1917multiclass VPseudoTernaryW_VF { 1918 defvar constraint = "@earlyclobber $rd"; 1919 foreach m = MxList.m[0-5] in 1920 foreach f = FPList.fpinfo[0-1] in 1921 defm "_V" # f.FX : VPseudoTernary<m.wvrclass, f.fprclass, m.vrclass, m, 1922 constraint>; 1923} 1924 1925multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> { 1926 foreach m = MxList.m in 1927 defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>; 1928} 1929 1930multiclass VPseudoTernaryV_VV_VX_AAXA<string Constraint = ""> { 1931 defm "" : 
VPseudoTernaryV_VV<Constraint>; 1932 defm "" : VPseudoTernaryV_VX_AAXA<Constraint>; 1933} 1934 1935multiclass VPseudoTernaryV_VV_VF_AAXA<string Constraint = ""> { 1936 defm "" : VPseudoTernaryV_VV<Constraint>; 1937 defm "" : VPseudoTernaryV_VF_AAXA<Constraint>; 1938 1939 foreach m = MxList.m in { 1940 // Add a commutable version for use by IR fma. 1941 // NOTE: We need this because we use a tail undisturbed policy on the 1942 // intrinsic version so we can't commute those instructions since it would 1943 // change which input operand is tied to the destination. That would 1944 // remove user control of the tail elements. 1945 let isCommutable = 1, ForceTailAgnostic = true, VLMul = m.value in { 1946 def "_VV_" # m.MX # "_COMMUTABLE" : VPseudoTernaryNoMask<m.vrclass, 1947 m.vrclass, 1948 m.vrclass, 1949 Constraint>; 1950 foreach f = FPList.fpinfo in 1951 def "_V" # f.FX # "_" # m.MX # "_COMMUTABLE" : 1952 VPseudoTernaryNoMask<m.vrclass, f.fprclass, m.vrclass, Constraint>; 1953 } 1954 } 1955} 1956 1957multiclass VPseudoTernaryV_VX_VI<Operand ImmType = simm5, string Constraint = ""> { 1958 defm "" : VPseudoTernaryV_VX<Constraint>; 1959 defm "" : VPseudoTernaryV_VI<ImmType, Constraint>; 1960} 1961 1962multiclass VPseudoTernaryW_VV_VX { 1963 defm "" : VPseudoTernaryW_VV; 1964 defm "" : VPseudoTernaryW_VX; 1965} 1966 1967multiclass VPseudoTernaryW_VV_VF { 1968 defm "" : VPseudoTernaryW_VV; 1969 defm "" : VPseudoTernaryW_VF; 1970} 1971 1972multiclass VPseudoBinaryM_VV_VX_VI { 1973 defm "" : VPseudoBinaryM_VV; 1974 defm "" : VPseudoBinaryM_VX; 1975 defm "" : VPseudoBinaryM_VI; 1976} 1977 1978multiclass VPseudoBinaryM_VV_VX { 1979 defm "" : VPseudoBinaryM_VV; 1980 defm "" : VPseudoBinaryM_VX; 1981} 1982 1983multiclass VPseudoBinaryM_VV_VF { 1984 defm "" : VPseudoBinaryM_VV; 1985 defm "" : VPseudoBinaryM_VF; 1986} 1987 1988multiclass VPseudoBinaryM_VX_VI { 1989 defm "" : VPseudoBinaryM_VX; 1990 defm "" : VPseudoBinaryM_VI; 1991} 1992 1993multiclass VPseudoReductionV_VS { 1994 foreach m = MxList.m in { 1995 let ForceTailAgnostic = true in 1996 defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>; 1997 } 1998} 1999 2000multiclass VPseudoConversion<VReg RetClass, 2001 VReg Op1Class, 2002 LMULInfo MInfo, 2003 string Constraint = ""> { 2004 let VLMul = MInfo.value in { 2005 def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>; 2006 def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class, 2007 Constraint>; 2008 } 2009} 2010 2011multiclass VPseudoConversionV_V { 2012 foreach m = MxList.m in 2013 defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>; 2014} 2015 2016multiclass VPseudoConversionW_V { 2017 defvar constraint = "@earlyclobber $rd"; 2018 foreach m = MxList.m[0-5] in 2019 defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>; 2020} 2021 2022multiclass VPseudoConversionV_W { 2023 defvar constraint = "@earlyclobber $rd"; 2024 foreach m = MxList.m[0-5] in 2025 defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>; 2026} 2027 2028multiclass VPseudoUSSegLoad<bit isFF> { 2029 foreach eew = EEWList in { 2030 foreach lmul = MxSet<eew>.m in { 2031 defvar LInfo = lmul.MX; 2032 let VLMul = lmul.value in { 2033 foreach nf = NFSet<lmul>.L in { 2034 defvar vreg = SegRegClass<lmul, nf>.RC; 2035 defvar FFStr = !if(isFF, "FF", ""); 2036 def nf # "E" # eew # FFStr # "_V_" # LInfo : 2037 VPseudoUSSegLoadNoMask<vreg, eew, nf, isFF>; 2038 def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" : 2039 VPseudoUSSegLoadMask<vreg, eew, nf, isFF>; 2040 } 2041 
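// Note (illustrative reading of the name concatenation above): with the
// "defm PseudoVLSEG : VPseudoUSSegLoad<...>" instantiations later in this
// file, nf=2, eew=32 and LMUL=M1 produce pseudos named PseudoVLSEG2E32_V_M1
// and PseudoVLSEG2E32_V_M1_MASK; the fault-only-first variant inserts "FF"
// after the EEW, e.g. PseudoVLSEG2E32FF_V_M1.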
} 2042 } 2043 } 2044} 2045 2046multiclass VPseudoSSegLoad { 2047 foreach eew = EEWList in { 2048 foreach lmul = MxSet<eew>.m in { 2049 defvar LInfo = lmul.MX; 2050 let VLMul = lmul.value in { 2051 foreach nf = NFSet<lmul>.L in { 2052 defvar vreg = SegRegClass<lmul, nf>.RC; 2053 def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>; 2054 def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>; 2055 } 2056 } 2057 } 2058 } 2059} 2060 2061multiclass VPseudoISegLoad<bit Ordered> { 2062 foreach idx_eew = EEWList in { 2063 foreach sew = EEWList in { 2064 foreach val_lmul = MxSet<sew>.m in { 2065 defvar octuple_lmul = octuple_from_str<val_lmul.MX>.ret; 2066 // Calculate emul = eew * lmul / sew 2067 defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), shift_amount<sew>.val); 2068 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 2069 defvar ValLInfo = val_lmul.MX; 2070 defvar IdxLInfo = octuple_to_str<octuple_emul>.ret; 2071 defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo); 2072 defvar Vreg = val_lmul.vrclass; 2073 defvar IdxVreg = idx_lmul.vrclass; 2074 let VLMul = val_lmul.value in { 2075 foreach nf = NFSet<val_lmul>.L in { 2076 defvar ValVreg = SegRegClass<val_lmul, nf>.RC; 2077 def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo : 2078 VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, 2079 nf, Ordered>; 2080 def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" : 2081 VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, 2082 nf, Ordered>; 2083 } 2084 } 2085 } 2086 } 2087 } 2088 } 2089} 2090 2091multiclass VPseudoUSSegStore { 2092 foreach eew = EEWList in { 2093 foreach lmul = MxSet<eew>.m in { 2094 defvar LInfo = lmul.MX; 2095 let VLMul = lmul.value in { 2096 foreach nf = NFSet<lmul>.L in { 2097 defvar vreg = SegRegClass<lmul, nf>.RC; 2098 def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>; 2099 def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>; 2100 } 2101 } 2102 } 2103 } 2104} 2105 2106multiclass VPseudoSSegStore { 2107 foreach eew = EEWList in { 2108 foreach lmul = MxSet<eew>.m in { 2109 defvar LInfo = lmul.MX; 2110 let VLMul = lmul.value in { 2111 foreach nf = NFSet<lmul>.L in { 2112 defvar vreg = SegRegClass<lmul, nf>.RC; 2113 def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>; 2114 def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>; 2115 } 2116 } 2117 } 2118 } 2119} 2120 2121multiclass VPseudoISegStore<bit Ordered> { 2122 foreach idx_eew = EEWList in { 2123 foreach sew = EEWList in { 2124 foreach val_lmul = MxSet<sew>.m in { 2125 defvar octuple_lmul = octuple_from_str<val_lmul.MX>.ret; 2126 // Calculate emul = eew * lmul / sew 2127 defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), shift_amount<sew>.val); 2128 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 2129 defvar ValLInfo = val_lmul.MX; 2130 defvar IdxLInfo = octuple_to_str<octuple_emul>.ret; 2131 defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo); 2132 defvar Vreg = val_lmul.vrclass; 2133 defvar IdxVreg = idx_lmul.vrclass; 2134 let VLMul = val_lmul.value in { 2135 foreach nf = NFSet<val_lmul>.L in { 2136 defvar ValVreg = SegRegClass<val_lmul, nf>.RC; 2137 def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo : 2138 VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, 2139 nf, Ordered>; 2140 def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" : 2141 
VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, 2142 nf, Ordered>; 2143 } 2144 } 2145 } 2146 } 2147 } 2148 } 2149} 2150 2151//===----------------------------------------------------------------------===// 2152// Helpers to define the intrinsic patterns. 2153//===----------------------------------------------------------------------===// 2154 2155class VPatUnaryNoMask<string intrinsic_name, 2156 string inst, 2157 string kind, 2158 ValueType result_type, 2159 ValueType op2_type, 2160 int sew, 2161 LMULInfo vlmul, 2162 VReg op2_reg_class> : 2163 Pat<(result_type (!cast<Intrinsic>(intrinsic_name) 2164 (op2_type op2_reg_class:$rs2), 2165 VLOpFrag)), 2166 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX) 2167 (op2_type op2_reg_class:$rs2), 2168 GPR:$vl, sew)>; 2169 2170class VPatUnaryMask<string intrinsic_name, 2171 string inst, 2172 string kind, 2173 ValueType result_type, 2174 ValueType op2_type, 2175 ValueType mask_type, 2176 int sew, 2177 LMULInfo vlmul, 2178 VReg result_reg_class, 2179 VReg op2_reg_class> : 2180 Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask") 2181 (result_type result_reg_class:$merge), 2182 (op2_type op2_reg_class:$rs2), 2183 (mask_type V0), 2184 VLOpFrag)), 2185 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK") 2186 (result_type result_reg_class:$merge), 2187 (op2_type op2_reg_class:$rs2), 2188 (mask_type V0), GPR:$vl, sew)>; 2189 2190class VPatMaskUnaryNoMask<string intrinsic_name, 2191 string inst, 2192 MTypeInfo mti> : 2193 Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name) 2194 (mti.Mask VR:$rs2), 2195 VLOpFrag)), 2196 (!cast<Instruction>(inst#"_M_"#mti.BX) 2197 (mti.Mask VR:$rs2), 2198 GPR:$vl, mti.Log2SEW)>; 2199 2200class VPatMaskUnaryMask<string intrinsic_name, 2201 string inst, 2202 MTypeInfo mti> : 2203 Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask") 2204 (mti.Mask VR:$merge), 2205 (mti.Mask VR:$rs2), 2206 (mti.Mask V0), 2207 VLOpFrag)), 2208 (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") 2209 (mti.Mask VR:$merge), 2210 (mti.Mask VR:$rs2), 2211 (mti.Mask V0), GPR:$vl, mti.Log2SEW)>; 2212 2213class VPatUnaryAnyMask<string intrinsic, 2214 string inst, 2215 string kind, 2216 ValueType result_type, 2217 ValueType op1_type, 2218 ValueType mask_type, 2219 int sew, 2220 LMULInfo vlmul, 2221 VReg result_reg_class, 2222 VReg op1_reg_class> : 2223 Pat<(result_type (!cast<Intrinsic>(intrinsic) 2224 (result_type result_reg_class:$merge), 2225 (op1_type op1_reg_class:$rs1), 2226 (mask_type VR:$rs2), 2227 VLOpFrag)), 2228 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX) 2229 (result_type result_reg_class:$merge), 2230 (op1_type op1_reg_class:$rs1), 2231 (mask_type VR:$rs2), 2232 GPR:$vl, sew)>; 2233 2234class VPatBinaryNoMask<string intrinsic_name, 2235 string inst, 2236 ValueType result_type, 2237 ValueType op1_type, 2238 ValueType op2_type, 2239 int sew, 2240 VReg op1_reg_class, 2241 DAGOperand op2_kind> : 2242 Pat<(result_type (!cast<Intrinsic>(intrinsic_name) 2243 (op1_type op1_reg_class:$rs1), 2244 (op2_type op2_kind:$rs2), 2245 VLOpFrag)), 2246 (!cast<Instruction>(inst) 2247 (op1_type op1_reg_class:$rs1), 2248 (op2_type op2_kind:$rs2), 2249 GPR:$vl, sew)>; 2250 2251// Same as above but source operands are swapped. 
2252class VPatBinaryNoMaskSwapped<string intrinsic_name, 2253 string inst, 2254 ValueType result_type, 2255 ValueType op1_type, 2256 ValueType op2_type, 2257 int sew, 2258 VReg op1_reg_class, 2259 DAGOperand op2_kind> : 2260 Pat<(result_type (!cast<Intrinsic>(intrinsic_name) 2261 (op2_type op2_kind:$rs2), 2262 (op1_type op1_reg_class:$rs1), 2263 VLOpFrag)), 2264 (!cast<Instruction>(inst) 2265 (op1_type op1_reg_class:$rs1), 2266 (op2_type op2_kind:$rs2), 2267 GPR:$vl, sew)>; 2268 2269class VPatBinaryMask<string intrinsic_name, 2270 string inst, 2271 ValueType result_type, 2272 ValueType op1_type, 2273 ValueType op2_type, 2274 ValueType mask_type, 2275 int sew, 2276 VReg result_reg_class, 2277 VReg op1_reg_class, 2278 DAGOperand op2_kind> : 2279 Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask") 2280 (result_type result_reg_class:$merge), 2281 (op1_type op1_reg_class:$rs1), 2282 (op2_type op2_kind:$rs2), 2283 (mask_type V0), 2284 VLOpFrag)), 2285 (!cast<Instruction>(inst#"_MASK") 2286 (result_type result_reg_class:$merge), 2287 (op1_type op1_reg_class:$rs1), 2288 (op2_type op2_kind:$rs2), 2289 (mask_type V0), GPR:$vl, sew)>; 2290 2291// Same as above but source operands are swapped. 2292class VPatBinaryMaskSwapped<string intrinsic_name, 2293 string inst, 2294 ValueType result_type, 2295 ValueType op1_type, 2296 ValueType op2_type, 2297 ValueType mask_type, 2298 int sew, 2299 VReg result_reg_class, 2300 VReg op1_reg_class, 2301 DAGOperand op2_kind> : 2302 Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask") 2303 (result_type result_reg_class:$merge), 2304 (op2_type op2_kind:$rs2), 2305 (op1_type op1_reg_class:$rs1), 2306 (mask_type V0), 2307 VLOpFrag)), 2308 (!cast<Instruction>(inst#"_MASK") 2309 (result_type result_reg_class:$merge), 2310 (op1_type op1_reg_class:$rs1), 2311 (op2_type op2_kind:$rs2), 2312 (mask_type V0), GPR:$vl, sew)>; 2313 2314class VPatTernaryNoMask<string intrinsic, 2315 string inst, 2316 string kind, 2317 ValueType result_type, 2318 ValueType op1_type, 2319 ValueType op2_type, 2320 ValueType mask_type, 2321 int sew, 2322 LMULInfo vlmul, 2323 VReg result_reg_class, 2324 RegisterClass op1_reg_class, 2325 DAGOperand op2_kind> : 2326 Pat<(result_type (!cast<Intrinsic>(intrinsic) 2327 (result_type result_reg_class:$rs3), 2328 (op1_type op1_reg_class:$rs1), 2329 (op2_type op2_kind:$rs2), 2330 VLOpFrag)), 2331 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX) 2332 result_reg_class:$rs3, 2333 (op1_type op1_reg_class:$rs1), 2334 op2_kind:$rs2, 2335 GPR:$vl, sew)>; 2336 2337class VPatTernaryMask<string intrinsic, 2338 string inst, 2339 string kind, 2340 ValueType result_type, 2341 ValueType op1_type, 2342 ValueType op2_type, 2343 ValueType mask_type, 2344 int sew, 2345 LMULInfo vlmul, 2346 VReg result_reg_class, 2347 RegisterClass op1_reg_class, 2348 DAGOperand op2_kind> : 2349 Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask") 2350 (result_type result_reg_class:$rs3), 2351 (op1_type op1_reg_class:$rs1), 2352 (op2_type op2_kind:$rs2), 2353 (mask_type V0), 2354 VLOpFrag)), 2355 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK") 2356 result_reg_class:$rs3, 2357 (op1_type op1_reg_class:$rs1), 2358 op2_kind:$rs2, 2359 (mask_type V0), 2360 GPR:$vl, sew)>; 2361 2362class VPatAMOWDNoMask<string intrinsic_name, 2363 string inst, 2364 ValueType result_type, 2365 ValueType op1_type, 2366 int sew, 2367 LMULInfo vlmul, 2368 LMULInfo emul, 2369 VReg op1_reg_class> : 2370 Pat<(result_type (!cast<Intrinsic>(intrinsic_name) 2371 GPR:$rs1, 2372 (op1_type op1_reg_class:$vs2), 
2373 (result_type vlmul.vrclass:$vd), 2374 VLOpFrag)), 2375 (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX) 2376 $rs1, $vs2, $vd, 2377 GPR:$vl, sew)>; 2378 2379class VPatAMOWDMask<string intrinsic_name, 2380 string inst, 2381 ValueType result_type, 2382 ValueType op1_type, 2383 ValueType mask_type, 2384 int sew, 2385 LMULInfo vlmul, 2386 LMULInfo emul, 2387 VReg op1_reg_class> : 2388 Pat<(result_type (!cast<Intrinsic>(intrinsic_name # "_mask") 2389 GPR:$rs1, 2390 (op1_type op1_reg_class:$vs2), 2391 (result_type vlmul.vrclass:$vd), 2392 (mask_type V0), 2393 VLOpFrag)), 2394 (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX # "_MASK") 2395 $rs1, $vs2, $vd, 2396 (mask_type V0), GPR:$vl, sew)>; 2397 2398multiclass VPatUnaryS_M<string intrinsic_name, 2399 string inst> 2400{ 2401 foreach mti = AllMasks in { 2402 def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name) 2403 (mti.Mask VR:$rs1), VLOpFrag)), 2404 (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1, 2405 GPR:$vl, mti.Log2SEW)>; 2406 def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask") 2407 (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)), 2408 (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1, 2409 (mti.Mask V0), GPR:$vl, mti.Log2SEW)>; 2410 } 2411} 2412 2413multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction, 2414 list<VTypeInfo> vtilist> { 2415 foreach vti = vtilist in { 2416 def : VPatUnaryAnyMask<intrinsic, instruction, "VM", 2417 vti.Vector, vti.Vector, vti.Mask, 2418 vti.Log2SEW, vti.LMul, vti.RegClass, 2419 vti.RegClass>; 2420 } 2421} 2422 2423multiclass VPatUnaryM_M<string intrinsic, 2424 string inst> 2425{ 2426 foreach mti = AllMasks in { 2427 def : VPatMaskUnaryNoMask<intrinsic, inst, mti>; 2428 def : VPatMaskUnaryMask<intrinsic, inst, mti>; 2429 } 2430} 2431 2432multiclass VPatUnaryV_M<string intrinsic, string instruction> 2433{ 2434 foreach vti = AllIntegerVectors in { 2435 def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask, 2436 vti.Log2SEW, vti.LMul, VR>; 2437 def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask, 2438 vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>; 2439 } 2440} 2441 2442multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix, 2443 list<VTypeInfoToFraction> fractionList> 2444{ 2445 foreach vtiTofti = fractionList in 2446 { 2447 defvar vti = vtiTofti.Vti; 2448 defvar fti = vtiTofti.Fti; 2449 def : VPatUnaryNoMask<intrinsic, instruction, suffix, 2450 vti.Vector, fti.Vector, 2451 vti.Log2SEW, vti.LMul, fti.RegClass>; 2452 def : VPatUnaryMask<intrinsic, instruction, suffix, 2453 vti.Vector, fti.Vector, vti.Mask, 2454 vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>; 2455 } 2456} 2457 2458multiclass VPatUnaryV_V<string intrinsic, string instruction, 2459 list<VTypeInfo> vtilist> { 2460 foreach vti = vtilist in { 2461 def : VPatUnaryNoMask<intrinsic, instruction, "V", 2462 vti.Vector, vti.Vector, 2463 vti.Log2SEW, vti.LMul, vti.RegClass>; 2464 def : VPatUnaryMask<intrinsic, instruction, "V", 2465 vti.Vector, vti.Vector, vti.Mask, 2466 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>; 2467 } 2468} 2469 2470multiclass VPatNullaryV<string intrinsic, string instruction> 2471{ 2472 foreach vti = AllIntegerVectors in { 2473 def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic) 2474 VLOpFrag)), 2475 (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX) 2476 GPR:$vl, vti.Log2SEW)>; 2477 def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask") 2478 (vti.Vector vti.RegClass:$merge), 2479 (vti.Mask V0), VLOpFrag)), 
2480 (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK") 2481 vti.RegClass:$merge, (vti.Mask V0), 2482 GPR:$vl, vti.Log2SEW)>; 2483 } 2484} 2485 2486multiclass VPatNullaryM<string intrinsic, string inst> { 2487 foreach mti = AllMasks in 2488 def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic) 2489 (XLenVT (VLOp (XLenVT (XLenVT GPR:$vl)))))), 2490 (!cast<Instruction>(inst#"_M_"#mti.BX) 2491 GPR:$vl, mti.Log2SEW)>; 2492} 2493 2494multiclass VPatBinary<string intrinsic, 2495 string inst, 2496 ValueType result_type, 2497 ValueType op1_type, 2498 ValueType op2_type, 2499 ValueType mask_type, 2500 int sew, 2501 VReg result_reg_class, 2502 VReg op1_reg_class, 2503 DAGOperand op2_kind> 2504{ 2505 def : VPatBinaryNoMask<intrinsic, inst, result_type, op1_type, op2_type, 2506 sew, op1_reg_class, op2_kind>; 2507 def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type, 2508 mask_type, sew, result_reg_class, op1_reg_class, 2509 op2_kind>; 2510} 2511 2512multiclass VPatBinarySwapped<string intrinsic, 2513 string inst, 2514 ValueType result_type, 2515 ValueType op1_type, 2516 ValueType op2_type, 2517 ValueType mask_type, 2518 int sew, 2519 VReg result_reg_class, 2520 VReg op1_reg_class, 2521 DAGOperand op2_kind> 2522{ 2523 def : VPatBinaryNoMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type, 2524 sew, op1_reg_class, op2_kind>; 2525 def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type, 2526 mask_type, sew, result_reg_class, op1_reg_class, 2527 op2_kind>; 2528} 2529 2530multiclass VPatBinaryCarryIn<string intrinsic, 2531 string inst, 2532 string kind, 2533 ValueType result_type, 2534 ValueType op1_type, 2535 ValueType op2_type, 2536 ValueType mask_type, 2537 int sew, 2538 LMULInfo vlmul, 2539 VReg op1_reg_class, 2540 DAGOperand op2_kind> 2541{ 2542 def : Pat<(result_type (!cast<Intrinsic>(intrinsic) 2543 (op1_type op1_reg_class:$rs1), 2544 (op2_type op2_kind:$rs2), 2545 (mask_type V0), 2546 VLOpFrag)), 2547 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX) 2548 (op1_type op1_reg_class:$rs1), 2549 (op2_type op2_kind:$rs2), 2550 (mask_type V0), GPR:$vl, sew)>; 2551} 2552 2553multiclass VPatBinaryMaskOut<string intrinsic, 2554 string inst, 2555 string kind, 2556 ValueType result_type, 2557 ValueType op1_type, 2558 ValueType op2_type, 2559 int sew, 2560 LMULInfo vlmul, 2561 VReg op1_reg_class, 2562 DAGOperand op2_kind> 2563{ 2564 def : Pat<(result_type (!cast<Intrinsic>(intrinsic) 2565 (op1_type op1_reg_class:$rs1), 2566 (op2_type op2_kind:$rs2), 2567 VLOpFrag)), 2568 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX) 2569 (op1_type op1_reg_class:$rs1), 2570 (op2_type op2_kind:$rs2), 2571 GPR:$vl, sew)>; 2572} 2573 2574multiclass VPatConversion<string intrinsic, 2575 string inst, 2576 string kind, 2577 ValueType result_type, 2578 ValueType op1_type, 2579 ValueType mask_type, 2580 int sew, 2581 LMULInfo vlmul, 2582 VReg result_reg_class, 2583 VReg op1_reg_class> 2584{ 2585 def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type, 2586 sew, vlmul, op1_reg_class>; 2587 def : VPatUnaryMask<intrinsic, inst, kind, result_type, op1_type, 2588 mask_type, sew, vlmul, result_reg_class, op1_reg_class>; 2589} 2590 2591multiclass VPatBinaryV_VV<string intrinsic, string instruction, 2592 list<VTypeInfo> vtilist> { 2593 foreach vti = vtilist in 2594 defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX, 2595 vti.Vector, vti.Vector, vti.Vector,vti.Mask, 2596 vti.Log2SEW, vti.RegClass, 2597 vti.RegClass, vti.RegClass>; 2598} 2599 2600multiclass 
VPatBinaryV_VV_INT<string intrinsic, string instruction, 2601 list<VTypeInfo> vtilist> { 2602 foreach vti = vtilist in { 2603 defvar ivti = GetIntVTypeInfo<vti>.Vti; 2604 defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX, 2605 vti.Vector, vti.Vector, ivti.Vector, vti.Mask, 2606 vti.Log2SEW, vti.RegClass, 2607 vti.RegClass, vti.RegClass>; 2608 } 2609} 2610 2611multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction, 2612 int eew, list<VTypeInfo> vtilist> { 2613 foreach vti = vtilist in { 2614 // emul = lmul * eew / sew 2615 defvar vlmul = vti.LMul; 2616 defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret; 2617 defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW); 2618 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 2619 defvar emul_str = octuple_to_str<octuple_emul>.ret; 2620 defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str); 2621 defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str; 2622 defm : VPatBinary<intrinsic, inst, 2623 vti.Vector, vti.Vector, ivti.Vector, vti.Mask, 2624 vti.Log2SEW, vti.RegClass, 2625 vti.RegClass, ivti.RegClass>; 2626 } 2627 } 2628} 2629 2630multiclass VPatBinaryV_VX<string intrinsic, string instruction, 2631 list<VTypeInfo> vtilist> { 2632 foreach vti = vtilist in { 2633 defvar kind = "V"#vti.ScalarSuffix; 2634 defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX, 2635 vti.Vector, vti.Vector, vti.Scalar, vti.Mask, 2636 vti.Log2SEW, vti.RegClass, 2637 vti.RegClass, vti.ScalarRegClass>; 2638 } 2639} 2640 2641multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction, 2642 list<VTypeInfo> vtilist> { 2643 foreach vti = vtilist in 2644 defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX, 2645 vti.Vector, vti.Vector, XLenVT, vti.Mask, 2646 vti.Log2SEW, vti.RegClass, 2647 vti.RegClass, GPR>; 2648} 2649 2650multiclass VPatBinaryV_VI<string intrinsic, string instruction, 2651 list<VTypeInfo> vtilist, Operand imm_type> { 2652 foreach vti = vtilist in 2653 defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX, 2654 vti.Vector, vti.Vector, XLenVT, vti.Mask, 2655 vti.Log2SEW, vti.RegClass, 2656 vti.RegClass, imm_type>; 2657} 2658 2659multiclass VPatBinaryM_MM<string intrinsic, string instruction> { 2660 foreach mti = AllMasks in 2661 def : VPatBinaryNoMask<intrinsic, instruction # "_MM_" # mti.LMul.MX, 2662 mti.Mask, mti.Mask, mti.Mask, 2663 mti.Log2SEW, VR, VR>; 2664} 2665 2666multiclass VPatBinaryW_VV<string intrinsic, string instruction, 2667 list<VTypeInfoToWide> vtilist> { 2668 foreach VtiToWti = vtilist in { 2669 defvar Vti = VtiToWti.Vti; 2670 defvar Wti = VtiToWti.Wti; 2671 defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX, 2672 Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask, 2673 Vti.Log2SEW, Wti.RegClass, 2674 Vti.RegClass, Vti.RegClass>; 2675 } 2676} 2677 2678multiclass VPatBinaryW_VX<string intrinsic, string instruction, 2679 list<VTypeInfoToWide> vtilist> { 2680 foreach VtiToWti = vtilist in { 2681 defvar Vti = VtiToWti.Vti; 2682 defvar Wti = VtiToWti.Wti; 2683 defvar kind = "V"#Vti.ScalarSuffix; 2684 defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX, 2685 Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask, 2686 Vti.Log2SEW, Wti.RegClass, 2687 Vti.RegClass, Vti.ScalarRegClass>; 2688 } 2689} 2690 2691multiclass VPatBinaryW_WV<string intrinsic, string instruction, 2692 list<VTypeInfoToWide> vtilist> { 2693 foreach VtiToWti = vtilist in { 2694 defvar Vti = VtiToWti.Vti; 2695 defvar Wti = VtiToWti.Wti; 2696 defm : 
VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX, 2697 Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask, 2698 Vti.Log2SEW, Wti.RegClass, 2699 Wti.RegClass, Vti.RegClass>; 2700 } 2701} 2702 2703multiclass VPatBinaryW_WX<string intrinsic, string instruction, 2704 list<VTypeInfoToWide> vtilist> { 2705 foreach VtiToWti = vtilist in { 2706 defvar Vti = VtiToWti.Vti; 2707 defvar Wti = VtiToWti.Wti; 2708 defvar kind = "W"#Vti.ScalarSuffix; 2709 defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX, 2710 Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask, 2711 Vti.Log2SEW, Wti.RegClass, 2712 Wti.RegClass, Vti.ScalarRegClass>; 2713 } 2714} 2715 2716multiclass VPatBinaryV_WV<string intrinsic, string instruction, 2717 list<VTypeInfoToWide> vtilist> { 2718 foreach VtiToWti = vtilist in { 2719 defvar Vti = VtiToWti.Vti; 2720 defvar Wti = VtiToWti.Wti; 2721 defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX, 2722 Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask, 2723 Vti.Log2SEW, Vti.RegClass, 2724 Wti.RegClass, Vti.RegClass>; 2725 } 2726} 2727 2728multiclass VPatBinaryV_WX<string intrinsic, string instruction, 2729 list<VTypeInfoToWide> vtilist> { 2730 foreach VtiToWti = vtilist in { 2731 defvar Vti = VtiToWti.Vti; 2732 defvar Wti = VtiToWti.Wti; 2733 defvar kind = "W"#Vti.ScalarSuffix; 2734 defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX, 2735 Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask, 2736 Vti.Log2SEW, Vti.RegClass, 2737 Wti.RegClass, Vti.ScalarRegClass>; 2738 } 2739} 2740 2741multiclass VPatBinaryV_WI<string intrinsic, string instruction, 2742 list<VTypeInfoToWide> vtilist> { 2743 foreach VtiToWti = vtilist in { 2744 defvar Vti = VtiToWti.Vti; 2745 defvar Wti = VtiToWti.Wti; 2746 defm : VPatBinary<intrinsic, instruction # "_WI_" # Vti.LMul.MX, 2747 Vti.Vector, Wti.Vector, XLenVT, Vti.Mask, 2748 Vti.Log2SEW, Vti.RegClass, 2749 Wti.RegClass, uimm5>; 2750 } 2751} 2752 2753multiclass VPatBinaryV_VM<string intrinsic, string instruction, 2754 bit CarryOut = 0, 2755 list<VTypeInfo> vtilist = AllIntegerVectors> { 2756 foreach vti = vtilist in 2757 defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM", 2758 !if(CarryOut, vti.Mask, vti.Vector), 2759 vti.Vector, vti.Vector, vti.Mask, 2760 vti.Log2SEW, vti.LMul, 2761 vti.RegClass, vti.RegClass>; 2762} 2763 2764multiclass VPatBinaryV_XM<string intrinsic, string instruction, 2765 bit CarryOut = 0, 2766 list<VTypeInfo> vtilist = AllIntegerVectors> { 2767 foreach vti = vtilist in 2768 defm : VPatBinaryCarryIn<intrinsic, instruction, 2769 "V"#vti.ScalarSuffix#"M", 2770 !if(CarryOut, vti.Mask, vti.Vector), 2771 vti.Vector, vti.Scalar, vti.Mask, 2772 vti.Log2SEW, vti.LMul, 2773 vti.RegClass, vti.ScalarRegClass>; 2774} 2775 2776multiclass VPatBinaryV_IM<string intrinsic, string instruction, 2777 bit CarryOut = 0> { 2778 foreach vti = AllIntegerVectors in 2779 defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM", 2780 !if(CarryOut, vti.Mask, vti.Vector), 2781 vti.Vector, XLenVT, vti.Mask, 2782 vti.Log2SEW, vti.LMul, 2783 vti.RegClass, simm5>; 2784} 2785 2786multiclass VPatBinaryV_V<string intrinsic, string instruction> { 2787 foreach vti = AllIntegerVectors in 2788 defm : VPatBinaryMaskOut<intrinsic, instruction, "VV", 2789 vti.Mask, vti.Vector, vti.Vector, 2790 vti.Log2SEW, vti.LMul, 2791 vti.RegClass, vti.RegClass>; 2792} 2793 2794multiclass VPatBinaryV_X<string intrinsic, string instruction> { 2795 foreach vti = AllIntegerVectors in 2796 defm : VPatBinaryMaskOut<intrinsic, instruction, "VX", 2797 vti.Mask, vti.Vector, 
XLenVT, 2798 vti.Log2SEW, vti.LMul, 2799 vti.RegClass, GPR>; 2800} 2801 2802multiclass VPatBinaryV_I<string intrinsic, string instruction> { 2803 foreach vti = AllIntegerVectors in 2804 defm : VPatBinaryMaskOut<intrinsic, instruction, "VI", 2805 vti.Mask, vti.Vector, XLenVT, 2806 vti.Log2SEW, vti.LMul, 2807 vti.RegClass, simm5>; 2808} 2809 2810multiclass VPatBinaryM_VV<string intrinsic, string instruction, 2811 list<VTypeInfo> vtilist> { 2812 foreach vti = vtilist in 2813 defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX, 2814 vti.Mask, vti.Vector, vti.Vector, vti.Mask, 2815 vti.Log2SEW, VR, 2816 vti.RegClass, vti.RegClass>; 2817} 2818 2819multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction, 2820 list<VTypeInfo> vtilist> { 2821 foreach vti = vtilist in 2822 defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX, 2823 vti.Mask, vti.Vector, vti.Vector, vti.Mask, 2824 vti.Log2SEW, VR, 2825 vti.RegClass, vti.RegClass>; 2826} 2827 2828multiclass VPatBinaryM_VX<string intrinsic, string instruction, 2829 list<VTypeInfo> vtilist> { 2830 foreach vti = vtilist in { 2831 defvar kind = "V"#vti.ScalarSuffix; 2832 defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX, 2833 vti.Mask, vti.Vector, vti.Scalar, vti.Mask, 2834 vti.Log2SEW, VR, 2835 vti.RegClass, vti.ScalarRegClass>; 2836 } 2837} 2838 2839multiclass VPatBinaryM_VI<string intrinsic, string instruction, 2840 list<VTypeInfo> vtilist> { 2841 foreach vti = vtilist in 2842 defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX, 2843 vti.Mask, vti.Vector, XLenVT, vti.Mask, 2844 vti.Log2SEW, VR, 2845 vti.RegClass, simm5>; 2846} 2847 2848multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction, 2849 list<VTypeInfo> vtilist, Operand ImmType = simm5> 2850 : VPatBinaryV_VV<intrinsic, instruction, vtilist>, 2851 VPatBinaryV_VX<intrinsic, instruction, vtilist>, 2852 VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>; 2853 2854multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction, 2855 list<VTypeInfo> vtilist> 2856 : VPatBinaryV_VV<intrinsic, instruction, vtilist>, 2857 VPatBinaryV_VX<intrinsic, instruction, vtilist>; 2858 2859multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction, 2860 list<VTypeInfo> vtilist> 2861 : VPatBinaryV_VX<intrinsic, instruction, vtilist>, 2862 VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>; 2863 2864multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction, 2865 list<VTypeInfoToWide> vtilist> 2866 : VPatBinaryW_VV<intrinsic, instruction, vtilist>, 2867 VPatBinaryW_VX<intrinsic, instruction, vtilist>; 2868 2869multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction, 2870 list<VTypeInfoToWide> vtilist> 2871 : VPatBinaryW_WV<intrinsic, instruction, vtilist>, 2872 VPatBinaryW_WX<intrinsic, instruction, vtilist>; 2873 2874multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction, 2875 list<VTypeInfoToWide> vtilist> 2876 : VPatBinaryV_WV<intrinsic, instruction, vtilist>, 2877 VPatBinaryV_WX<intrinsic, instruction, vtilist>, 2878 VPatBinaryV_WI<intrinsic, instruction, vtilist>; 2879 2880multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction> 2881 : VPatBinaryV_VM<intrinsic, instruction>, 2882 VPatBinaryV_XM<intrinsic, instruction>, 2883 VPatBinaryV_IM<intrinsic, instruction>; 2884 2885multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction> 2886 : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>, 2887 VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>, 2888 
VPatBinaryV_IM<intrinsic, instruction, /*CarryOut=*/1>; 2889 2890multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction> 2891 : VPatBinaryV_V<intrinsic, instruction>, 2892 VPatBinaryV_X<intrinsic, instruction>, 2893 VPatBinaryV_I<intrinsic, instruction>; 2894 2895multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction> 2896 : VPatBinaryV_VM<intrinsic, instruction>, 2897 VPatBinaryV_XM<intrinsic, instruction>; 2898 2899multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction> 2900 : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>, 2901 VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>; 2902 2903multiclass VPatBinaryM_V_X<string intrinsic, string instruction> 2904 : VPatBinaryV_V<intrinsic, instruction>, 2905 VPatBinaryV_X<intrinsic, instruction>; 2906 2907multiclass VPatTernary<string intrinsic, 2908 string inst, 2909 string kind, 2910 ValueType result_type, 2911 ValueType op1_type, 2912 ValueType op2_type, 2913 ValueType mask_type, 2914 int sew, 2915 LMULInfo vlmul, 2916 VReg result_reg_class, 2917 RegisterClass op1_reg_class, 2918 DAGOperand op2_kind> { 2919 def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type, 2920 mask_type, sew, vlmul, result_reg_class, op1_reg_class, 2921 op2_kind>; 2922 def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type, 2923 mask_type, sew, vlmul, result_reg_class, op1_reg_class, 2924 op2_kind>; 2925} 2926 2927multiclass VPatTernaryV_VV<string intrinsic, string instruction, 2928 list<VTypeInfo> vtilist> { 2929 foreach vti = vtilist in 2930 defm : VPatTernary<intrinsic, instruction, "VV", 2931 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 2932 vti.Log2SEW, vti.LMul, vti.RegClass, 2933 vti.RegClass, vti.RegClass>; 2934} 2935 2936multiclass VPatTernaryV_VX<string intrinsic, string instruction, 2937 list<VTypeInfo> vtilist> { 2938 foreach vti = vtilist in 2939 defm : VPatTernary<intrinsic, instruction, "VX", 2940 vti.Vector, vti.Vector, XLenVT, vti.Mask, 2941 vti.Log2SEW, vti.LMul, vti.RegClass, 2942 vti.RegClass, GPR>; 2943} 2944 2945multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction, 2946 list<VTypeInfo> vtilist> { 2947 foreach vti = vtilist in 2948 defm : VPatTernary<intrinsic, instruction, 2949 "V"#vti.ScalarSuffix, 2950 vti.Vector, vti.Scalar, vti.Vector, vti.Mask, 2951 vti.Log2SEW, vti.LMul, vti.RegClass, 2952 vti.ScalarRegClass, vti.RegClass>; 2953} 2954 2955multiclass VPatTernaryV_VI<string intrinsic, string instruction, 2956 list<VTypeInfo> vtilist, Operand Imm_type> { 2957 foreach vti = vtilist in 2958 defm : VPatTernary<intrinsic, instruction, "VI", 2959 vti.Vector, vti.Vector, XLenVT, vti.Mask, 2960 vti.Log2SEW, vti.LMul, vti.RegClass, 2961 vti.RegClass, Imm_type>; 2962} 2963 2964multiclass VPatTernaryW_VV<string intrinsic, string instruction, 2965 list<VTypeInfoToWide> vtilist> { 2966 foreach vtiToWti = vtilist in { 2967 defvar vti = vtiToWti.Vti; 2968 defvar wti = vtiToWti.Wti; 2969 defm : VPatTernary<intrinsic, instruction, "VV", 2970 wti.Vector, vti.Vector, vti.Vector, 2971 vti.Mask, vti.Log2SEW, vti.LMul, 2972 wti.RegClass, vti.RegClass, vti.RegClass>; 2973 } 2974} 2975 2976multiclass VPatTernaryW_VX<string intrinsic, string instruction, 2977 list<VTypeInfoToWide> vtilist> { 2978 foreach vtiToWti = vtilist in { 2979 defvar vti = vtiToWti.Vti; 2980 defvar wti = vtiToWti.Wti; 2981 defm : VPatTernary<intrinsic, instruction, 2982 "V"#vti.ScalarSuffix, 2983 wti.Vector, vti.Scalar, vti.Vector, 2984 vti.Mask, vti.Log2SEW, vti.LMul, 2985 wti.RegClass, 
vti.ScalarRegClass, vti.RegClass>; 2986 } 2987} 2988 2989multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction, 2990 list<VTypeInfo> vtilist> 2991 : VPatTernaryV_VV<intrinsic, instruction, vtilist>, 2992 VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>; 2993 2994multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction, 2995 list<VTypeInfo> vtilist, Operand Imm_type = simm5> 2996 : VPatTernaryV_VX<intrinsic, instruction, vtilist>, 2997 VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>; 2998 2999multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction, 3000 list<VTypeInfo> vtilist> 3001 : VPatBinaryM_VV<intrinsic, instruction, vtilist>, 3002 VPatBinaryM_VX<intrinsic, instruction, vtilist>, 3003 VPatBinaryM_VI<intrinsic, instruction, vtilist>; 3004 3005multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction, 3006 list<VTypeInfoToWide> vtilist> 3007 : VPatTernaryW_VV<intrinsic, instruction, vtilist>, 3008 VPatTernaryW_VX<intrinsic, instruction, vtilist>; 3009 3010multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction, 3011 list<VTypeInfo> vtilist> 3012 : VPatBinaryM_VV<intrinsic, instruction, vtilist>, 3013 VPatBinaryM_VX<intrinsic, instruction, vtilist>; 3014 3015multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction, 3016 list<VTypeInfo> vtilist> 3017 : VPatBinaryM_VX<intrinsic, instruction, vtilist>, 3018 VPatBinaryM_VI<intrinsic, instruction, vtilist>; 3019 3020multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction, 3021 list<VTypeInfo> vtilist, Operand ImmType = simm5> 3022 : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>, 3023 VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>, 3024 VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>; 3025 3026multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> { 3027 foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in 3028 { 3029 defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1"); 3030 defm : VPatTernary<intrinsic, instruction, "VS", 3031 vectorM1.Vector, vti.Vector, 3032 vectorM1.Vector, vti.Mask, 3033 vti.Log2SEW, vti.LMul, 3034 VR, vti.RegClass, VR>; 3035 } 3036 foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in 3037 { 3038 defm : VPatTernary<intrinsic, instruction, "VS", 3039 gvti.VectorM1, gvti.Vector, 3040 gvti.VectorM1, gvti.Mask, 3041 gvti.Log2SEW, gvti.LMul, 3042 VR, gvti.RegClass, VR>; 3043 } 3044} 3045 3046multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> { 3047 foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in 3048 { 3049 defvar wtiSEW = !mul(vti.SEW, 2); 3050 if !le(wtiSEW, 64) then { 3051 defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1"); 3052 defm : VPatTernary<intrinsic, instruction, "VS", 3053 wtiM1.Vector, vti.Vector, 3054 wtiM1.Vector, vti.Mask, 3055 vti.Log2SEW, vti.LMul, 3056 wtiM1.RegClass, vti.RegClass, 3057 wtiM1.RegClass>; 3058 } 3059 } 3060} 3061 3062multiclass VPatConversionVI_VF<string intrinsic, 3063 string instruction> 3064{ 3065 foreach fvti = AllFloatVectors in 3066 { 3067 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 3068 3069 defm : VPatConversion<intrinsic, instruction, "V", 3070 ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW, 3071 fvti.LMul, ivti.RegClass, fvti.RegClass>; 3072 } 3073} 3074 3075multiclass VPatConversionVF_VI<string intrinsic, 3076 string instruction> 3077{ 3078 foreach fvti = AllFloatVectors in 3079 { 
3080 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 3081 3082 defm : VPatConversion<intrinsic, instruction, "V", 3083 fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW, 3084 ivti.LMul, fvti.RegClass, ivti.RegClass>; 3085 } 3086} 3087 3088multiclass VPatConversionWI_VF<string intrinsic, string instruction> { 3089 foreach fvtiToFWti = AllWidenableFloatVectors in 3090 { 3091 defvar fvti = fvtiToFWti.Vti; 3092 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 3093 3094 defm : VPatConversion<intrinsic, instruction, "V", 3095 iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW, 3096 fvti.LMul, iwti.RegClass, fvti.RegClass>; 3097 } 3098} 3099 3100multiclass VPatConversionWF_VI<string intrinsic, string instruction> { 3101 foreach vtiToWti = AllWidenableIntToFloatVectors in 3102 { 3103 defvar vti = vtiToWti.Vti; 3104 defvar fwti = vtiToWti.Wti; 3105 3106 defm : VPatConversion<intrinsic, instruction, "V", 3107 fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW, 3108 vti.LMul, fwti.RegClass, vti.RegClass>; 3109 } 3110} 3111 3112multiclass VPatConversionWF_VF <string intrinsic, string instruction> { 3113 foreach fvtiToFWti = AllWidenableFloatVectors in 3114 { 3115 defvar fvti = fvtiToFWti.Vti; 3116 defvar fwti = fvtiToFWti.Wti; 3117 3118 defm : VPatConversion<intrinsic, instruction, "V", 3119 fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW, 3120 fvti.LMul, fwti.RegClass, fvti.RegClass>; 3121 } 3122} 3123 3124multiclass VPatConversionVI_WF <string intrinsic, string instruction> { 3125 foreach vtiToWti = AllWidenableIntToFloatVectors in 3126 { 3127 defvar vti = vtiToWti.Vti; 3128 defvar fwti = vtiToWti.Wti; 3129 3130 defm : VPatConversion<intrinsic, instruction, "W", 3131 vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW, 3132 vti.LMul, vti.RegClass, fwti.RegClass>; 3133 } 3134} 3135 3136multiclass VPatConversionVF_WI <string intrinsic, string instruction> { 3137 foreach fvtiToFWti = AllWidenableFloatVectors in 3138 { 3139 defvar fvti = fvtiToFWti.Vti; 3140 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 3141 3142 defm : VPatConversion<intrinsic, instruction, "W", 3143 fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW, 3144 fvti.LMul, fvti.RegClass, iwti.RegClass>; 3145 } 3146} 3147 3148multiclass VPatConversionVF_WF <string intrinsic, string instruction> { 3149 foreach fvtiToFWti = AllWidenableFloatVectors in 3150 { 3151 defvar fvti = fvtiToFWti.Vti; 3152 defvar fwti = fvtiToFWti.Wti; 3153 3154 defm : VPatConversion<intrinsic, instruction, "W", 3155 fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW, 3156 fvti.LMul, fvti.RegClass, fwti.RegClass>; 3157 } 3158} 3159 3160multiclass VPatAMOWD<string intrinsic, 3161 string inst, 3162 ValueType result_type, 3163 ValueType offset_type, 3164 ValueType mask_type, 3165 int sew, 3166 LMULInfo vlmul, 3167 LMULInfo emul, 3168 VReg op1_reg_class> 3169{ 3170 def : VPatAMOWDNoMask<intrinsic, inst, result_type, offset_type, 3171 sew, vlmul, emul, op1_reg_class>; 3172 def : VPatAMOWDMask<intrinsic, inst, result_type, offset_type, 3173 mask_type, sew, vlmul, emul, op1_reg_class>; 3174} 3175 3176multiclass VPatAMOV_WD<string intrinsic, 3177 string inst, 3178 list<VTypeInfo> vtilist> { 3179 foreach eew = EEWList in { 3180 foreach vti = vtilist in { 3181 if !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)) then { 3182 defvar octuple_lmul = octuple_from_str<vti.LMul.MX>.ret; 3183 // Calculate emul = eew * lmul / sew 3184 defvar octuple_emul = !srl(!mul(eew, octuple_lmul), vti.Log2SEW); 3185 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { 3186 defvar emulMX = 
octuple_to_str<octuple_emul>.ret; 3187 defvar offsetVti = !cast<VTypeInfo>("VI" # eew # emulMX); 3188 defvar inst_ei = inst # "EI" # eew; 3189 defm : VPatAMOWD<intrinsic, inst_ei, 3190 vti.Vector, offsetVti.Vector, 3191 vti.Mask, vti.Log2SEW, vti.LMul, offsetVti.LMul, offsetVti.RegClass>; 3192 } 3193 } 3194 } 3195 } 3196} 3197 3198//===----------------------------------------------------------------------===// 3199// Pseudo instructions 3200//===----------------------------------------------------------------------===// 3201 3202let Predicates = [HasStdExtV] in { 3203 3204//===----------------------------------------------------------------------===// 3205// Pseudo Instructions for CodeGen 3206//===----------------------------------------------------------------------===// 3207let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { 3208 def PseudoVMV1R_V : VPseudo<VMV1R_V, V_M1, (outs VR:$vd), (ins VR:$vs2)>; 3209 def PseudoVMV2R_V : VPseudo<VMV2R_V, V_M2, (outs VRM2:$vd), (ins VRM2:$vs2)>; 3210 def PseudoVMV4R_V : VPseudo<VMV4R_V, V_M4, (outs VRM4:$vd), (ins VRM4:$vs2)>; 3211 def PseudoVMV8R_V : VPseudo<VMV8R_V, V_M8, (outs VRM8:$vd), (ins VRM8:$vs2)>; 3212} 3213 3214let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in { 3215 def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins), 3216 [(set GPR:$rd, (riscv_read_vlenb))]>; 3217} 3218 3219let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1, 3220 Uses = [VL] in 3221def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>; 3222 3223let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in { 3224 def PseudoVSPILL_M1 : VPseudo<VS1R_V, V_M1, (outs), (ins VR:$rs1, GPR:$rs2)>; 3225 def PseudoVSPILL_M2 : VPseudo<VS2R_V, V_M2, (outs), (ins VRM2:$rs1, GPR:$rs2)>; 3226 def PseudoVSPILL_M4 : VPseudo<VS4R_V, V_M4, (outs), (ins VRM4:$rs1, GPR:$rs2)>; 3227 def PseudoVSPILL_M8 : VPseudo<VS8R_V, V_M8, (outs), (ins VRM8:$rs1, GPR:$rs2)>; 3228} 3229 3230let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in { 3231 def PseudoVRELOAD_M1 : VPseudo<VL1RE8_V, V_M1, (outs VR:$rs1), (ins GPR:$rs2)>; 3232 def PseudoVRELOAD_M2 : VPseudo<VL2RE8_V, V_M2, (outs VRM2:$rs1), (ins GPR:$rs2)>; 3233 def PseudoVRELOAD_M4 : VPseudo<VL4RE8_V, V_M4, (outs VRM4:$rs1), (ins GPR:$rs2)>; 3234 def PseudoVRELOAD_M8 : VPseudo<VL8RE8_V, V_M8, (outs VRM8:$rs1), (ins GPR:$rs2)>; 3235} 3236 3237foreach lmul = MxList.m in { 3238 foreach nf = NFSet<lmul>.L in { 3239 defvar vreg = SegRegClass<lmul, nf>.RC; 3240 let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in { 3241 def "PseudoVSPILL" # nf # "_" # lmul.MX : 3242 Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2, GPR:$vlenb), []>; 3243 } 3244 let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in { 3245 def "PseudoVRELOAD" # nf # "_" # lmul.MX : 3246 Pseudo<(outs vreg:$rs1), (ins GPR:$rs2, GPR:$vlenb), []>; 3247 } 3248 } 3249} 3250 3251//===----------------------------------------------------------------------===// 3252// 6. Configuration-Setting Instructions 3253//===----------------------------------------------------------------------===// 3254 3255// Pseudos. 3256let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in { 3257def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), []>; 3258def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp:$vtypei), []>; 3259} 3260 3261//===----------------------------------------------------------------------===// 3262// 7. 
Vector Loads and Stores 3263//===----------------------------------------------------------------------===// 3264 3265//===----------------------------------------------------------------------===// 3266// 7.4 Vector Unit-Stride Instructions 3267//===----------------------------------------------------------------------===// 3268 3269// Pseudos Unit-Stride Loads and Stores 3270defm PseudoVL : VPseudoUSLoad</*isFF=*/false>; 3271defm PseudoVS : VPseudoUSStore; 3272 3273defm PseudoVLE1 : VPseudoLoadMask; 3274defm PseudoVSE1 : VPseudoStoreMask; 3275 3276//===----------------------------------------------------------------------===// 3277// 7.5 Vector Strided Instructions 3278//===----------------------------------------------------------------------===// 3279 3280// Vector Strided Loads and Stores 3281defm PseudoVLS : VPseudoSLoad; 3282defm PseudoVSS : VPseudoSStore; 3283 3284//===----------------------------------------------------------------------===// 3285// 7.6 Vector Indexed Instructions 3286//===----------------------------------------------------------------------===// 3287 3288// Vector Indexed Loads and Stores 3289defm PseudoVLUX : VPseudoILoad</*Ordered=*/false>; 3290defm PseudoVLOX : VPseudoILoad</*Ordered=*/true>; 3291defm PseudoVSOX : VPseudoIStore</*Ordered=*/true>; 3292defm PseudoVSUX : VPseudoIStore</*Ordered=*/false>; 3293 3294//===----------------------------------------------------------------------===// 3295// 7.7. Unit-stride Fault-Only-First Loads 3296//===----------------------------------------------------------------------===// 3297 3298// vleff may update VL register 3299let hasSideEffects = 1, Defs = [VL] in 3300defm PseudoVL : VPseudoUSLoad</*isFF=*/true>; 3301 3302//===----------------------------------------------------------------------===// 3303// 7.8. Vector Load/Store Segment Instructions 3304//===----------------------------------------------------------------------===// 3305defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/false>; 3306defm PseudoVLSSEG : VPseudoSSegLoad; 3307defm PseudoVLOXSEG : VPseudoISegLoad</*Ordered=*/true>; 3308defm PseudoVLUXSEG : VPseudoISegLoad</*Ordered=*/false>; 3309defm PseudoVSSEG : VPseudoUSSegStore; 3310defm PseudoVSSSEG : VPseudoSSegStore; 3311defm PseudoVSOXSEG : VPseudoISegStore</*Ordered=*/true>; 3312defm PseudoVSUXSEG : VPseudoISegStore</*Ordered=*/false>; 3313 3314// vlseg<nf>e<eew>ff.v may update VL register 3315let hasSideEffects = 1, Defs = [VL] in 3316defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/true>; 3317 3318//===----------------------------------------------------------------------===// 3319// 8. Vector AMO Operations 3320//===----------------------------------------------------------------------===// 3321defm PseudoVAMOSWAP : VPseudoAMO; 3322defm PseudoVAMOADD : VPseudoAMO; 3323defm PseudoVAMOXOR : VPseudoAMO; 3324defm PseudoVAMOAND : VPseudoAMO; 3325defm PseudoVAMOOR : VPseudoAMO; 3326defm PseudoVAMOMIN : VPseudoAMO; 3327defm PseudoVAMOMAX : VPseudoAMO; 3328defm PseudoVAMOMINU : VPseudoAMO; 3329defm PseudoVAMOMAXU : VPseudoAMO; 3330 3331//===----------------------------------------------------------------------===// 3332// 12. Vector Integer Arithmetic Instructions 3333//===----------------------------------------------------------------------===// 3334 3335//===----------------------------------------------------------------------===// 3336// 12.1. 
Vector Single-Width Integer Add and Subtract 3337//===----------------------------------------------------------------------===// 3338defm PseudoVADD : VPseudoBinaryV_VV_VX_VI; 3339defm PseudoVSUB : VPseudoBinaryV_VV_VX; 3340defm PseudoVRSUB : VPseudoBinaryV_VX_VI; 3341 3342foreach vti = AllIntegerVectors in { 3343 // Match vrsub with two vector operands to vsub.vv by swapping operands. This 3344 // occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need 3345 // to use a more complex splat sequence. Add the pattern for all VTs for 3346 // consistency. 3347 def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$rs2), 3348 (vti.Vector vti.RegClass:$rs1), 3349 VLOpFrag)), 3350 (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1, 3351 vti.RegClass:$rs2, 3352 GPR:$vl, 3353 vti.Log2SEW)>; 3354 def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge), 3355 (vti.Vector vti.RegClass:$rs2), 3356 (vti.Vector vti.RegClass:$rs1), 3357 (vti.Mask V0), 3358 VLOpFrag)), 3359 (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK") 3360 vti.RegClass:$merge, 3361 vti.RegClass:$rs1, 3362 vti.RegClass:$rs2, 3363 (vti.Mask V0), 3364 GPR:$vl, 3365 vti.Log2SEW)>; 3366 3367 // Match vsub with a small immediate to vadd.vi by negating the immediate. 3368 def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1), 3369 (vti.Scalar simm5_plus1:$rs2), 3370 VLOpFrag)), 3371 (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1, 3372 (NegImm simm5_plus1:$rs2), 3373 GPR:$vl, 3374 vti.Log2SEW)>; 3375 def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge), 3376 (vti.Vector vti.RegClass:$rs1), 3377 (vti.Scalar simm5_plus1:$rs2), 3378 (vti.Mask V0), 3379 VLOpFrag)), 3380 (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK") 3381 vti.RegClass:$merge, 3382 vti.RegClass:$rs1, 3383 (NegImm simm5_plus1:$rs2), 3384 (vti.Mask V0), 3385 GPR:$vl, 3386 vti.Log2SEW)>; 3387} 3388 3389//===----------------------------------------------------------------------===// 3390// 12.2. Vector Widening Integer Add/Subtract 3391//===----------------------------------------------------------------------===// 3392defm PseudoVWADDU : VPseudoBinaryW_VV_VX; 3393defm PseudoVWSUBU : VPseudoBinaryW_VV_VX; 3394defm PseudoVWADD : VPseudoBinaryW_VV_VX; 3395defm PseudoVWSUB : VPseudoBinaryW_VV_VX; 3396defm PseudoVWADDU : VPseudoBinaryW_WV_WX; 3397defm PseudoVWSUBU : VPseudoBinaryW_WV_WX; 3398defm PseudoVWADD : VPseudoBinaryW_WV_WX; 3399defm PseudoVWSUB : VPseudoBinaryW_WV_WX; 3400 3401//===----------------------------------------------------------------------===// 3402// 12.3. Vector Integer Extension 3403//===----------------------------------------------------------------------===// 3404defm PseudoVZEXT_VF2 : PseudoUnaryV_VF2; 3405defm PseudoVZEXT_VF4 : PseudoUnaryV_VF4; 3406defm PseudoVZEXT_VF8 : PseudoUnaryV_VF8; 3407defm PseudoVSEXT_VF2 : PseudoUnaryV_VF2; 3408defm PseudoVSEXT_VF4 : PseudoUnaryV_VF4; 3409defm PseudoVSEXT_VF8 : PseudoUnaryV_VF8; 3410 3411//===----------------------------------------------------------------------===// 3412// 12.4.
Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions 3413//===----------------------------------------------------------------------===// 3414defm PseudoVADC : VPseudoBinaryV_VM_XM_IM; 3415defm PseudoVMADC : VPseudoBinaryM_VM_XM_IM<"@earlyclobber $rd">; 3416defm PseudoVMADC : VPseudoBinaryM_V_X_I<"@earlyclobber $rd">; 3417 3418defm PseudoVSBC : VPseudoBinaryV_VM_XM; 3419defm PseudoVMSBC : VPseudoBinaryM_VM_XM<"@earlyclobber $rd">; 3420defm PseudoVMSBC : VPseudoBinaryM_V_X<"@earlyclobber $rd">; 3421 3422//===----------------------------------------------------------------------===// 3423// 12.5. Vector Bitwise Logical Instructions 3424//===----------------------------------------------------------------------===// 3425defm PseudoVAND : VPseudoBinaryV_VV_VX_VI; 3426defm PseudoVOR : VPseudoBinaryV_VV_VX_VI; 3427defm PseudoVXOR : VPseudoBinaryV_VV_VX_VI; 3428 3429//===----------------------------------------------------------------------===// 3430// 12.6. Vector Single-Width Bit Shift Instructions 3431//===----------------------------------------------------------------------===// 3432defm PseudoVSLL : VPseudoBinaryV_VV_VX_VI<uimm5>; 3433defm PseudoVSRL : VPseudoBinaryV_VV_VX_VI<uimm5>; 3434defm PseudoVSRA : VPseudoBinaryV_VV_VX_VI<uimm5>; 3435 3436//===----------------------------------------------------------------------===// 3437// 12.7. Vector Narrowing Integer Right Shift Instructions 3438//===----------------------------------------------------------------------===// 3439defm PseudoVNSRL : VPseudoBinaryV_WV_WX_WI; 3440defm PseudoVNSRA : VPseudoBinaryV_WV_WX_WI; 3441 3442//===----------------------------------------------------------------------===// 3443// 12.8. Vector Integer Comparison Instructions 3444//===----------------------------------------------------------------------===// 3445defm PseudoVMSEQ : VPseudoBinaryM_VV_VX_VI; 3446defm PseudoVMSNE : VPseudoBinaryM_VV_VX_VI; 3447defm PseudoVMSLTU : VPseudoBinaryM_VV_VX; 3448defm PseudoVMSLT : VPseudoBinaryM_VV_VX; 3449defm PseudoVMSLEU : VPseudoBinaryM_VV_VX_VI; 3450defm PseudoVMSLE : VPseudoBinaryM_VV_VX_VI; 3451defm PseudoVMSGTU : VPseudoBinaryM_VX_VI; 3452defm PseudoVMSGT : VPseudoBinaryM_VX_VI; 3453 3454//===----------------------------------------------------------------------===// 3455// 12.9. Vector Integer Min/Max Instructions 3456//===----------------------------------------------------------------------===// 3457defm PseudoVMINU : VPseudoBinaryV_VV_VX; 3458defm PseudoVMIN : VPseudoBinaryV_VV_VX; 3459defm PseudoVMAXU : VPseudoBinaryV_VV_VX; 3460defm PseudoVMAX : VPseudoBinaryV_VV_VX; 3461 3462//===----------------------------------------------------------------------===// 3463// 12.10. Vector Single-Width Integer Multiply Instructions 3464//===----------------------------------------------------------------------===// 3465defm PseudoVMUL : VPseudoBinaryV_VV_VX; 3466defm PseudoVMULH : VPseudoBinaryV_VV_VX; 3467defm PseudoVMULHU : VPseudoBinaryV_VV_VX; 3468defm PseudoVMULHSU : VPseudoBinaryV_VV_VX; 3469 3470//===----------------------------------------------------------------------===// 3471// 12.11. Vector Integer Divide Instructions 3472//===----------------------------------------------------------------------===// 3473defm PseudoVDIVU : VPseudoBinaryV_VV_VX; 3474defm PseudoVDIV : VPseudoBinaryV_VV_VX; 3475defm PseudoVREMU : VPseudoBinaryV_VV_VX; 3476defm PseudoVREM : VPseudoBinaryV_VV_VX; 3477 3478//===----------------------------------------------------------------------===// 3479// 12.12. 
Vector Widening Integer Multiply Instructions 3480//===----------------------------------------------------------------------===// 3481defm PseudoVWMUL : VPseudoBinaryW_VV_VX; 3482defm PseudoVWMULU : VPseudoBinaryW_VV_VX; 3483defm PseudoVWMULSU : VPseudoBinaryW_VV_VX; 3484 3485//===----------------------------------------------------------------------===// 3486// 12.13. Vector Single-Width Integer Multiply-Add Instructions 3487//===----------------------------------------------------------------------===// 3488defm PseudoVMACC : VPseudoTernaryV_VV_VX_AAXA; 3489defm PseudoVNMSAC : VPseudoTernaryV_VV_VX_AAXA; 3490defm PseudoVMADD : VPseudoTernaryV_VV_VX_AAXA; 3491defm PseudoVNMSUB : VPseudoTernaryV_VV_VX_AAXA; 3492 3493//===----------------------------------------------------------------------===// 3494// 12.14. Vector Widening Integer Multiply-Add Instructions 3495//===----------------------------------------------------------------------===// 3496defm PseudoVWMACCU : VPseudoTernaryW_VV_VX; 3497defm PseudoVWMACC : VPseudoTernaryW_VV_VX; 3498defm PseudoVWMACCSU : VPseudoTernaryW_VV_VX; 3499defm PseudoVWMACCUS : VPseudoTernaryW_VX; 3500 3501//===----------------------------------------------------------------------===// 3502// 12.15. Vector Integer Merge Instructions 3503//===----------------------------------------------------------------------===// 3504defm PseudoVMERGE : VPseudoBinaryV_VM_XM_IM; 3505 3506//===----------------------------------------------------------------------===// 3507// 12.16. Vector Integer Move Instructions 3508//===----------------------------------------------------------------------===// 3509defm PseudoVMV_V : VPseudoUnaryV_V_X_I_NoDummyMask; 3510 3511//===----------------------------------------------------------------------===// 3512// 13.1. Vector Single-Width Saturating Add and Subtract 3513//===----------------------------------------------------------------------===// 3514let Defs = [VXSAT], hasSideEffects = 1 in { 3515 defm PseudoVSADDU : VPseudoBinaryV_VV_VX_VI; 3516 defm PseudoVSADD : VPseudoBinaryV_VV_VX_VI; 3517 defm PseudoVSSUBU : VPseudoBinaryV_VV_VX; 3518 defm PseudoVSSUB : VPseudoBinaryV_VV_VX; 3519} 3520 3521//===----------------------------------------------------------------------===// 3522// 13.2. Vector Single-Width Averaging Add and Subtract 3523//===----------------------------------------------------------------------===// 3524let Uses = [VL, VTYPE, VXRM], hasSideEffects = 1 in { 3525 defm PseudoVAADDU : VPseudoBinaryV_VV_VX; 3526 defm PseudoVAADD : VPseudoBinaryV_VV_VX; 3527 defm PseudoVASUBU : VPseudoBinaryV_VV_VX; 3528 defm PseudoVASUB : VPseudoBinaryV_VV_VX; 3529} 3530 3531//===----------------------------------------------------------------------===// 3532// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation 3533//===----------------------------------------------------------------------===// 3534let Uses = [VL, VTYPE, VXRM], Defs = [VXSAT], hasSideEffects = 1 in { 3535 defm PseudoVSMUL : VPseudoBinaryV_VV_VX; 3536} 3537 3538//===----------------------------------------------------------------------===// 3539// 13.4. 
Vector Single-Width Scaling Shift Instructions 3540//===----------------------------------------------------------------------===// 3541let Uses = [VL, VTYPE, VXRM], hasSideEffects = 1 in { 3542 defm PseudoVSSRL : VPseudoBinaryV_VV_VX_VI<uimm5>; 3543 defm PseudoVSSRA : VPseudoBinaryV_VV_VX_VI<uimm5>; 3544} 3545 3546//===----------------------------------------------------------------------===// 3547// 13.5. Vector Narrowing Fixed-Point Clip Instructions 3548//===----------------------------------------------------------------------===// 3549let Uses = [VL, VTYPE, VXRM], Defs = [VXSAT], hasSideEffects = 1 in { 3550 defm PseudoVNCLIP : VPseudoBinaryV_WV_WX_WI; 3551 defm PseudoVNCLIPU : VPseudoBinaryV_WV_WX_WI; 3552} 3553 3554} // Predicates = [HasStdExtV] 3555 3556let Predicates = [HasStdExtV, HasStdExtF] in { 3557//===----------------------------------------------------------------------===// 3558// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions 3559//===----------------------------------------------------------------------===// 3560defm PseudoVFADD : VPseudoBinaryV_VV_VF; 3561defm PseudoVFSUB : VPseudoBinaryV_VV_VF; 3562defm PseudoVFRSUB : VPseudoBinaryV_VF; 3563 3564//===----------------------------------------------------------------------===// 3565// 14.3. Vector Widening Floating-Point Add/Subtract Instructions 3566//===----------------------------------------------------------------------===// 3567defm PseudoVFWADD : VPseudoBinaryW_VV_VF; 3568defm PseudoVFWSUB : VPseudoBinaryW_VV_VF; 3569defm PseudoVFWADD : VPseudoBinaryW_WV_WF; 3570defm PseudoVFWSUB : VPseudoBinaryW_WV_WF; 3571 3572//===----------------------------------------------------------------------===// 3573// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 3574//===----------------------------------------------------------------------===// 3575defm PseudoVFMUL : VPseudoBinaryV_VV_VF; 3576defm PseudoVFDIV : VPseudoBinaryV_VV_VF; 3577defm PseudoVFRDIV : VPseudoBinaryV_VF; 3578 3579//===----------------------------------------------------------------------===// 3580// 14.5. Vector Widening Floating-Point Multiply 3581//===----------------------------------------------------------------------===// 3582defm PseudoVFWMUL : VPseudoBinaryW_VV_VF; 3583 3584//===----------------------------------------------------------------------===// 3585// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions 3586//===----------------------------------------------------------------------===// 3587defm PseudoVFMACC : VPseudoTernaryV_VV_VF_AAXA; 3588defm PseudoVFNMACC : VPseudoTernaryV_VV_VF_AAXA; 3589defm PseudoVFMSAC : VPseudoTernaryV_VV_VF_AAXA; 3590defm PseudoVFNMSAC : VPseudoTernaryV_VV_VF_AAXA; 3591defm PseudoVFMADD : VPseudoTernaryV_VV_VF_AAXA; 3592defm PseudoVFNMADD : VPseudoTernaryV_VV_VF_AAXA; 3593defm PseudoVFMSUB : VPseudoTernaryV_VV_VF_AAXA; 3594defm PseudoVFNMSUB : VPseudoTernaryV_VV_VF_AAXA; 3595 3596//===----------------------------------------------------------------------===// 3597// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 3598//===----------------------------------------------------------------------===// 3599defm PseudoVFWMACC : VPseudoTernaryW_VV_VF; 3600defm PseudoVFWNMACC : VPseudoTernaryW_VV_VF; 3601defm PseudoVFWMSAC : VPseudoTernaryW_VV_VF; 3602defm PseudoVFWNMSAC : VPseudoTernaryW_VV_VF; 3603 3604//===----------------------------------------------------------------------===// 3605// 14.8. 
Vector Floating-Point Square-Root Instruction 3606//===----------------------------------------------------------------------===// 3607defm PseudoVFSQRT : VPseudoUnaryV_V; 3608 3609//===----------------------------------------------------------------------===// 3610// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction 3611//===----------------------------------------------------------------------===// 3612defm PseudoVFRSQRT7 : VPseudoUnaryV_V; 3613 3614//===----------------------------------------------------------------------===// 3615// 14.10. Vector Floating-Point Reciprocal Estimate Instruction 3616//===----------------------------------------------------------------------===// 3617defm PseudoVFREC7 : VPseudoUnaryV_V; 3618 3619//===----------------------------------------------------------------------===// 3620// 14.11. Vector Floating-Point Min/Max Instructions 3621//===----------------------------------------------------------------------===// 3622defm PseudoVFMIN : VPseudoBinaryV_VV_VF; 3623defm PseudoVFMAX : VPseudoBinaryV_VV_VF; 3624 3625//===----------------------------------------------------------------------===// 3626// 14.12. Vector Floating-Point Sign-Injection Instructions 3627//===----------------------------------------------------------------------===// 3628defm PseudoVFSGNJ : VPseudoBinaryV_VV_VF; 3629defm PseudoVFSGNJN : VPseudoBinaryV_VV_VF; 3630defm PseudoVFSGNJX : VPseudoBinaryV_VV_VF; 3631 3632//===----------------------------------------------------------------------===// 3633// 14.13. Vector Floating-Point Compare Instructions 3634//===----------------------------------------------------------------------===// 3635defm PseudoVMFEQ : VPseudoBinaryM_VV_VF; 3636defm PseudoVMFNE : VPseudoBinaryM_VV_VF; 3637defm PseudoVMFLT : VPseudoBinaryM_VV_VF; 3638defm PseudoVMFLE : VPseudoBinaryM_VV_VF; 3639defm PseudoVMFGT : VPseudoBinaryM_VF; 3640defm PseudoVMFGE : VPseudoBinaryM_VF; 3641 3642//===----------------------------------------------------------------------===// 3643// 14.14. Vector Floating-Point Classify Instruction 3644//===----------------------------------------------------------------------===// 3645defm PseudoVFCLASS : VPseudoUnaryV_V; 3646 3647//===----------------------------------------------------------------------===// 3648// 14.15. Vector Floating-Point Merge Instruction 3649//===----------------------------------------------------------------------===// 3650defm PseudoVFMERGE : VPseudoBinaryV_FM; 3651 3652//===----------------------------------------------------------------------===// 3653// 14.16. Vector Floating-Point Move Instruction 3654//===----------------------------------------------------------------------===// 3655defm PseudoVFMV_V : VPseudoUnaryV_F_NoDummyMask; 3656 3657//===----------------------------------------------------------------------===// 3658// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions 3659//===----------------------------------------------------------------------===// 3660defm PseudoVFCVT_XU_F : VPseudoConversionV_V; 3661defm PseudoVFCVT_X_F : VPseudoConversionV_V; 3662defm PseudoVFCVT_RTZ_XU_F : VPseudoConversionV_V; 3663defm PseudoVFCVT_RTZ_X_F : VPseudoConversionV_V; 3664defm PseudoVFCVT_F_XU : VPseudoConversionV_V; 3665defm PseudoVFCVT_F_X : VPseudoConversionV_V; 3666 3667//===----------------------------------------------------------------------===// 3668// 14.18. 
Widening Floating-Point/Integer Type-Convert Instructions 3669//===----------------------------------------------------------------------===// 3670defm PseudoVFWCVT_XU_F : VPseudoConversionW_V; 3671defm PseudoVFWCVT_X_F : VPseudoConversionW_V; 3672defm PseudoVFWCVT_RTZ_XU_F : VPseudoConversionW_V; 3673defm PseudoVFWCVT_RTZ_X_F : VPseudoConversionW_V; 3674defm PseudoVFWCVT_F_XU : VPseudoConversionW_V; 3675defm PseudoVFWCVT_F_X : VPseudoConversionW_V; 3676defm PseudoVFWCVT_F_F : VPseudoConversionW_V; 3677 3678//===----------------------------------------------------------------------===// 3679// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions 3680//===----------------------------------------------------------------------===// 3681defm PseudoVFNCVT_XU_F : VPseudoConversionV_W; 3682defm PseudoVFNCVT_X_F : VPseudoConversionV_W; 3683defm PseudoVFNCVT_RTZ_XU_F : VPseudoConversionV_W; 3684defm PseudoVFNCVT_RTZ_X_F : VPseudoConversionV_W; 3685defm PseudoVFNCVT_F_XU : VPseudoConversionV_W; 3686defm PseudoVFNCVT_F_X : VPseudoConversionV_W; 3687defm PseudoVFNCVT_F_F : VPseudoConversionV_W; 3688defm PseudoVFNCVT_ROD_F_F : VPseudoConversionV_W; 3689} // Predicates = [HasStdExtV, HasStdExtF] 3690 3691let Predicates = [HasStdExtV] in { 3692//===----------------------------------------------------------------------===// 3693// 15.1. Vector Single-Width Integer Reduction Instructions 3694//===----------------------------------------------------------------------===// 3695defm PseudoVREDSUM : VPseudoReductionV_VS; 3696defm PseudoVREDAND : VPseudoReductionV_VS; 3697defm PseudoVREDOR : VPseudoReductionV_VS; 3698defm PseudoVREDXOR : VPseudoReductionV_VS; 3699defm PseudoVREDMINU : VPseudoReductionV_VS; 3700defm PseudoVREDMIN : VPseudoReductionV_VS; 3701defm PseudoVREDMAXU : VPseudoReductionV_VS; 3702defm PseudoVREDMAX : VPseudoReductionV_VS; 3703 3704//===----------------------------------------------------------------------===// 3705// 15.2. Vector Widening Integer Reduction Instructions 3706//===----------------------------------------------------------------------===// 3707defm PseudoVWREDSUMU : VPseudoReductionV_VS; 3708defm PseudoVWREDSUM : VPseudoReductionV_VS; 3709} // Predicates = [HasStdExtV] 3710 3711let Predicates = [HasStdExtV, HasStdExtF] in { 3712//===----------------------------------------------------------------------===// 3713// 15.3. Vector Single-Width Floating-Point Reduction Instructions 3714//===----------------------------------------------------------------------===// 3715defm PseudoVFREDOSUM : VPseudoReductionV_VS; 3716defm PseudoVFREDSUM : VPseudoReductionV_VS; 3717defm PseudoVFREDMIN : VPseudoReductionV_VS; 3718defm PseudoVFREDMAX : VPseudoReductionV_VS; 3719 3720//===----------------------------------------------------------------------===// 3721// 15.4. Vector Widening Floating-Point Reduction Instructions 3722//===----------------------------------------------------------------------===// 3723defm PseudoVFWREDSUM : VPseudoReductionV_VS; 3724defm PseudoVFWREDOSUM : VPseudoReductionV_VS; 3725 3726} // Predicates = [HasStdExtV, HasStdExtF] 3727 3728//===----------------------------------------------------------------------===// 3729// 16. 
Vector Mask Instructions 3730//===----------------------------------------------------------------------===// 3731 3732//===----------------------------------------------------------------------===// 3733// 16.1 Vector Mask-Register Logical Instructions 3734//===----------------------------------------------------------------------===// 3735 3736defm PseudoVMAND: VPseudoBinaryM_MM; 3737defm PseudoVMNAND: VPseudoBinaryM_MM; 3738defm PseudoVMANDNOT: VPseudoBinaryM_MM; 3739defm PseudoVMXOR: VPseudoBinaryM_MM; 3740defm PseudoVMOR: VPseudoBinaryM_MM; 3741defm PseudoVMNOR: VPseudoBinaryM_MM; 3742defm PseudoVMORNOT: VPseudoBinaryM_MM; 3743defm PseudoVMXNOR: VPseudoBinaryM_MM; 3744 3745// Pseudo instructions 3746defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">; 3747defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">; 3748 3749//===----------------------------------------------------------------------===// 3750// 16.2. Vector mask population count vpopc 3751//===----------------------------------------------------------------------===// 3752 3753defm PseudoVPOPC: VPseudoUnaryS_M; 3754 3755//===----------------------------------------------------------------------===// 3756// 16.3. vfirst find-first-set mask bit 3757//===----------------------------------------------------------------------===// 3758 3759defm PseudoVFIRST: VPseudoUnaryS_M; 3760 3761//===----------------------------------------------------------------------===// 3762// 16.4. vmsbf.m set-before-first mask bit 3763//===----------------------------------------------------------------------===// 3764defm PseudoVMSBF: VPseudoUnaryM_M; 3765 3766//===----------------------------------------------------------------------===// 3767// 16.5. vmsif.m set-including-first mask bit 3768//===----------------------------------------------------------------------===// 3769defm PseudoVMSIF: VPseudoUnaryM_M; 3770 3771//===----------------------------------------------------------------------===// 3772// 16.6. vmsof.m set-only-first mask bit 3773//===----------------------------------------------------------------------===// 3774defm PseudoVMSOF: VPseudoUnaryM_M; 3775 3776//===----------------------------------------------------------------------===// 3777// 16.8. Vector Iota Instruction 3778//===----------------------------------------------------------------------===// 3779defm PseudoVIOTA_M: VPseudoUnaryV_M; 3780 3781//===----------------------------------------------------------------------===// 3782// 16.9. Vector Element Index Instruction 3783//===----------------------------------------------------------------------===// 3784defm PseudoVID : VPseudoMaskNullaryV; 3785 3786//===----------------------------------------------------------------------===// 3787// 17. Vector Permutation Instructions 3788//===----------------------------------------------------------------------===// 3789 3790//===----------------------------------------------------------------------===// 3791// 17.1. 
Integer Scalar Move Instructions 3792//===----------------------------------------------------------------------===// 3793 3794let Predicates = [HasStdExtV] in { 3795let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1, 3796 Uses = [VL, VTYPE] in { 3797 foreach m = MxList.m in { 3798 let VLMul = m.value in { 3799 let HasSEWOp = 1, BaseInstr = VMV_X_S in 3800 def PseudoVMV_X_S # "_" # m.MX: Pseudo<(outs GPR:$rd), 3801 (ins m.vrclass:$rs2, ixlenimm:$sew), 3802 []>, RISCVVPseudo; 3803 let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X, 3804 ForceTailAgnostic = true, Constraints = "$rd = $rs1" in 3805 def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd), 3806 (ins m.vrclass:$rs1, GPR:$rs2, 3807 AVL:$vl, ixlenimm:$sew), 3808 []>, RISCVVPseudo; 3809 } 3810 } 3811} 3812} // Predicates = [HasStdExtV] 3813 3814//===----------------------------------------------------------------------===// 3815// 17.2. Floating-Point Scalar Move Instructions 3816//===----------------------------------------------------------------------===// 3817 3818let Predicates = [HasStdExtV, HasStdExtF] in { 3819let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1, 3820 Uses = [VL, VTYPE] in { 3821 foreach m = MxList.m in { 3822 foreach f = FPList.fpinfo in { 3823 let VLMul = m.value in { 3824 let HasSEWOp = 1, BaseInstr = VFMV_F_S in 3825 def "PseudoVFMV_" # f.FX # "_S_" # m.MX : 3826 Pseudo<(outs f.fprclass:$rd), 3827 (ins m.vrclass:$rs2, 3828 ixlenimm:$sew), 3829 []>, RISCVVPseudo; 3830 let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, 3831 ForceTailAgnostic = true, Constraints = "$rd = $rs1" in 3832 def "PseudoVFMV_S_" # f.FX # "_" # m.MX : 3833 Pseudo<(outs m.vrclass:$rd), 3834 (ins m.vrclass:$rs1, f.fprclass:$rs2, 3835 AVL:$vl, ixlenimm:$sew), 3836 []>, RISCVVPseudo; 3837 } 3838 } 3839 } 3840} 3841} // Predicates = [HasStdExtV, HasStdExtF] 3842 3843//===----------------------------------------------------------------------===// 3844// 17.3. Vector Slide Instructions 3845//===----------------------------------------------------------------------===// 3846let Predicates = [HasStdExtV] in { 3847 defm PseudoVSLIDEUP : VPseudoTernaryV_VX_VI<uimm5, "@earlyclobber $rd">; 3848 defm PseudoVSLIDEDOWN : VPseudoTernaryV_VX_VI<uimm5>; 3849 defm PseudoVSLIDE1UP : VPseudoBinaryV_VX<"@earlyclobber $rd">; 3850 defm PseudoVSLIDE1DOWN : VPseudoBinaryV_VX; 3851} // Predicates = [HasStdExtV] 3852 3853let Predicates = [HasStdExtV, HasStdExtF] in { 3854 defm PseudoVFSLIDE1UP : VPseudoBinaryV_VF<"@earlyclobber $rd">; 3855 defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VF; 3856} // Predicates = [HasStdExtV, HasStdExtF] 3857 3858//===----------------------------------------------------------------------===// 3859// 17.4. Vector Register Gather Instructions 3860//===----------------------------------------------------------------------===// 3861defm PseudoVRGATHER : VPseudoBinaryV_VV_VX_VI<uimm5, "@earlyclobber $rd">; 3862defm PseudoVRGATHEREI16 : VPseudoBinaryV_VV_EEW</* eew */ 16, "@earlyclobber $rd">; 3863 3864//===----------------------------------------------------------------------===// 3865// 17.5. Vector Compress Instruction 3866//===----------------------------------------------------------------------===// 3867defm PseudoVCOMPRESS : VPseudoUnaryV_V_AnyMask; 3868 3869//===----------------------------------------------------------------------===// 3870// Patterns. 
3871//===----------------------------------------------------------------------===// 3872 3873//===----------------------------------------------------------------------===// 3874// 8. Vector AMO Operations 3875//===----------------------------------------------------------------------===// 3876let Predicates = [HasStdExtZvamo] in { 3877 defm : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllIntegerVectors>; 3878 defm : VPatAMOV_WD<"int_riscv_vamoadd", "PseudoVAMOADD", AllIntegerVectors>; 3879 defm : VPatAMOV_WD<"int_riscv_vamoxor", "PseudoVAMOXOR", AllIntegerVectors>; 3880 defm : VPatAMOV_WD<"int_riscv_vamoand", "PseudoVAMOAND", AllIntegerVectors>; 3881 defm : VPatAMOV_WD<"int_riscv_vamoor", "PseudoVAMOOR", AllIntegerVectors>; 3882 defm : VPatAMOV_WD<"int_riscv_vamomin", "PseudoVAMOMIN", AllIntegerVectors>; 3883 defm : VPatAMOV_WD<"int_riscv_vamomax", "PseudoVAMOMAX", AllIntegerVectors>; 3884 defm : VPatAMOV_WD<"int_riscv_vamominu", "PseudoVAMOMINU", AllIntegerVectors>; 3885 defm : VPatAMOV_WD<"int_riscv_vamomaxu", "PseudoVAMOMAXU", AllIntegerVectors>; 3886} // Predicates = [HasStdExtZvamo] 3887 3888let Predicates = [HasStdExtZvamo, HasStdExtF] in { 3889 defm : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllFloatVectors>; 3890} // Predicates = [HasStdExtZvamo, HasStdExtF] 3891 3892//===----------------------------------------------------------------------===// 3893// 12. Vector Integer Arithmetic Instructions 3894//===----------------------------------------------------------------------===// 3895 3896let Predicates = [HasStdExtV] in { 3897//===----------------------------------------------------------------------===// 3898// 12.1. Vector Single-Width Integer Add and Subtract 3899//===----------------------------------------------------------------------===// 3900defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>; 3901defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>; 3902defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>; 3903 3904//===----------------------------------------------------------------------===// 3905// 12.2. Vector Widening Integer Add/Subtract 3906//===----------------------------------------------------------------------===// 3907defm : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>; 3908defm : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>; 3909defm : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>; 3910defm : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>; 3911defm : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>; 3912defm : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>; 3913defm : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>; 3914defm : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>; 3915 3916//===----------------------------------------------------------------------===// 3917// 12.3. 
Vector Integer Extension 3918//===----------------------------------------------------------------------===// 3919defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2", 3920 AllFractionableVF2IntVectors>; 3921defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4", 3922 AllFractionableVF4IntVectors>; 3923defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8", 3924 AllFractionableVF8IntVectors>; 3925defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2", 3926 AllFractionableVF2IntVectors>; 3927defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4", 3928 AllFractionableVF4IntVectors>; 3929defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8", 3930 AllFractionableVF8IntVectors>; 3931 3932//===----------------------------------------------------------------------===// 3933// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions 3934//===----------------------------------------------------------------------===// 3935defm : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">; 3936defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">; 3937defm : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">; 3938 3939defm : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">; 3940defm : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">; 3941defm : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">; 3942 3943//===----------------------------------------------------------------------===// 3944// 12.5. Vector Bitwise Logical Instructions 3945//===----------------------------------------------------------------------===// 3946defm : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>; 3947defm : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>; 3948defm : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>; 3949 3950//===----------------------------------------------------------------------===// 3951// 12.6. Vector Single-Width Bit Shift Instructions 3952//===----------------------------------------------------------------------===// 3953defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors, 3954 uimm5>; 3955defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors, 3956 uimm5>; 3957defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors, 3958 uimm5>; 3959 3960//===----------------------------------------------------------------------===// 3961// 12.7. Vector Narrowing Integer Right Shift Instructions 3962//===----------------------------------------------------------------------===// 3963defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>; 3964defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>; 3965 3966//===----------------------------------------------------------------------===// 3967// 12.8. 
Vector Integer Comparison Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;

defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;

// Match vmsgt with 2 vector operands to vmslt with the operands swapped.
defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;

defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;

// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This
// avoids the user needing to know that there is no vmslt(u).vi instruction.
// Similar for vmsge(u).vx intrinsics using vmsgt(u).vi.
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
                                       (vti.Scalar simm5_plus1:$rs2),
                                       VLOpFrag)),
            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               (DecImm simm5_plus1:$rs2),
                                                               GPR:$vl,
                                                               vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask VR:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar simm5_plus1:$rs2),
                                             (vti.Mask V0),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      (DecImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;

  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
                                        VLOpFrag)),
            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                                (DecImm simm5_plus1:$rs2),
                                                                GPR:$vl,
                                                                vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
                                              (vti.Vector vti.RegClass:$rs1),
                                              (vti.Scalar simm5_plus1:$rs2),
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      (DecImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;

  // Special cases to avoid matching vmsltu.vi 0 (always false) to
  // vmsleu.vi -1 (always true). Instead match to vmsne.vv.
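  // For illustration only (the concrete registers and SEW below are example
  // choices, not taken from the patterns themselves): with SEW=8, DecImm would
  // turn a zero scalar into the immediate -1, which vmsleu.vi sign-extends to
  // the unsigned maximum, so "x <u 0" (always false) would become
  // "x <=u 255" (always true). The special-case patterns below instead compare
  // a register with itself, e.g.
  //   vmsne.vv v0, v8, v8   # no element differs from itself -> all-zeros mask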
  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar 0), VLOpFrag)),
            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               vti.RegClass:$rs1,
                                                               GPR:$vl,
                                                               vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
                                              (vti.Vector vti.RegClass:$rs1),
                                              (vti.Scalar 0),
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      vti.RegClass:$rs1,
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;

  def : Pat<(vti.Mask (int_riscv_vmsge (vti.Vector vti.RegClass:$rs1),
                                       (vti.Scalar simm5_plus1:$rs2),
                                       VLOpFrag)),
            (!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               (DecImm simm5_plus1:$rs2),
                                                               GPR:$vl,
                                                               vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsge_mask (vti.Mask VR:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar simm5_plus1:$rs2),
                                             (vti.Mask V0),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      (DecImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;

  def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
                                        VLOpFrag)),
            (!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                                (DecImm simm5_plus1:$rs2),
                                                                GPR:$vl,
                                                                vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
                                              (vti.Vector vti.RegClass:$rs1),
                                              (vti.Scalar simm5_plus1:$rs2),
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX#"_MASK")
                                                       VR:$merge,
                                                       vti.RegClass:$rs1,
                                                       (DecImm simm5_plus1:$rs2),
                                                       (vti.Mask V0),
                                                       GPR:$vl,
                                                       vti.Log2SEW)>;

  // Special cases to avoid matching vmsgeu.vi 0 (always true) to
  // vmsgtu.vi -1 (always false). Instead match to vmseq.vv.
  def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar 0), VLOpFrag)),
            (!cast<Instruction>("PseudoVMSEQ_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               vti.RegClass:$rs1,
                                                               GPR:$vl,
                                                               vti.Log2SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
                                              (vti.Vector vti.RegClass:$rs1),
                                              (vti.Scalar 0),
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVMSEQ_VV_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      vti.RegClass:$rs1,
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.Log2SEW)>;
}

//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.10.
Vector Single-Width Integer Multiply Instructions 4123//===----------------------------------------------------------------------===// 4124defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>; 4125defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH", AllIntegerVectors>; 4126defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU", AllIntegerVectors>; 4127defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU", AllIntegerVectors>; 4128 4129//===----------------------------------------------------------------------===// 4130// 12.11. Vector Integer Divide Instructions 4131//===----------------------------------------------------------------------===// 4132defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors>; 4133defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors>; 4134defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors>; 4135defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>; 4136 4137//===----------------------------------------------------------------------===// 4138// 12.12. Vector Widening Integer Multiply Instructions 4139//===----------------------------------------------------------------------===// 4140defm : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>; 4141defm : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>; 4142defm : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>; 4143 4144//===----------------------------------------------------------------------===// 4145// 12.13. Vector Single-Width Integer Multiply-Add Instructions 4146//===----------------------------------------------------------------------===// 4147defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>; 4148defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>; 4149defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>; 4150defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>; 4151 4152//===----------------------------------------------------------------------===// 4153// 12.14. Vector Widening Integer Multiply-Add Instructions 4154//===----------------------------------------------------------------------===// 4155defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>; 4156defm : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>; 4157defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>; 4158defm : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>; 4159 4160//===----------------------------------------------------------------------===// 4161// 12.15. Vector Integer Merge Instructions 4162//===----------------------------------------------------------------------===// 4163defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">; 4164 4165//===----------------------------------------------------------------------===// 4166// 12.16. 
Vector Integer Move Instructions
//===----------------------------------------------------------------------===//
foreach vti = AllVectors in {
  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1),
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
             $rs1, GPR:$vl, vti.Log2SEW)>;

  // vmv.v.x/vmv.v.i are handled in RISCVInstrInfoVVLPatterns.td
}

//===----------------------------------------------------------------------===//
// 13.1. Vector Single-Width Saturating Add and Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Averaging Add and Subtract
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vaaddu", "PseudoVAADDU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vaadd", "PseudoVAADD", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vasubu", "PseudoVASUBU", AllIntegerVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors,
                            uimm5>;
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
                            uimm5>;

//===----------------------------------------------------------------------===//
// 13.5. Vector Narrowing Fixed-Point Clip Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;

} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
//===----------------------------------------------------------------------===//
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
defm : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
defm : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.3.
Vector Widening Floating-Point Add/Subtract Instructions 4224//===----------------------------------------------------------------------===// 4225defm : VPatBinaryW_VV_VX<"int_riscv_vfwadd", "PseudoVFWADD", AllWidenableFloatVectors>; 4226defm : VPatBinaryW_VV_VX<"int_riscv_vfwsub", "PseudoVFWSUB", AllWidenableFloatVectors>; 4227defm : VPatBinaryW_WV_WX<"int_riscv_vfwadd_w", "PseudoVFWADD", AllWidenableFloatVectors>; 4228defm : VPatBinaryW_WV_WX<"int_riscv_vfwsub_w", "PseudoVFWSUB", AllWidenableFloatVectors>; 4229 4230//===----------------------------------------------------------------------===// 4231// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions 4232//===----------------------------------------------------------------------===// 4233defm : VPatBinaryV_VV_VX<"int_riscv_vfmul", "PseudoVFMUL", AllFloatVectors>; 4234defm : VPatBinaryV_VV_VX<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors>; 4235defm : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>; 4236 4237//===----------------------------------------------------------------------===// 4238// 14.5. Vector Widening Floating-Point Multiply 4239//===----------------------------------------------------------------------===// 4240defm : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>; 4241 4242//===----------------------------------------------------------------------===// 4243// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions 4244//===----------------------------------------------------------------------===// 4245defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>; 4246defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>; 4247defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>; 4248defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>; 4249defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>; 4250defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>; 4251defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>; 4252defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>; 4253 4254//===----------------------------------------------------------------------===// 4255// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions 4256//===----------------------------------------------------------------------===// 4257defm : VPatTernaryW_VV_VX<"int_riscv_vfwmacc", "PseudoVFWMACC", AllWidenableFloatVectors>; 4258defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmacc", "PseudoVFWNMACC", AllWidenableFloatVectors>; 4259defm : VPatTernaryW_VV_VX<"int_riscv_vfwmsac", "PseudoVFWMSAC", AllWidenableFloatVectors>; 4260defm : VPatTernaryW_VV_VX<"int_riscv_vfwnmsac", "PseudoVFWNMSAC", AllWidenableFloatVectors>; 4261 4262//===----------------------------------------------------------------------===// 4263// 14.8. Vector Floating-Point Square-Root Instruction 4264//===----------------------------------------------------------------------===// 4265defm : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>; 4266 4267//===----------------------------------------------------------------------===// 4268// 14.9. 
Vector Floating-Point Reciprocal Square-Root Estimate Instruction 4269//===----------------------------------------------------------------------===// 4270defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>; 4271 4272//===----------------------------------------------------------------------===// 4273// 14.10. Vector Floating-Point Reciprocal Estimate Instruction 4274//===----------------------------------------------------------------------===// 4275defm : VPatUnaryV_V<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>; 4276 4277//===----------------------------------------------------------------------===// 4278// 14.11. Vector Floating-Point Min/Max Instructions 4279//===----------------------------------------------------------------------===// 4280defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>; 4281defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors>; 4282 4283//===----------------------------------------------------------------------===// 4284// 14.12. Vector Floating-Point Sign-Injection Instructions 4285//===----------------------------------------------------------------------===// 4286defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>; 4287defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>; 4288defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>; 4289 4290//===----------------------------------------------------------------------===// 4291// 14.13. Vector Floating-Point Compare Instructions 4292//===----------------------------------------------------------------------===// 4293defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>; 4294defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>; 4295defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>; 4296defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>; 4297defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>; 4298defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>; 4299defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>; 4300defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>; 4301 4302//===----------------------------------------------------------------------===// 4303// 14.14. Vector Floating-Point Classify Instruction 4304//===----------------------------------------------------------------------===// 4305defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">; 4306 4307//===----------------------------------------------------------------------===// 4308// 14.15. Vector Floating-Point Merge Instruction 4309//===----------------------------------------------------------------------===// 4310// We can use vmerge.vvm to support vector-vector vfmerge. 
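// Sketch for illustration (the vector register numbers are arbitrary example
// choices): since vfmerge only selects whole elements, its vector-vector form
// can be emitted as the integer merge,
//   vmerge.vvm v8, v9, v10, v0   # vd[i] = v0.mask[i] ? v10[i] : v9[i]
// and a +0.0 scalar operand can be folded to vmerge.vim with immediate 0,
// because +0.0 has an all-zero bit pattern at every supported SEW:
//   vmerge.vim v8, v9, 0, v0     # vd[i] = v0.mask[i] ? 0.0 : v9[i]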
4311defm : VPatBinaryV_VM<"int_riscv_vfmerge", "PseudoVMERGE", 4312 /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; 4313defm : VPatBinaryV_XM<"int_riscv_vfmerge", "PseudoVFMERGE", 4314 /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; 4315 4316foreach fvti = AllFloatVectors in { 4317 defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX); 4318 def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$rs2), 4319 (fvti.Scalar (fpimm0)), 4320 (fvti.Mask V0), VLOpFrag)), 4321 (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>; 4322} 4323 4324//===----------------------------------------------------------------------===// 4325// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions 4326//===----------------------------------------------------------------------===// 4327defm : VPatConversionVI_VF<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">; 4328defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">; 4329defm : VPatConversionVI_VF<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">; 4330defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">; 4331defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X">; 4332defm : VPatConversionVF_VI<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU">; 4333 4334//===----------------------------------------------------------------------===// 4335// 14.18. Widening Floating-Point/Integer Type-Convert Instructions 4336//===----------------------------------------------------------------------===// 4337defm : VPatConversionWI_VF<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">; 4338defm : VPatConversionWI_VF<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">; 4339defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">; 4340defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">; 4341defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">; 4342defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">; 4343defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">; 4344 4345//===----------------------------------------------------------------------===// 4346// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions 4347//===----------------------------------------------------------------------===// 4348defm : VPatConversionVI_WF<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">; 4349defm : VPatConversionVI_WF<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">; 4350defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">; 4351defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">; 4352defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">; 4353defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">; 4354defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">; 4355defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">; 4356} // Predicates = [HasStdExtV, HasStdExtF] 4357 4358let Predicates = [HasStdExtV] in { 4359//===----------------------------------------------------------------------===// 4360// 15.1. 
Vector Single-Width Integer Reduction Instructions 4361//===----------------------------------------------------------------------===// 4362defm : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">; 4363defm : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">; 4364defm : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">; 4365defm : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">; 4366defm : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">; 4367defm : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">; 4368defm : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">; 4369defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">; 4370 4371//===----------------------------------------------------------------------===// 4372// 15.2. Vector Widening Integer Reduction Instructions 4373//===----------------------------------------------------------------------===// 4374defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">; 4375defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">; 4376} // Predicates = [HasStdExtV] 4377 4378let Predicates = [HasStdExtV, HasStdExtF] in { 4379//===----------------------------------------------------------------------===// 4380// 15.3. Vector Single-Width Floating-Point Reduction Instructions 4381//===----------------------------------------------------------------------===// 4382defm : VPatReductionV_VS<"int_riscv_vfredosum", "PseudoVFREDOSUM", /*IsFloat=*/1>; 4383defm : VPatReductionV_VS<"int_riscv_vfredsum", "PseudoVFREDSUM", /*IsFloat=*/1>; 4384defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/1>; 4385defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>; 4386 4387//===----------------------------------------------------------------------===// 4388// 15.4. Vector Widening Floating-Point Reduction Instructions 4389//===----------------------------------------------------------------------===// 4390defm : VPatReductionW_VS<"int_riscv_vfwredsum", "PseudoVFWREDSUM", /*IsFloat=*/1>; 4391defm : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>; 4392 4393} // Predicates = [HasStdExtV, HasStdExtF] 4394 4395//===----------------------------------------------------------------------===// 4396// 16. Vector Mask Instructions 4397//===----------------------------------------------------------------------===// 4398 4399let Predicates = [HasStdExtV] in { 4400//===----------------------------------------------------------------------===// 4401// 16.1 Vector Mask-Register Logical Instructions 4402//===----------------------------------------------------------------------===// 4403defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">; 4404defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">; 4405defm : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">; 4406defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">; 4407defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">; 4408defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">; 4409defm : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">; 4410defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">; 4411 4412// pseudo instructions 4413defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">; 4414defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">; 4415 4416//===----------------------------------------------------------------------===// 4417// 16.2. 
Vector mask population count vpopc 4418//===----------------------------------------------------------------------===// 4419defm : VPatUnaryS_M<"int_riscv_vpopc", "PseudoVPOPC">; 4420 4421//===----------------------------------------------------------------------===// 4422// 16.3. vfirst find-first-set mask bit 4423//===----------------------------------------------------------------------===// 4424defm : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">; 4425 4426//===----------------------------------------------------------------------===// 4427// 16.4. vmsbf.m set-before-first mask bit 4428//===----------------------------------------------------------------------===// 4429defm : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">; 4430 4431//===----------------------------------------------------------------------===// 4432// 16.5. vmsif.m set-including-first mask bit 4433//===----------------------------------------------------------------------===// 4434defm : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">; 4435 4436//===----------------------------------------------------------------------===// 4437// 16.6. vmsof.m set-only-first mask bit 4438//===----------------------------------------------------------------------===// 4439defm : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">; 4440 4441//===----------------------------------------------------------------------===// 4442// 16.8. Vector Iota Instruction 4443//===----------------------------------------------------------------------===// 4444defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">; 4445 4446//===----------------------------------------------------------------------===// 4447// 16.9. Vector Element Index Instruction 4448//===----------------------------------------------------------------------===// 4449defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">; 4450 4451} // Predicates = [HasStdExtV] 4452 4453//===----------------------------------------------------------------------===// 4454// 17. Vector Permutation Instructions 4455//===----------------------------------------------------------------------===// 4456 4457//===----------------------------------------------------------------------===// 4458// 17.1. Integer Scalar Move Instructions 4459//===----------------------------------------------------------------------===// 4460 4461let Predicates = [HasStdExtV] in { 4462foreach vti = AllIntegerVectors in { 4463 def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)), 4464 (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>; 4465 // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td 4466} 4467} // Predicates = [HasStdExtV] 4468 4469//===----------------------------------------------------------------------===// 4470// 17.2. 
//===----------------------------------------------------------------------===//
// 17.2. Floating-Point Scalar Move Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV, HasStdExtF] in {
foreach fvti = AllFloatVectors in {
  defvar instr = !cast<Instruction>("PseudoVFMV_"#fvti.ScalarSuffix#"_S_" #
                                    fvti.LMul.MX);
  def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
            (instr $rs2, fvti.Log2SEW)>;

  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                         (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
                                fvti.LMul.MX)
             (fvti.Vector $rs1),
             (fvti.Scalar fvti.ScalarRegClass:$rs2),
             GPR:$vl, fvti.Log2SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]

//===----------------------------------------------------------------------===//
// 17.3. Vector Slide Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
  defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
  defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
  defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
  defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]

//===----------------------------------------------------------------------===//
// 17.4. Vector Register Gather Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllIntegerVectors, uimm5>;
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllIntegerVectors>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                  AllFloatVectors, uimm5>;
  defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                                /* eew */ 16, AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]
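// Illustrative note on the gathers above: per the v0.10 spec, vrgather.vv
// reads vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]], while vrgatherei16.vv
// always uses EEW=16 index elements regardless of the data SEW. The differing
// index element width is why the ei16 form goes through the *_INT_EEW
// multiclass with /* eew */ 16 instead of the plain VV/VX/VI multiclass.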
//===----------------------------------------------------------------------===//
// 17.5. Vector Compress Instruction
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]

// Include the non-intrinsic ISel patterns
include "RISCVInstrInfoVSDPatterns.td"
include "RISCVInstrInfoVVLPatterns.td"
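// Informal summary (assumption, see the included files for the authoritative
// split): RISCVInstrInfoVSDPatterns.td is expected to hold patterns that
// select target-independent SelectionDAG nodes (add, fadd, load, store, ...)
// on scalable vector types, while RISCVInstrInfoVVLPatterns.td holds patterns
// for the RISCVISD "*_VL" nodes that carry an explicit VL operand.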