; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    i32 %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i8> @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i8> @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    i32 %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i8> @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i8> @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    i32 %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i8> @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i8> @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i8> @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i8> @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    i32 %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i8> @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i8> @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    i32 %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i8> @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i16> @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    i32 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i16> @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    i32 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i16> @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i16> @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    i32 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i16> @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    i32 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i32> @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i32> @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i32> @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i32> @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i64> @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32);

define <vscale x 2 x i64> @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32);

define <vscale x 4 x i64> @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    i32 %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i8> @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i8> %2,
    i32 %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i8> @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    <vscale x 4 x i8> %2,
    i32 %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i8> @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    <vscale x 8 x i8> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i8> @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    <vscale x 16 x i8> %2,
    i32 %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i8> @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    <vscale x 32 x i8> %2,
    i32 %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i16> @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i16> %2,
    i32 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i16> @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i16> %2,
    i32 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i16> @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i16> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i16> @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i16> %2,
    i32 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i16> @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i16> %2,
    i32 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i32> @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    <vscale x 1 x i32> %2,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i32> @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i32> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i32> @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    <vscale x 4 x i32> %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i32> @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i32> %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v25, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v25, v9
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v25, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v25, v9, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i64>,
  i32);

define <vscale x 2 x i64> @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v26, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v26, v10
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i64> %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v26, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v26, v10, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i64>,
  i32);

define <vscale x 4 x i64> @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v28, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v28, v12
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i64> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v28, (a0), zero
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
; CHECK-NEXT:    vmadd.vv v8, v28, v12, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}