; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
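; vslide1up.vx shifts every element of the vector source up by one position
; and places the scalar GPR operand at element 0. In the unmasked form the
; result is computed into a scratch register and then copied to v8, since the
; destination of vslide1up may not overlap its vector source; the masked form
; instead writes into its merge operand (v8) under a tail-undisturbed (tu)
; policy, as the vsetvli lines below check.
declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  i64);

define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}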
declare <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  i64);

define <vscale x 2 x i8> @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8> @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  i64);

define <vscale x 4 x i8> @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8> @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  i64);

define <vscale x 8 x i8> @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i8> @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  i64);

define <vscale x 16 x i8> @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i8> @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  i64);

define <vscale x 32 x i8> @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i8> @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
  <vscale x 64 x i8>,
  i8,
  i64);

define <vscale x 64 x i8> @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  i64);

define <vscale x 64 x i8> @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    i64 %4)

  ret <vscale x 64 x i8> %a
}
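; The i16, i32 and i64 cases below repeat the same unmasked/masked pattern;
; only the element width and LMUL in the vsetvli change.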
declare <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  i64);

define <vscale x 1 x i16> @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  i64);

define <vscale x 2 x i16> @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  i64);

define <vscale x 4 x i16> @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16> @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  i64);

define <vscale x 8 x i16> @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16> @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  i64);

define <vscale x 16 x i16> @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16> @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
  <vscale x 32 x i16>,
  i16,
  i64);

define <vscale x 32 x i16> @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i16> @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  i64);

define <vscale x 1 x i32> @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  i64);

define <vscale x 2 x i32> @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  i64);

define <vscale x 4 x i32> @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  i64);

define <vscale x 8 x i32> @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
  <vscale x 16 x i32>,
  i32,
  i64);

define <vscale x 16 x i32> @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i32> @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i32> %a
}
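; On riscv64 an i64 scalar fits in a single GPR (a0), so the e64 cases below
; lower exactly like the narrower element widths.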
declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  i64);

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  i64);

define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vslide1up.vx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  i64);

define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vslide1up.vx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
  <vscale x 8 x i64>,
  i64,
  i64);

define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vslide1up.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i64> %a
}