; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64);

define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i64);

define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i64);

define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i64);

define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i64);

define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i64 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i64);

define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i64 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i64);

define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i64 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  i64);

define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, mu
; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    i64 %4)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i64);

define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i64);

define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i64 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i64);

define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i64);

define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i64);

define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i64);

define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i64);

define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i64 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vsll.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
  <vscale x 1 x i8>,
  i64,
  i64);

define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
    <vscale x 1 x i8> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
  <vscale x 2 x i8>,
  i64,
  i64);

define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
    <vscale x 2 x i8> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
  <vscale x 4 x i8>,
  i64,
  i64);

define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
    <vscale x 4 x i8> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i64,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
  <vscale x 8 x i8>,
  i64,
  i64);

define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
    <vscale x 8 x i8> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i64,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
  <vscale x 16 x i8>,
  i64,
  i64);

define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
    <vscale x 16 x i8> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i64,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
  <vscale x 32 x i8>,
  i64,
  i64);

define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
    <vscale x 32 x i8> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i64,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i64 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
  <vscale x 64 x i8>,
  i64,
  i64);

define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
    <vscale x 64 x i8> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i64,
  <vscale x 64 x i1>,
  i64);

define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, mu
; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i64 %2,
    <vscale x 64 x i1> %3,
    i64 %4)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
  <vscale x 1 x i16>,
  i64,
  i64);

define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
    <vscale x 1 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
  <vscale x 2 x i16>,
  i64,
  i64);

define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
    <vscale x 2 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
  <vscale x 4 x i16>,
  i64,
  i64);

define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
    <vscale x 4 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i64,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
  <vscale x 8 x i16>,
  i64,
  i64);

define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
    <vscale x 8 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i64,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
  <vscale x 16 x i16>,
  i64,
  i64);

define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
    <vscale x 16 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i64,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
  <vscale x 32 x i16>,
  i64,
  i64);

define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
    <vscale x 32 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i64,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i64 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
  <vscale x 1 x i32>,
  i64,
  i64);

define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
    <vscale x 1 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
  <vscale x 2 x i32>,
  i64,
  i64);

define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
    <vscale x 2 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
  <vscale x 4 x i32>,
  i64,
  i64);

define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
    <vscale x 4 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i64,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
  <vscale x 8 x i32>,
  i64,
  i64);

define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
    <vscale x 8 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i64,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
  <vscale x 16 x i32>,
  i64,
  i64);

define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
    <vscale x 16 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i64,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
  <vscale x 1 x i64>,
  i64,
  i64);

define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
  <vscale x 2 x i64>,
  i64,
  i64);

define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
  <vscale x 4 x i64>,
  i64,
  i64);

define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  i64);

1882 1883define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind { 1884; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64: 1885; CHECK: # %bb.0: # %entry 1886; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu 1887; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t 1888; CHECK-NEXT: ret 1889entry: 1890 %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64( 1891 <vscale x 4 x i64> %0, 1892 <vscale x 4 x i64> %1, 1893 i64 %2, 1894 <vscale x 4 x i1> %3, 1895 i64 %4) 1896 1897 ret <vscale x 4 x i64> %a 1898} 1899 1900declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64( 1901 <vscale x 8 x i64>, 1902 i64, 1903 i64); 1904 1905define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind { 1906; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64: 1907; CHECK: # %bb.0: # %entry 1908; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu 1909; CHECK-NEXT: vsll.vx v8, v8, a0 1910; CHECK-NEXT: ret 1911entry: 1912 %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64( 1913 <vscale x 8 x i64> %0, 1914 i64 %1, 1915 i64 %2) 1916 1917 ret <vscale x 8 x i64> %a 1918} 1919 1920declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64( 1921 <vscale x 8 x i64>, 1922 <vscale x 8 x i64>, 1923 i64, 1924 <vscale x 8 x i1>, 1925 i64); 1926 1927define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind { 1928; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64: 1929; CHECK: # %bb.0: # %entry 1930; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu 1931; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t 1932; CHECK-NEXT: ret 1933entry: 1934 %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64( 1935 <vscale x 8 x i64> %0, 1936 <vscale x 8 x i64> %1, 1937 i64 %2, 1938 <vscale x 8 x i1> %3, 1939 i64 %4) 1940 1941 ret <vscale x 8 x i64> %a 1942} 1943 1944define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind { 1945; CHECK-LABEL: intrinsic_vsll_vi_nxv1i8_nxv1i8_i8: 1946; CHECK: # %bb.0: # %entry 1947; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu 1948; CHECK-NEXT: vsll.vi v8, v8, 9 1949; CHECK-NEXT: ret 1950entry: 1951 %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8( 1952 <vscale x 1 x i8> %0, 1953 i64 9, 1954 i64 %1) 1955 1956 ret <vscale x 1 x i8> %a 1957} 1958 1959define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { 1960; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8: 1961; CHECK: # %bb.0: # %entry 1962; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu 1963; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 1964; CHECK-NEXT: ret 1965entry: 1966 %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8( 1967 <vscale x 1 x i8> %0, 1968 <vscale x 1 x i8> %1, 1969 i64 9, 1970 <vscale x 1 x i1> %2, 1971 i64 %3) 1972 1973 ret <vscale x 1 x i8> %a 1974} 1975 1976define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind { 1977; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8: 1978; CHECK: # %bb.0: # %entry 1979; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu 1980; CHECK-NEXT: vsll.vi v8, v8, 9 1981; CHECK-NEXT: ret 1982entry: 1983 %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8( 1984 <vscale x 2 x i8> %0, 1985 i64 9, 1986 i64 %1) 1987 1988 ret <vscale x 2 x i8> %a 1989} 1990 1991define <vscale x 2 x i8> 
@intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { 1992; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8: 1993; CHECK: # %bb.0: # %entry 1994; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu 1995; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 1996; CHECK-NEXT: ret 1997entry: 1998 %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8( 1999 <vscale x 2 x i8> %0, 2000 <vscale x 2 x i8> %1, 2001 i64 9, 2002 <vscale x 2 x i1> %2, 2003 i64 %3) 2004 2005 ret <vscale x 2 x i8> %a 2006} 2007 2008define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind { 2009; CHECK-LABEL: intrinsic_vsll_vi_nxv4i8_nxv4i8_i8: 2010; CHECK: # %bb.0: # %entry 2011; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu 2012; CHECK-NEXT: vsll.vi v8, v8, 9 2013; CHECK-NEXT: ret 2014entry: 2015 %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8( 2016 <vscale x 4 x i8> %0, 2017 i64 9, 2018 i64 %1) 2019 2020 ret <vscale x 4 x i8> %a 2021} 2022 2023define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { 2024; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8: 2025; CHECK: # %bb.0: # %entry 2026; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu 2027; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 2028; CHECK-NEXT: ret 2029entry: 2030 %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8( 2031 <vscale x 4 x i8> %0, 2032 <vscale x 4 x i8> %1, 2033 i64 9, 2034 <vscale x 4 x i1> %2, 2035 i64 %3) 2036 2037 ret <vscale x 4 x i8> %a 2038} 2039 2040define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind { 2041; CHECK-LABEL: intrinsic_vsll_vi_nxv8i8_nxv8i8_i8: 2042; CHECK: # %bb.0: # %entry 2043; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu 2044; CHECK-NEXT: vsll.vi v8, v8, 9 2045; CHECK-NEXT: ret 2046entry: 2047 %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8( 2048 <vscale x 8 x i8> %0, 2049 i64 9, 2050 i64 %1) 2051 2052 ret <vscale x 8 x i8> %a 2053} 2054 2055define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { 2056; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8: 2057; CHECK: # %bb.0: # %entry 2058; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu 2059; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 2060; CHECK-NEXT: ret 2061entry: 2062 %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8( 2063 <vscale x 8 x i8> %0, 2064 <vscale x 8 x i8> %1, 2065 i64 9, 2066 <vscale x 8 x i1> %2, 2067 i64 %3) 2068 2069 ret <vscale x 8 x i8> %a 2070} 2071 2072define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind { 2073; CHECK-LABEL: intrinsic_vsll_vi_nxv16i8_nxv16i8_i8: 2074; CHECK: # %bb.0: # %entry 2075; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu 2076; CHECK-NEXT: vsll.vi v8, v8, 9 2077; CHECK-NEXT: ret 2078entry: 2079 %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8( 2080 <vscale x 16 x i8> %0, 2081 i64 9, 2082 i64 %1) 2083 2084 ret <vscale x 16 x i8> %a 2085} 2086 2087define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { 2088; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8: 2089; CHECK: # %bb.0: # %entry 2090; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu 2091; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t 2092; CHECK-NEXT: ret 2093entry: 2094 %a = call <vscale x 
16 x i8> @llvm.riscv.vsll.mask.nxv16i8( 2095 <vscale x 16 x i8> %0, 2096 <vscale x 16 x i8> %1, 2097 i64 9, 2098 <vscale x 16 x i1> %2, 2099 i64 %3) 2100 2101 ret <vscale x 16 x i8> %a 2102} 2103 2104define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind { 2105; CHECK-LABEL: intrinsic_vsll_vi_nxv32i8_nxv32i8_i8: 2106; CHECK: # %bb.0: # %entry 2107; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu 2108; CHECK-NEXT: vsll.vi v8, v8, 9 2109; CHECK-NEXT: ret 2110entry: 2111 %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8( 2112 <vscale x 32 x i8> %0, 2113 i64 9, 2114 i64 %1) 2115 2116 ret <vscale x 32 x i8> %a 2117} 2118 2119define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind { 2120; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8: 2121; CHECK: # %bb.0: # %entry 2122; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu 2123; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t 2124; CHECK-NEXT: ret 2125entry: 2126 %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8( 2127 <vscale x 32 x i8> %0, 2128 <vscale x 32 x i8> %1, 2129 i64 9, 2130 <vscale x 32 x i1> %2, 2131 i64 %3) 2132 2133 ret <vscale x 32 x i8> %a 2134} 2135 2136define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind { 2137; CHECK-LABEL: intrinsic_vsll_vi_nxv64i8_nxv64i8_i8: 2138; CHECK: # %bb.0: # %entry 2139; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu 2140; CHECK-NEXT: vsll.vi v8, v8, 9 2141; CHECK-NEXT: ret 2142entry: 2143 %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8( 2144 <vscale x 64 x i8> %0, 2145 i64 9, 2146 i64 %1) 2147 2148 ret <vscale x 64 x i8> %a 2149} 2150 2151define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind { 2152; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8: 2153; CHECK: # %bb.0: # %entry 2154; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu 2155; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t 2156; CHECK-NEXT: ret 2157entry: 2158 %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8( 2159 <vscale x 64 x i8> %0, 2160 <vscale x 64 x i8> %1, 2161 i64 9, 2162 <vscale x 64 x i1> %2, 2163 i64 %3) 2164 2165 ret <vscale x 64 x i8> %a 2166} 2167 2168define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind { 2169; CHECK-LABEL: intrinsic_vsll_vi_nxv1i16_nxv1i16_i16: 2170; CHECK: # %bb.0: # %entry 2171; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu 2172; CHECK-NEXT: vsll.vi v8, v8, 9 2173; CHECK-NEXT: ret 2174entry: 2175 %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16( 2176 <vscale x 1 x i16> %0, 2177 i64 9, 2178 i64 %1) 2179 2180 ret <vscale x 1 x i16> %a 2181} 2182 2183define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { 2184; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16: 2185; CHECK: # %bb.0: # %entry 2186; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu 2187; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 2188; CHECK-NEXT: ret 2189entry: 2190 %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16( 2191 <vscale x 1 x i16> %0, 2192 <vscale x 1 x i16> %1, 2193 i64 9, 2194 <vscale x 1 x i1> %2, 2195 i64 %3) 2196 2197 ret <vscale x 1 x i16> %a 2198} 2199 2200define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind { 
2201; CHECK-LABEL: intrinsic_vsll_vi_nxv2i16_nxv2i16_i16: 2202; CHECK: # %bb.0: # %entry 2203; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu 2204; CHECK-NEXT: vsll.vi v8, v8, 9 2205; CHECK-NEXT: ret 2206entry: 2207 %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16( 2208 <vscale x 2 x i16> %0, 2209 i64 9, 2210 i64 %1) 2211 2212 ret <vscale x 2 x i16> %a 2213} 2214 2215define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { 2216; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16: 2217; CHECK: # %bb.0: # %entry 2218; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu 2219; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 2220; CHECK-NEXT: ret 2221entry: 2222 %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16( 2223 <vscale x 2 x i16> %0, 2224 <vscale x 2 x i16> %1, 2225 i64 9, 2226 <vscale x 2 x i1> %2, 2227 i64 %3) 2228 2229 ret <vscale x 2 x i16> %a 2230} 2231 2232define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind { 2233; CHECK-LABEL: intrinsic_vsll_vi_nxv4i16_nxv4i16_i16: 2234; CHECK: # %bb.0: # %entry 2235; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu 2236; CHECK-NEXT: vsll.vi v8, v8, 9 2237; CHECK-NEXT: ret 2238entry: 2239 %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16( 2240 <vscale x 4 x i16> %0, 2241 i64 9, 2242 i64 %1) 2243 2244 ret <vscale x 4 x i16> %a 2245} 2246 2247define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { 2248; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16: 2249; CHECK: # %bb.0: # %entry 2250; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu 2251; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 2252; CHECK-NEXT: ret 2253entry: 2254 %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16( 2255 <vscale x 4 x i16> %0, 2256 <vscale x 4 x i16> %1, 2257 i64 9, 2258 <vscale x 4 x i1> %2, 2259 i64 %3) 2260 2261 ret <vscale x 4 x i16> %a 2262} 2263 2264define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind { 2265; CHECK-LABEL: intrinsic_vsll_vi_nxv8i16_nxv8i16_i16: 2266; CHECK: # %bb.0: # %entry 2267; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu 2268; CHECK-NEXT: vsll.vi v8, v8, 9 2269; CHECK-NEXT: ret 2270entry: 2271 %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16( 2272 <vscale x 8 x i16> %0, 2273 i64 9, 2274 i64 %1) 2275 2276 ret <vscale x 8 x i16> %a 2277} 2278 2279define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { 2280; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16: 2281; CHECK: # %bb.0: # %entry 2282; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu 2283; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t 2284; CHECK-NEXT: ret 2285entry: 2286 %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16( 2287 <vscale x 8 x i16> %0, 2288 <vscale x 8 x i16> %1, 2289 i64 9, 2290 <vscale x 8 x i1> %2, 2291 i64 %3) 2292 2293 ret <vscale x 8 x i16> %a 2294} 2295 2296define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind { 2297; CHECK-LABEL: intrinsic_vsll_vi_nxv16i16_nxv16i16_i16: 2298; CHECK: # %bb.0: # %entry 2299; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu 2300; CHECK-NEXT: vsll.vi v8, v8, 9 2301; CHECK-NEXT: ret 2302entry: 2303 %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16( 2304 <vscale x 16 x i16> 
%0, 2305 i64 9, 2306 i64 %1) 2307 2308 ret <vscale x 16 x i16> %a 2309} 2310 2311define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { 2312; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16: 2313; CHECK: # %bb.0: # %entry 2314; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu 2315; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t 2316; CHECK-NEXT: ret 2317entry: 2318 %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16( 2319 <vscale x 16 x i16> %0, 2320 <vscale x 16 x i16> %1, 2321 i64 9, 2322 <vscale x 16 x i1> %2, 2323 i64 %3) 2324 2325 ret <vscale x 16 x i16> %a 2326} 2327 2328define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind { 2329; CHECK-LABEL: intrinsic_vsll_vi_nxv32i16_nxv32i16_i16: 2330; CHECK: # %bb.0: # %entry 2331; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu 2332; CHECK-NEXT: vsll.vi v8, v8, 9 2333; CHECK-NEXT: ret 2334entry: 2335 %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16( 2336 <vscale x 32 x i16> %0, 2337 i64 9, 2338 i64 %1) 2339 2340 ret <vscale x 32 x i16> %a 2341} 2342 2343define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind { 2344; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16: 2345; CHECK: # %bb.0: # %entry 2346; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu 2347; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t 2348; CHECK-NEXT: ret 2349entry: 2350 %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16( 2351 <vscale x 32 x i16> %0, 2352 <vscale x 32 x i16> %1, 2353 i64 9, 2354 <vscale x 32 x i1> %2, 2355 i64 %3) 2356 2357 ret <vscale x 32 x i16> %a 2358} 2359 2360define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind { 2361; CHECK-LABEL: intrinsic_vsll_vi_nxv1i32_nxv1i32_i32: 2362; CHECK: # %bb.0: # %entry 2363; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu 2364; CHECK-NEXT: vsll.vi v8, v8, 9 2365; CHECK-NEXT: ret 2366entry: 2367 %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32( 2368 <vscale x 1 x i32> %0, 2369 i64 9, 2370 i64 %1) 2371 2372 ret <vscale x 1 x i32> %a 2373} 2374 2375define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { 2376; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32: 2377; CHECK: # %bb.0: # %entry 2378; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu 2379; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 2380; CHECK-NEXT: ret 2381entry: 2382 %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32( 2383 <vscale x 1 x i32> %0, 2384 <vscale x 1 x i32> %1, 2385 i64 9, 2386 <vscale x 1 x i1> %2, 2387 i64 %3) 2388 2389 ret <vscale x 1 x i32> %a 2390} 2391 2392define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind { 2393; CHECK-LABEL: intrinsic_vsll_vi_nxv2i32_nxv2i32_i32: 2394; CHECK: # %bb.0: # %entry 2395; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu 2396; CHECK-NEXT: vsll.vi v8, v8, 9 2397; CHECK-NEXT: ret 2398entry: 2399 %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32( 2400 <vscale x 2 x i32> %0, 2401 i64 9, 2402 i64 %1) 2403 2404 ret <vscale x 2 x i32> %a 2405} 2406 2407define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { 2408; CHECK-LABEL: 
intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32: 2409; CHECK: # %bb.0: # %entry 2410; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu 2411; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 2412; CHECK-NEXT: ret 2413entry: 2414 %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32( 2415 <vscale x 2 x i32> %0, 2416 <vscale x 2 x i32> %1, 2417 i64 9, 2418 <vscale x 2 x i1> %2, 2419 i64 %3) 2420 2421 ret <vscale x 2 x i32> %a 2422} 2423 2424define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind { 2425; CHECK-LABEL: intrinsic_vsll_vi_nxv4i32_nxv4i32_i32: 2426; CHECK: # %bb.0: # %entry 2427; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu 2428; CHECK-NEXT: vsll.vi v8, v8, 9 2429; CHECK-NEXT: ret 2430entry: 2431 %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32( 2432 <vscale x 4 x i32> %0, 2433 i64 9, 2434 i64 %1) 2435 2436 ret <vscale x 4 x i32> %a 2437} 2438 2439define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { 2440; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32: 2441; CHECK: # %bb.0: # %entry 2442; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu 2443; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t 2444; CHECK-NEXT: ret 2445entry: 2446 %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32( 2447 <vscale x 4 x i32> %0, 2448 <vscale x 4 x i32> %1, 2449 i64 9, 2450 <vscale x 4 x i1> %2, 2451 i64 %3) 2452 2453 ret <vscale x 4 x i32> %a 2454} 2455 2456define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind { 2457; CHECK-LABEL: intrinsic_vsll_vi_nxv8i32_nxv8i32_i32: 2458; CHECK: # %bb.0: # %entry 2459; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu 2460; CHECK-NEXT: vsll.vi v8, v8, 9 2461; CHECK-NEXT: ret 2462entry: 2463 %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32( 2464 <vscale x 8 x i32> %0, 2465 i64 9, 2466 i64 %1) 2467 2468 ret <vscale x 8 x i32> %a 2469} 2470 2471define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { 2472; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32: 2473; CHECK: # %bb.0: # %entry 2474; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu 2475; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t 2476; CHECK-NEXT: ret 2477entry: 2478 %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32( 2479 <vscale x 8 x i32> %0, 2480 <vscale x 8 x i32> %1, 2481 i64 9, 2482 <vscale x 8 x i1> %2, 2483 i64 %3) 2484 2485 ret <vscale x 8 x i32> %a 2486} 2487 2488define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind { 2489; CHECK-LABEL: intrinsic_vsll_vi_nxv16i32_nxv16i32_i32: 2490; CHECK: # %bb.0: # %entry 2491; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu 2492; CHECK-NEXT: vsll.vi v8, v8, 9 2493; CHECK-NEXT: ret 2494entry: 2495 %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32( 2496 <vscale x 16 x i32> %0, 2497 i64 9, 2498 i64 %1) 2499 2500 ret <vscale x 16 x i32> %a 2501} 2502 2503define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { 2504; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32: 2505; CHECK: # %bb.0: # %entry 2506; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu 2507; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t 2508; CHECK-NEXT: ret 2509entry: 2510 %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32( 2511 <vscale x 
16 x i32> %0, 2512 <vscale x 16 x i32> %1, 2513 i64 9, 2514 <vscale x 16 x i1> %2, 2515 i64 %3) 2516 2517 ret <vscale x 16 x i32> %a 2518} 2519 2520define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind { 2521; CHECK-LABEL: intrinsic_vsll_vi_nxv1i64_nxv1i64_i64: 2522; CHECK: # %bb.0: # %entry 2523; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu 2524; CHECK-NEXT: vsll.vi v8, v8, 9 2525; CHECK-NEXT: ret 2526entry: 2527 %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64( 2528 <vscale x 1 x i64> %0, 2529 i64 9, 2530 i64 %1) 2531 2532 ret <vscale x 1 x i64> %a 2533} 2534 2535define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { 2536; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64: 2537; CHECK: # %bb.0: # %entry 2538; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu 2539; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t 2540; CHECK-NEXT: ret 2541entry: 2542 %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64( 2543 <vscale x 1 x i64> %0, 2544 <vscale x 1 x i64> %1, 2545 i64 9, 2546 <vscale x 1 x i1> %2, 2547 i64 %3) 2548 2549 ret <vscale x 1 x i64> %a 2550} 2551 2552define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind { 2553; CHECK-LABEL: intrinsic_vsll_vi_nxv2i64_nxv2i64_i64: 2554; CHECK: # %bb.0: # %entry 2555; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu 2556; CHECK-NEXT: vsll.vi v8, v8, 9 2557; CHECK-NEXT: ret 2558entry: 2559 %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64( 2560 <vscale x 2 x i64> %0, 2561 i64 9, 2562 i64 %1) 2563 2564 ret <vscale x 2 x i64> %a 2565} 2566 2567define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { 2568; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64: 2569; CHECK: # %bb.0: # %entry 2570; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu 2571; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t 2572; CHECK-NEXT: ret 2573entry: 2574 %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64( 2575 <vscale x 2 x i64> %0, 2576 <vscale x 2 x i64> %1, 2577 i64 9, 2578 <vscale x 2 x i1> %2, 2579 i64 %3) 2580 2581 ret <vscale x 2 x i64> %a 2582} 2583 2584define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind { 2585; CHECK-LABEL: intrinsic_vsll_vi_nxv4i64_nxv4i64_i64: 2586; CHECK: # %bb.0: # %entry 2587; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu 2588; CHECK-NEXT: vsll.vi v8, v8, 9 2589; CHECK-NEXT: ret 2590entry: 2591 %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64( 2592 <vscale x 4 x i64> %0, 2593 i64 9, 2594 i64 %1) 2595 2596 ret <vscale x 4 x i64> %a 2597} 2598 2599define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { 2600; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64: 2601; CHECK: # %bb.0: # %entry 2602; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu 2603; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t 2604; CHECK-NEXT: ret 2605entry: 2606 %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64( 2607 <vscale x 4 x i64> %0, 2608 <vscale x 4 x i64> %1, 2609 i64 9, 2610 <vscale x 4 x i1> %2, 2611 i64 %3) 2612 2613 ret <vscale x 4 x i64> %a 2614} 2615 2616define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind { 2617; CHECK-LABEL: 
intrinsic_vsll_vi_nxv8i64_nxv8i64_i64: 2618; CHECK: # %bb.0: # %entry 2619; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu 2620; CHECK-NEXT: vsll.vi v8, v8, 9 2621; CHECK-NEXT: ret 2622entry: 2623 %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64( 2624 <vscale x 8 x i64> %0, 2625 i64 9, 2626 i64 %1) 2627 2628 ret <vscale x 8 x i64> %a 2629} 2630 2631define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { 2632; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64: 2633; CHECK: # %bb.0: # %entry 2634; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu 2635; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t 2636; CHECK-NEXT: ret 2637entry: 2638 %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64( 2639 <vscale x 8 x i64> %0, 2640 <vscale x 8 x i64> %1, 2641 i64 9, 2642 <vscale x 8 x i1> %2, 2643 i64 %3) 2644 2645 ret <vscale x 8 x i64> %a 2646} 2647