; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i8> @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i8> @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i8> @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i8> @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i8> @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i8> @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i8> @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i8> @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32);

define <vscale x 64 x i8> @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i32 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  i32);

define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, mu
; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    i32 %4)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i16> @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i16> @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i16> @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i16> @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i16> @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i32);

define <vscale x 32 x i16> @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i32> @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i32> @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i32> @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i32> @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32);

define <vscale x 16 x i32> @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vssra.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
  <vscale x 1 x i8>,
  i32,
  i32);

define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
    <vscale x 1 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
  <vscale x 2 x i8>,
  i32,
  i32);

define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
    <vscale x 2 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
  <vscale x 4 x i8>,
  i32,
  i32);

define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
    <vscale x 4 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
  <vscale x 8 x i8>,
  i32,
  i32);

define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
    <vscale x 8 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
  <vscale x 16 x i8>,
  i32,
  i32);

define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
    <vscale x 16 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
  <vscale x 32 x i8>,
  i32,
  i32);

define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
    <vscale x 32 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
  <vscale x 64 x i8>,
  i32,
  i32);

define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
    <vscale x 64 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32,
  <vscale x 64 x i1>,
  i32);

define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, mu
; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i32 %2,
    <vscale x 64 x i1> %3,
    i32 %4)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
  <vscale x 1 x i16>,
  i32,
  i32);

define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
    <vscale x 1 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
  <vscale x 2 x i16>,
  i32,
  i32);

define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
    <vscale x 2 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
  <vscale x 4 x i16>,
  i32,
  i32);

define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
    <vscale x 4 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
  <vscale x 8 x i16>,
  i32,
  i32);

define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
    <vscale x 8 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
  <vscale x 16 x i16>,
  i32,
  i32);

define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
    <vscale x 16 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
  <vscale x 32 x i16>,
  i32,
  i32);

define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
    <vscale x 32 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i32,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i32 %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
  <vscale x 1 x i32>,
  i32,
  i32);

define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
  <vscale x 2 x i32>,
  i32,
  i32);

define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
  <vscale x 4 x i32>,
  i32,
  i32);

define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
  <vscale x 8 x i32>,
  i32,
  i32);

define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
  <vscale x 16 x i32>,
  i32,
  i32);

define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
  <vscale x 1 x i64>,
  i32,
  i32);

define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
    <vscale x 1 x i64> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
  <vscale x 2 x i64>,
  i32,
  i32);

define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
    <vscale x 2 x i64> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
  <vscale x 4 x i64>,
  i32,
  i32);

define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
    <vscale x 4 x i64> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
  <vscale x 8 x i64>,
  i32,
  i32);

define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
    <vscale x 8 x i64> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i64> %a
}

define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vssra_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vssra.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
    <vscale x 1 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vssra_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vssra.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
    <vscale x 2 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vssra_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vssra.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
    <vscale x 4 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vssra_vi_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vssra.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
    <vscale x 8 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 8 x i8> %a
}

1878define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind { 1879; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8: 1880; CHECK: # %bb.0: # %entry 1881; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu 1882; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t 1883; CHECK-NEXT: ret 1884entry: 1885 %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8( 1886 <vscale x 8 x i8> %0, 1887 <vscale x 8 x i8> %1, 1888 i32 9, 1889 <vscale x 8 x i1> %2, 1890 i32 %3) 1891 1892 ret <vscale x 8 x i8> %a 1893} 1894 1895define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind { 1896; CHECK-LABEL: intrinsic_vssra_vi_nxv16i8_nxv16i8_i8: 1897; CHECK: # %bb.0: # %entry 1898; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu 1899; CHECK-NEXT: vssra.vi v8, v8, 9 1900; CHECK-NEXT: ret 1901entry: 1902 %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8( 1903 <vscale x 16 x i8> %0, 1904 i32 9, 1905 i32 %1) 1906 1907 ret <vscale x 16 x i8> %a 1908} 1909 1910define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind { 1911; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8: 1912; CHECK: # %bb.0: # %entry 1913; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu 1914; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t 1915; CHECK-NEXT: ret 1916entry: 1917 %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8( 1918 <vscale x 16 x i8> %0, 1919 <vscale x 16 x i8> %1, 1920 i32 9, 1921 <vscale x 16 x i1> %2, 1922 i32 %3) 1923 1924 ret <vscale x 16 x i8> %a 1925} 1926 1927define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind { 1928; CHECK-LABEL: intrinsic_vssra_vi_nxv32i8_nxv32i8_i8: 1929; CHECK: # %bb.0: # %entry 1930; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu 1931; CHECK-NEXT: vssra.vi v8, v8, 9 1932; CHECK-NEXT: ret 1933entry: 1934 %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8( 1935 <vscale x 32 x i8> %0, 1936 i32 9, 1937 i32 %1) 1938 1939 ret <vscale x 32 x i8> %a 1940} 1941 1942define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind { 1943; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8: 1944; CHECK: # %bb.0: # %entry 1945; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu 1946; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t 1947; CHECK-NEXT: ret 1948entry: 1949 %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8( 1950 <vscale x 32 x i8> %0, 1951 <vscale x 32 x i8> %1, 1952 i32 9, 1953 <vscale x 32 x i1> %2, 1954 i32 %3) 1955 1956 ret <vscale x 32 x i8> %a 1957} 1958 1959define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind { 1960; CHECK-LABEL: intrinsic_vssra_vi_nxv64i8_nxv64i8_i8: 1961; CHECK: # %bb.0: # %entry 1962; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu 1963; CHECK-NEXT: vssra.vi v8, v8, 9 1964; CHECK-NEXT: ret 1965entry: 1966 %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8( 1967 <vscale x 64 x i8> %0, 1968 i32 9, 1969 i32 %1) 1970 1971 ret <vscale x 64 x i8> %a 1972} 1973 1974define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind { 1975; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8: 1976; CHECK: # %bb.0: # %entry 1977; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, 
mu 1978; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t 1979; CHECK-NEXT: ret 1980entry: 1981 %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8( 1982 <vscale x 64 x i8> %0, 1983 <vscale x 64 x i8> %1, 1984 i32 9, 1985 <vscale x 64 x i1> %2, 1986 i32 %3) 1987 1988 ret <vscale x 64 x i8> %a 1989} 1990 1991define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind { 1992; CHECK-LABEL: intrinsic_vssra_vi_nxv1i16_nxv1i16_i16: 1993; CHECK: # %bb.0: # %entry 1994; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu 1995; CHECK-NEXT: vssra.vi v8, v8, 9 1996; CHECK-NEXT: ret 1997entry: 1998 %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16( 1999 <vscale x 1 x i16> %0, 2000 i32 9, 2001 i32 %1) 2002 2003 ret <vscale x 1 x i16> %a 2004} 2005 2006define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind { 2007; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16: 2008; CHECK: # %bb.0: # %entry 2009; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu 2010; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t 2011; CHECK-NEXT: ret 2012entry: 2013 %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16( 2014 <vscale x 1 x i16> %0, 2015 <vscale x 1 x i16> %1, 2016 i32 9, 2017 <vscale x 1 x i1> %2, 2018 i32 %3) 2019 2020 ret <vscale x 1 x i16> %a 2021} 2022 2023define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind { 2024; CHECK-LABEL: intrinsic_vssra_vi_nxv2i16_nxv2i16_i16: 2025; CHECK: # %bb.0: # %entry 2026; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu 2027; CHECK-NEXT: vssra.vi v8, v8, 9 2028; CHECK-NEXT: ret 2029entry: 2030 %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16( 2031 <vscale x 2 x i16> %0, 2032 i32 9, 2033 i32 %1) 2034 2035 ret <vscale x 2 x i16> %a 2036} 2037 2038define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind { 2039; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16: 2040; CHECK: # %bb.0: # %entry 2041; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu 2042; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t 2043; CHECK-NEXT: ret 2044entry: 2045 %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16( 2046 <vscale x 2 x i16> %0, 2047 <vscale x 2 x i16> %1, 2048 i32 9, 2049 <vscale x 2 x i1> %2, 2050 i32 %3) 2051 2052 ret <vscale x 2 x i16> %a 2053} 2054 2055define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind { 2056; CHECK-LABEL: intrinsic_vssra_vi_nxv4i16_nxv4i16_i16: 2057; CHECK: # %bb.0: # %entry 2058; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu 2059; CHECK-NEXT: vssra.vi v8, v8, 9 2060; CHECK-NEXT: ret 2061entry: 2062 %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16( 2063 <vscale x 4 x i16> %0, 2064 i32 9, 2065 i32 %1) 2066 2067 ret <vscale x 4 x i16> %a 2068} 2069 2070define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind { 2071; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16: 2072; CHECK: # %bb.0: # %entry 2073; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu 2074; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t 2075; CHECK-NEXT: ret 2076entry: 2077 %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16( 2078 <vscale x 4 x i16> %0, 2079 <vscale x 4 x i16> %1, 2080 i32 9, 2081 <vscale x 4 x i1> %2, 2082 i32 %3) 2083 2084 ret <vscale 
x 4 x i16> %a 2085} 2086 2087define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind { 2088; CHECK-LABEL: intrinsic_vssra_vi_nxv8i16_nxv8i16_i16: 2089; CHECK: # %bb.0: # %entry 2090; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu 2091; CHECK-NEXT: vssra.vi v8, v8, 9 2092; CHECK-NEXT: ret 2093entry: 2094 %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16( 2095 <vscale x 8 x i16> %0, 2096 i32 9, 2097 i32 %1) 2098 2099 ret <vscale x 8 x i16> %a 2100} 2101 2102define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind { 2103; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16: 2104; CHECK: # %bb.0: # %entry 2105; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu 2106; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t 2107; CHECK-NEXT: ret 2108entry: 2109 %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16( 2110 <vscale x 8 x i16> %0, 2111 <vscale x 8 x i16> %1, 2112 i32 9, 2113 <vscale x 8 x i1> %2, 2114 i32 %3) 2115 2116 ret <vscale x 8 x i16> %a 2117} 2118 2119define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind { 2120; CHECK-LABEL: intrinsic_vssra_vi_nxv16i16_nxv16i16_i16: 2121; CHECK: # %bb.0: # %entry 2122; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu 2123; CHECK-NEXT: vssra.vi v8, v8, 9 2124; CHECK-NEXT: ret 2125entry: 2126 %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16( 2127 <vscale x 16 x i16> %0, 2128 i32 9, 2129 i32 %1) 2130 2131 ret <vscale x 16 x i16> %a 2132} 2133 2134define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind { 2135; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16: 2136; CHECK: # %bb.0: # %entry 2137; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu 2138; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t 2139; CHECK-NEXT: ret 2140entry: 2141 %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16( 2142 <vscale x 16 x i16> %0, 2143 <vscale x 16 x i16> %1, 2144 i32 9, 2145 <vscale x 16 x i1> %2, 2146 i32 %3) 2147 2148 ret <vscale x 16 x i16> %a 2149} 2150 2151define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind { 2152; CHECK-LABEL: intrinsic_vssra_vi_nxv32i16_nxv32i16_i16: 2153; CHECK: # %bb.0: # %entry 2154; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu 2155; CHECK-NEXT: vssra.vi v8, v8, 9 2156; CHECK-NEXT: ret 2157entry: 2158 %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16( 2159 <vscale x 32 x i16> %0, 2160 i32 9, 2161 i32 %1) 2162 2163 ret <vscale x 32 x i16> %a 2164} 2165 2166define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind { 2167; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16: 2168; CHECK: # %bb.0: # %entry 2169; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu 2170; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t 2171; CHECK-NEXT: ret 2172entry: 2173 %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16( 2174 <vscale x 32 x i16> %0, 2175 <vscale x 32 x i16> %1, 2176 i32 9, 2177 <vscale x 32 x i1> %2, 2178 i32 %3) 2179 2180 ret <vscale x 32 x i16> %a 2181} 2182 2183define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind { 2184; CHECK-LABEL: intrinsic_vssra_vi_nxv1i32_nxv1i32_i32: 2185; CHECK: # %bb.0: # %entry 
2186; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu 2187; CHECK-NEXT: vssra.vi v8, v8, 9 2188; CHECK-NEXT: ret 2189entry: 2190 %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32( 2191 <vscale x 1 x i32> %0, 2192 i32 9, 2193 i32 %1) 2194 2195 ret <vscale x 1 x i32> %a 2196} 2197 2198define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind { 2199; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32: 2200; CHECK: # %bb.0: # %entry 2201; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu 2202; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t 2203; CHECK-NEXT: ret 2204entry: 2205 %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32( 2206 <vscale x 1 x i32> %0, 2207 <vscale x 1 x i32> %1, 2208 i32 9, 2209 <vscale x 1 x i1> %2, 2210 i32 %3) 2211 2212 ret <vscale x 1 x i32> %a 2213} 2214 2215define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind { 2216; CHECK-LABEL: intrinsic_vssra_vi_nxv2i32_nxv2i32_i32: 2217; CHECK: # %bb.0: # %entry 2218; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu 2219; CHECK-NEXT: vssra.vi v8, v8, 9 2220; CHECK-NEXT: ret 2221entry: 2222 %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32( 2223 <vscale x 2 x i32> %0, 2224 i32 9, 2225 i32 %1) 2226 2227 ret <vscale x 2 x i32> %a 2228} 2229 2230define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind { 2231; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32: 2232; CHECK: # %bb.0: # %entry 2233; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu 2234; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t 2235; CHECK-NEXT: ret 2236entry: 2237 %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32( 2238 <vscale x 2 x i32> %0, 2239 <vscale x 2 x i32> %1, 2240 i32 9, 2241 <vscale x 2 x i1> %2, 2242 i32 %3) 2243 2244 ret <vscale x 2 x i32> %a 2245} 2246 2247define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind { 2248; CHECK-LABEL: intrinsic_vssra_vi_nxv4i32_nxv4i32_i32: 2249; CHECK: # %bb.0: # %entry 2250; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu 2251; CHECK-NEXT: vssra.vi v8, v8, 9 2252; CHECK-NEXT: ret 2253entry: 2254 %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32( 2255 <vscale x 4 x i32> %0, 2256 i32 9, 2257 i32 %1) 2258 2259 ret <vscale x 4 x i32> %a 2260} 2261 2262define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind { 2263; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32: 2264; CHECK: # %bb.0: # %entry 2265; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu 2266; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t 2267; CHECK-NEXT: ret 2268entry: 2269 %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32( 2270 <vscale x 4 x i32> %0, 2271 <vscale x 4 x i32> %1, 2272 i32 9, 2273 <vscale x 4 x i1> %2, 2274 i32 %3) 2275 2276 ret <vscale x 4 x i32> %a 2277} 2278 2279define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind { 2280; CHECK-LABEL: intrinsic_vssra_vi_nxv8i32_nxv8i32_i32: 2281; CHECK: # %bb.0: # %entry 2282; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu 2283; CHECK-NEXT: vssra.vi v8, v8, 9 2284; CHECK-NEXT: ret 2285entry: 2286 %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32( 2287 <vscale x 8 x i32> %0, 2288 i32 9, 2289 i32 %1) 2290 2291 ret <vscale x 8 x i32> %a 2292} 
2293 2294define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind { 2295; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32: 2296; CHECK: # %bb.0: # %entry 2297; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu 2298; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t 2299; CHECK-NEXT: ret 2300entry: 2301 %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32( 2302 <vscale x 8 x i32> %0, 2303 <vscale x 8 x i32> %1, 2304 i32 9, 2305 <vscale x 8 x i1> %2, 2306 i32 %3) 2307 2308 ret <vscale x 8 x i32> %a 2309} 2310 2311define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind { 2312; CHECK-LABEL: intrinsic_vssra_vi_nxv16i32_nxv16i32_i32: 2313; CHECK: # %bb.0: # %entry 2314; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu 2315; CHECK-NEXT: vssra.vi v8, v8, 9 2316; CHECK-NEXT: ret 2317entry: 2318 %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32( 2319 <vscale x 16 x i32> %0, 2320 i32 9, 2321 i32 %1) 2322 2323 ret <vscale x 16 x i32> %a 2324} 2325 2326define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind { 2327; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32: 2328; CHECK: # %bb.0: # %entry 2329; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu 2330; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t 2331; CHECK-NEXT: ret 2332entry: 2333 %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32( 2334 <vscale x 16 x i32> %0, 2335 <vscale x 16 x i32> %1, 2336 i32 9, 2337 <vscale x 16 x i1> %2, 2338 i32 %3) 2339 2340 ret <vscale x 16 x i32> %a 2341} 2342