; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
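; These tests exercise the llvm.riscv.vnsrl (vector narrowing shift right
; logical) intrinsics in their vector-vector (wv), scalar (wx), and
; immediate (wi) forms, unmasked and masked, across SEW/LMUL combinations.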
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i64);

define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  i64);

define <vscale x 2 x i8> @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  i64);

define <vscale x 4 x i8> @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  i64);

define <vscale x 8 x i8> @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vnsrl.wv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  i64);

define <vscale x 16 x i8> @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vnsrl.wv v26, v8, v12
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    i64 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  i64);

define <vscale x 32 x i8> @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vnsrl.wv v28, v8, v16
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    i64 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x i16> @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  i64);

define <vscale x 2 x i16> @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x i16> @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vnsrl.wv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x i16> @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vnsrl.wv v26, v8, v12
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x i16> @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vnsrl.wv v28, v8, v16
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  i64);

define <vscale x 1 x i32> @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  i64);

define <vscale x 2 x i32> @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vnsrl.wv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  i64);

define <vscale x 4 x i32> @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vnsrl.wv v26, v8, v12
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  i64);

define <vscale x 8 x i32> @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vnsrl.wv v28, v8, v16
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
  <vscale x 1 x i16>,
  i64,
  i64);

define <vscale x 1 x i8> @intrinsic_vnsrl_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
    <vscale x 1 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
  <vscale x 2 x i16>,
  i64,
  i64);

define <vscale x 2 x i8> @intrinsic_vnsrl_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
    <vscale x 2 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
  <vscale x 4 x i16>,
  i64,
  i64);

define <vscale x 4 x i8> @intrinsic_vnsrl_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
    <vscale x 4 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  i64,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
  <vscale x 8 x i16>,
  i64,
  i64);

define <vscale x 8 x i8> @intrinsic_vnsrl_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vnsrl.wx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
    <vscale x 8 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  i64,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
  <vscale x 16 x i16>,
  i64,
  i64);

define <vscale x 16 x i8> @intrinsic_vnsrl_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vnsrl.wx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
    <vscale x 16 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  i64,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
  <vscale x 32 x i16>,
  i64,
  i64);

define <vscale x 32 x i8> @intrinsic_vnsrl_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vnsrl.wx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
    <vscale x 32 x i16> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  i64,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    i64 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
  <vscale x 1 x i32>,
  i64,
  i64);

define <vscale x 1 x i16> @intrinsic_vnsrl_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
    <vscale x 1 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
  <vscale x 2 x i32>,
  i64,
  i64);

define <vscale x 2 x i16> @intrinsic_vnsrl_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
    <vscale x 2 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
  <vscale x 4 x i32>,
  i64,
  i64);

define <vscale x 4 x i16> @intrinsic_vnsrl_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vnsrl.wx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
    <vscale x 4 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  i64,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
  <vscale x 8 x i32>,
  i64,
  i64);

define <vscale x 8 x i16> @intrinsic_vnsrl_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vnsrl.wx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
    <vscale x 8 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  i64,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
  <vscale x 16 x i32>,
  i64,
  i64);

define <vscale x 16 x i16> @intrinsic_vnsrl_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vnsrl.wx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
    <vscale x 16 x i32> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  i64,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    i64 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
  <vscale x 1 x i64>,
  i64,
  i64);

define <vscale x 1 x i32> @intrinsic_vnsrl_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
  <vscale x 2 x i64>,
  i64,
  i64);

define <vscale x 2 x i32> @intrinsic_vnsrl_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vnsrl.wx v25, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
  <vscale x 4 x i64>,
  i64,
  i64);

define <vscale x 4 x i32> @intrinsic_vnsrl_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vnsrl.wx v26, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
  <vscale x 8 x i64>,
  i64,
  i64);

define <vscale x 8 x i32> @intrinsic_vnsrl_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vnsrl.wx v28, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
    <vscale x 1 x i16> %0,
    i64 9,
    i64 %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
    <vscale x 2 x i16> %0,
    i64 9,
    i64 %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
    <vscale x 4 x i16> %0,
    i64 9,
    i64 %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vnsrl.wi v25, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
    <vscale x 8 x i16> %0,
    i64 9,
    i64 %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vnsrl.wi v26, v8, 9
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
    <vscale x 16 x i16> %0,
    i64 9,
    i64 %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    i64 9,
    <vscale x 16 x i1> %2,
    i64 %3)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vnsrl.wi v28, v8, 9
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
    <vscale x 32 x i16> %0,
    i64 9,
    i64 %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    i64 9,
    <vscale x 32 x i1> %2,
    i64 %3)

  ret <vscale x 32 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
    <vscale x 1 x i32> %0,
    i64 9,
    i64 %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
    <vscale x 2 x i32> %0,
    i64 9,
    i64 %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vnsrl.wi v25, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
    <vscale x 4 x i32> %0,
    i64 9,
    i64 %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vnsrl.wi v26, v8, 9
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
    <vscale x 8 x i32> %0,
    i64 9,
    i64 %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vnsrl.wi v28, v8, 9
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
    <vscale x 16 x i32> %0,
    i64 9,
    i64 %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    i64 9,
    <vscale x 16 x i1> %2,
    i64 %3)

  ret <vscale x 16 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
    <vscale x 1 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vnsrl.wi v25, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
    <vscale x 2 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vnsrl.wi v26, v8, 9
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
    <vscale x 4 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vnsrl.wi v28, v8, 9
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
    <vscale x 8 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x i32> %a
}