; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
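
; This file exercises the llvm.riscv.vzext intrinsics, unmasked and masked,
; for the vf8, vf4, and vf2 extension factors at each legal LMUL on riscv32.

; vzext.vf8: i8 -> i64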
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf8 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
    <vscale x 1 x i8> %0,
    i32 %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vzext.vf8 v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
    <vscale x 2 x i8> %0,
    i32 %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vzext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vzext.vf8 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %0,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vzext.vf8 v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
    <vscale x 4 x i8> %0,
    i32 %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vzext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vzext.vf8 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %0,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vzext.vf8 v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
    <vscale x 8 x i8> %0,
    i32 %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vzext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vzext.vf8 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
    <vscale x 8 x i64> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %0,
    i32 %3)

  ret <vscale x 8 x i64> %a
}

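; vzext.vf4: i16 -> i64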
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf4 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
    <vscale x 1 x i16> %0,
    i32 %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %0,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vzext.vf4 v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
    <vscale x 2 x i16> %0,
    i32 %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vzext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %0,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vzext.vf4 v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
    <vscale x 4 x i16> %0,
    i32 %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vzext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %0,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vzext.vf4 v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
    <vscale x 8 x i16> %0,
    i32 %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vzext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
    <vscale x 8 x i64> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %0,
    i32 %3)

  ret <vscale x 8 x i64> %a
}

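; vzext.vf4: i8 -> i32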
declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i32> @intrinsic_vzext_vf4_nxv1i32(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vzext.vf4 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
    <vscale x 1 x i8> %0,
    i32 %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vzext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i32> @intrinsic_vzext_vf4_nxv2i32(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vzext.vf4 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
    <vscale x 2 x i8> %0,
    i32 %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vzext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %0,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i32> @intrinsic_vzext_vf4_nxv4i32(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vzext.vf4 v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
    <vscale x 4 x i8> %0,
    i32 %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vzext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %0,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i32> @intrinsic_vzext_vf4_nxv8i32(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vzext.vf4 v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
    <vscale x 8 x i8> %0,
    i32 %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vzext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %0,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i32> @intrinsic_vzext_vf4_nxv16i32(<vscale x 16 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vzext.vf4 v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
    <vscale x 16 x i8> %0,
    i32 %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vzext_mask_vf4_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
    <vscale x 16 x i32> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %0,
    i32 %3)

  ret <vscale x 16 x i32> %a
}

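; vzext.vf2: i32 -> i64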
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf2 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
    <vscale x 1 x i32> %0,
    i32 %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %0,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vzext.vf2 v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
    <vscale x 2 x i32> %0,
    i32 %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vzext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %0,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vzext.vf2 v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
    <vscale x 4 x i32> %0,
    i32 %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vzext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %0,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
    <vscale x 8 x i32> %0,
    i32 %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vzext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %0,
    i32 %3)

  ret <vscale x 8 x i64> %a
}

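; vzext.vf2: i16 -> i32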
declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i32> @intrinsic_vzext_vf2_nxv1i32(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vzext.vf2 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
    <vscale x 1 x i16> %0,
    i32 %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vzext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %0,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i32> @intrinsic_vzext_vf2_nxv2i32(<vscale x 2 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vzext.vf2 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
    <vscale x 2 x i16> %0,
    i32 %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vzext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %0,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i32> @intrinsic_vzext_vf2_nxv4i32(<vscale x 4 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vzext.vf2 v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
    <vscale x 4 x i16> %0,
    i32 %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vzext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %0,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i32> @intrinsic_vzext_vf2_nxv8i32(<vscale x 8 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vzext.vf2 v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
    <vscale x 8 x i16> %0,
    i32 %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vzext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %0,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i32> @intrinsic_vzext_vf2_nxv16i32(<vscale x 16 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
    <vscale x 16 x i16> %0,
    i32 %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vzext_mask_vf2_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %0,
    i32 %3)

  ret <vscale x 16 x i32> %a
}

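; vzext.vf2: i8 -> i16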
declare <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i16> @intrinsic_vzext_vf2_nxv1i16(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vzext.vf2 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    i32 %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vzext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    i32 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i16> @intrinsic_vzext_vf2_nxv2i16(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vzext.vf2 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
    <vscale x 2 x i8> %0,
    i32 %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vzext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %0,
    i32 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i16> @intrinsic_vzext_vf2_nxv4i16(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vzext.vf2 v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
    <vscale x 4 x i8> %0,
    i32 %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vzext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %0,
    i32 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i16> @intrinsic_vzext_vf2_nxv8i16(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vzext.vf2 v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
    <vscale x 8 x i8> %0,
    i32 %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vzext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %0,
    i32 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i16> @intrinsic_vzext_vf2_nxv16i16(<vscale x 16 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vzext.vf2 v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
    <vscale x 16 x i8> %0,
    i32 %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vzext_mask_vf2_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %0,
    i32 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i16> @intrinsic_vzext_vf2_nxv32i16(<vscale x 32 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
    <vscale x 32 x i8> %0,
    i32 %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vzext_mask_vf2_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %0,
    i32 %3)

  ret <vscale x 32 x i16> %a
}