; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

; Test codegen for the llvm.riscv.vfncvt.xu.f.w intrinsics (unmasked and
; masked), which narrow f16/f32/f64 vectors to unsigned i8/i16/i32 vectors,
; across all supported LMULs.

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x half>,
  i64);

define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x half> %0,
    i64 %1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
  <vscale x 2 x half>,
  i64);

define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
    <vscale x 2 x half> %0,
    i64 %1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
  <vscale x 2 x i8>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
    <vscale x 4 x half> %0,
    i64 %1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
  <vscale x 4 x i8>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
  <vscale x 8 x half>,
  i64);

define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
    <vscale x 8 x half> %0,
    i64 %1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
  <vscale x 8 x i8>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
  <vscale x 16 x half>,
  i64);

define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
    <vscale x 16 x half> %0,
    i64 %1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
  <vscale x 16 x i8>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    i64 %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
  <vscale x 32 x half>,
  i64);

define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
    <vscale x 32 x half> %0,
    i64 %1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
  <vscale x 32 x i8>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i1> %2,
    i64 %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
  <vscale x 1 x float>,
  i64);

define <vscale x 1 x i16> @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
    <vscale x 1 x float> %0,
    i64 %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
  <vscale x 1 x i16>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x i16> @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
    <vscale x 2 x float> %0,
    i64 %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
  <vscale x 2 x i16>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
  <vscale x 4 x float>,
  i64);

define <vscale x 4 x i16> @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
    <vscale x 4 x float> %0,
    i64 %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
  <vscale x 4 x i16>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
  <vscale x 8 x float>,
  i64);

define <vscale x 8 x i16> @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
    <vscale x 8 x float> %0,
    i64 %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
  <vscale x 8 x i16>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
  <vscale x 16 x float>,
  i64);

define <vscale x 16 x i16> @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
    <vscale x 16 x float> %0,
    i64 %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
  <vscale x 16 x i16>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    i64 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
  <vscale x 1 x double>,
  i64);

define <vscale x 1 x i32> @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
    <vscale x 1 x double> %0,
    i64 %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
  <vscale x 1 x i32>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
  <vscale x 2 x double>,
  i64);

define <vscale x 2 x i32> @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
    <vscale x 2 x double> %0,
    i64 %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
  <vscale x 2 x i32>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
  <vscale x 4 x double>,
  i64);

define <vscale x 4 x i32> @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
    <vscale x 4 x double> %0,
    i64 %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
  <vscale x 4 x i32>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
  <vscale x 8 x double>,
  i64);

define <vscale x 8 x i32> @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
    <vscale x 8 x double> %0,
    i64 %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
  <vscale x 8 x i32>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x i32> %a
}