1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
3; RUN:   < %s | FileCheck %s
; Unmasked vfwcvt.f.f.v: widen each f16 element of %0 to f32, under VL %1.
; Codegen widens into a scratch register (v25) and copies it back to v8.
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
  <vscale x 1 x half>,
  i64);

define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
    <vscale x 1 x half> %0,
    i64 %1)

  ret <vscale x 1 x float> %a
}
22
; Masked vfwcvt.f.f.v: %0 sits in the destination group (v8) and is updated in
; place under mask %2 (v0.t) with tail-undisturbed (tu) policy.
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x float> %a
}
44
; Unmasked vfwcvt.f.f.v: widen nxv2f16 -> nxv2f32 under VL %1 (e16/mf2).
declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
  <vscale x 2 x half>,
  i64);

define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
    <vscale x 2 x half> %0,
    i64 %1)

  ret <vscale x 2 x float> %a
}
63
; Masked vfwcvt.f.f.v nxv2f16 -> nxv2f32: destination %0 updated in place in
; v8 under mask %2 (v0.t), tail-undisturbed (tu).
declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x float> %a
}
85
; Unmasked vfwcvt.f.f.v: widen nxv4f16 -> nxv4f32 under VL %1 (e16/m1);
; result occupies a 2-register group (v26, copied back with vmv2r).
declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x float> @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
    <vscale x 4 x half> %0,
    i64 %1)

  ret <vscale x 4 x float> %a
}
104
; Masked vfwcvt.f.f.v nxv4f16 -> nxv4f32: destination %0 updated in place in
; v8 under mask %2 (v0.t), tail-undisturbed (tu); source is in v10.
declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x float> %a
}
126
; Unmasked vfwcvt.f.f.v: widen nxv8f16 -> nxv8f32 under VL %1 (e16/m2);
; result occupies a 4-register group (v28, copied back with vmv4r).
declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
  <vscale x 8 x half>,
  i64);

define <vscale x 8 x float> @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
    <vscale x 8 x half> %0,
    i64 %1)

  ret <vscale x 8 x float> %a
}
145
; Masked vfwcvt.f.f.v nxv8f16 -> nxv8f32: destination %0 updated in place in
; v8 under mask %2 (v0.t), tail-undisturbed (tu); source is in v12.
declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x float> %a
}
167
; Unmasked vfwcvt.f.f.v: widen nxv16f16 -> nxv16f32 under VL %1 (e16/m4);
; result occupies an 8-register group (v16, copied back with vmv8r).
declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
  <vscale x 16 x half>,
  i64);

define <vscale x 16 x float> @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
    <vscale x 16 x half> %0,
    i64 %1)

  ret <vscale x 16 x float> %a
}
186
; Masked vfwcvt.f.f.v nxv16f16 -> nxv16f32: destination %0 updated in place in
; v8 under mask %2 (v0.t), tail-undisturbed (tu); source is in v16.
declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    i64 %3)

  ret <vscale x 16 x float> %a
}
208
; Unmasked vfwcvt.f.f.v: widen nxv1f32 -> nxv1f64 under VL %1 (e32/mf2).
declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
  <vscale x 1 x float>,
  i64);

define <vscale x 1 x double> @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
    <vscale x 1 x float> %0,
    i64 %1)

  ret <vscale x 1 x double> %a
}
227
; Masked vfwcvt.f.f.v nxv1f32 -> nxv1f64: destination %0 updated in place in
; v8 under mask %2 (v0.t), tail-undisturbed (tu).
declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x double> %a
}
249
; Unmasked vfwcvt.f.f.v: widen nxv2f32 -> nxv2f64 under VL %1 (e32/m1);
; result occupies a 2-register group (v26, copied back with vmv2r).
declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x double> @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
    <vscale x 2 x float> %0,
    i64 %1)

  ret <vscale x 2 x double> %a
}
268
; Masked vfwcvt.f.f.v nxv2f32 -> nxv2f64: destination %0 updated in place in
; v8 under mask %2 (v0.t), tail-undisturbed (tu); source is in v10.
declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x double> %a
}
290
; Unmasked vfwcvt.f.f.v: widen nxv4f32 -> nxv4f64 under VL %1 (e32/m2);
; result occupies a 4-register group (v28, copied back with vmv4r).
declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
  <vscale x 4 x float>,
  i64);

define <vscale x 4 x double> @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
    <vscale x 4 x float> %0,
    i64 %1)

  ret <vscale x 4 x double> %a
}
309
; Masked vfwcvt.f.f.v nxv4f32 -> nxv4f64: destination %0 updated in place in
; v8 under mask %2 (v0.t), tail-undisturbed (tu); source is in v12.
declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x double> %a
}
331
; Unmasked vfwcvt.f.f.v: widen nxv8f32 -> nxv8f64 under VL %1 (e32/m4);
; result occupies an 8-register group (v16, copied back with vmv8r).
declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
  <vscale x 8 x float>,
  i64);

define <vscale x 8 x double> @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
    <vscale x 8 x float> %0,
    i64 %1)

  ret <vscale x 8 x double> %a
}
350
; Masked vfwcvt.f.f.v nxv8f32 -> nxv8f64: destination %0 updated in place in
; v8 under mask %2 (v0.t), tail-undisturbed (tu); source is in v16.
declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x double> %a
}
372