; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
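; Test codegen for the llvm.riscv.vfwmsac intrinsics (widening floating-point
; multiply-subtract-accumulate), in unmasked and masked forms, across the
; supported SEW/LMUL combinations for f16->f32 and f32->f64 widening.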
declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i32);

define <vscale x 1 x float> @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    i32 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i32);

define <vscale x 2 x float> @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
  <vscale x 2 x float>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
    <vscale x 2 x float> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x float> @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
  <vscale x 4 x float>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
    <vscale x 4 x float> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i32);

define <vscale x 8 x float> @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    i32 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
  <vscale x 8 x float>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
    <vscale x 8 x float> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i32);

define <vscale x 16 x float> @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    i32 %3)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i32);

define <vscale x 1 x double> @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
  <vscale x 1 x double>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x double> @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
    <vscale x 2 x double> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i32);

define <vscale x 4 x double> @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    i32 %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
  <vscale x 4 x double>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
    <vscale x 4 x double> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i32);

define <vscale x 8 x double> @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    i32 %3)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
  <vscale x 8 x double>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
    <vscale x 8 x double> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x double> %a
}

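; The vector-scalar (.vf) variants below take a scalar f16/f32 operand in
; place of the first vector source; the scalar is moved into an FP register
; (fmv.h.x / fmv.w.x) before the vfwmsac.vf instruction.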
declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
  <vscale x 1 x float>,
  half,
  <vscale x 1 x half>,
  i32);

define <vscale x 1 x float> @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
    <vscale x 1 x float> %0,
    half %1,
    <vscale x 1 x half> %2,
    i32 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
  <vscale x 1 x float>,
  half,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
    <vscale x 1 x float> %0,
    half %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
  <vscale x 2 x float>,
  half,
  <vscale x 2 x half>,
  i32);

define <vscale x 2 x float> @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
    <vscale x 2 x float> %0,
    half %1,
    <vscale x 2 x half> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
  <vscale x 2 x float>,
  half,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
    <vscale x 2 x float> %0,
    half %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
  <vscale x 4 x float>,
  half,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x float> @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
    <vscale x 4 x float> %0,
    half %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
  <vscale x 4 x float>,
  half,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
    <vscale x 4 x float> %0,
    half %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
  <vscale x 8 x float>,
  half,
  <vscale x 8 x half>,
  i32);

define <vscale x 8 x float> @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
    <vscale x 8 x float> %0,
    half %1,
    <vscale x 8 x half> %2,
    i32 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
  <vscale x 8 x float>,
  half,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
    <vscale x 8 x float> %0,
    half %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
  <vscale x 16 x float>,
  half,
  <vscale x 16 x half>,
  i32);

define <vscale x 16 x float> @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
    <vscale x 16 x float> %0,
    half %1,
    <vscale x 16 x half> %2,
    i32 %3)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
  <vscale x 16 x float>,
  half,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
    <vscale x 16 x float> %0,
    half %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
  <vscale x 1 x double>,
  float,
  <vscale x 1 x float>,
  i32);

define <vscale x 1 x double> @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
    <vscale x 1 x double> %0,
    float %1,
    <vscale x 1 x float> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
  <vscale x 1 x double>,
  float,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
    <vscale x 1 x double> %0,
    float %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
  <vscale x 2 x double>,
  float,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x double> @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
    <vscale x 2 x double> %0,
    float %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
  <vscale x 2 x double>,
  float,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
    <vscale x 2 x double> %0,
    float %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
  <vscale x 4 x double>,
  float,
  <vscale x 4 x float>,
  i32);

define <vscale x 4 x double> @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
    <vscale x 4 x double> %0,
    float %1,
    <vscale x 4 x float> %2,
    i32 %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
  <vscale x 4 x double>,
  float,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
    <vscale x 4 x double> %0,
    float %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
  <vscale x 8 x double>,
  float,
  <vscale x 8 x float>,
  i32);

define <vscale x 8 x double> @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
    <vscale x 8 x double> %0,
    float %1,
    <vscale x 8 x float> %2,
    i32 %3)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
  <vscale x 8 x double>,
  float,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
    <vscale x 8 x double> %0,
    float %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x double> %a
}