; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i32);

define <vscale x 1 x half>  @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    i32 %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x half>  @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i32);

define <vscale x 2 x half>  @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    i32 %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x half>  @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x half>  @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x half>  @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i32);

define <vscale x 8 x half>  @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    i32 %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x half>  @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i32);

define <vscale x 16 x half>  @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    i32 %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x half>  @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i32);

define <vscale x 1 x float>  @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    i32 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float>  @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float>  @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float>  @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i32);

define <vscale x 4 x float>  @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    i32 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float>  @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i32);

define <vscale x 8 x float>  @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    i32 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float>  @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x double>  @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x double>  @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i32);

define <vscale x 2 x double>  @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    i32 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x double>  @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i32);

define <vscale x 4 x double>  @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    i32 %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x double>  @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  i32);

define <vscale x 1 x half>  @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    i32 %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  <vscale x 2 x half>,
  i32);

define <vscale x 2 x half>  @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x half> %2,
    i32 %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x half>  @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  <vscale x 8 x half>,
  i32);

define <vscale x 8 x half>  @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x half> %2,
    i32 %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  <vscale x 16 x half>,
  i32);

define <vscale x 16 x half>  @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x half> %2,
    i32 %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  <vscale x 1 x float>,
  i32);

define <vscale x 1 x float>  @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x float> %2,
    i32 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float>  @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  <vscale x 4 x float>,
  i32);

define <vscale x 4 x float>  @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x float> %2,
    i32 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  <vscale x 8 x float>,
  i32);

define <vscale x 8 x float>  @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x float> %2,
    i32 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x double>  @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x double> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  <vscale x 2 x double>,
  i32);

define <vscale x 2 x double>  @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x double> %2,
    i32 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  <vscale x 4 x double>,
  i32);

define <vscale x 4 x double>  @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x double> %2,
    i32 %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x double> %a
}