; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
; RUN:   -mattr=+d -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
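; This file tests the llvm.riscv.vfadd intrinsics: first the vector-vector
; (vfadd.vv) forms, then the vector-scalar (vfadd.vf) forms, each in unmasked
; and masked variants, over f16/f32/f64 element types at every LMUL.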
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i64);

define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i64);

define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i64);

define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i64);

define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i64);

define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i64 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  i64);

define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    i64 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  i64);

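; Note: with LMUL=8, v8-v15 and v16-v23 hold the first two vector arguments,
; so the third vector operand is passed indirectly; the generated code
; reloads it with vl8re16.v and takes the vl from a1 instead of a0.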
define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x half> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i64);

define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i64);

define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i64);

define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i64);

define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  i64);

define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    i64 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i64);

define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i64);

define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i64);

define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  i64);

define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x double> %a
}

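; The tests below cover the vector-scalar (vfadd.vf) forms: the scalar
; operand arrives in a GPR and is moved into ft0 (fmv.h.x/fmv.w.x/fmv.d.x)
; before the vector add.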
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  i64);

define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  i64);

define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  i64);

define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  i64);

define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  i64);

define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
  <vscale x 32 x half>,
  half,
  i64);

define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
    <vscale x 32 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  i64);

define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  i64);

define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  i64);

define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  i64);

define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
  <vscale x 16 x float>,
  float,
  i64);

define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
    <vscale x 16 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  i64);

define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  i64);

define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  i64);

define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
  <vscale x 8 x double>,
  double,
  i64);

define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, ft0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
    <vscale x 8 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x double> %a
}