; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+use-experimental-zeroing-pseudos < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

;
; FADD
;

; fadd with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fadd).
define <vscale x 8 x half> @fadd_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fadd_h_zero:
; CHECK:      movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

; fadd with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fadd).
define <vscale x 4 x float> @fadd_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fadd_s_zero:
; CHECK:      movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

; fadd with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fadd).
define <vscale x 2 x double> @fadd_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fadd_d_zero:
; CHECK:      movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAX
;

; fmax with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmax).
define <vscale x 8 x half> @fmax_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmax_h_zero:
; CHECK:      movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

; fmax with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmax).
define <vscale x 4 x float> @fmax_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmax_s_zero:
; CHECK:      movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

; fmax with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmax).
define <vscale x 2 x double> @fmax_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmax_d_zero:
; CHECK:      movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAXNM
;

; fmaxnm with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmaxnm).
define <vscale x 8 x half> @fmaxnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnm_h_zero:
; CHECK:      movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

; fmaxnm with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmaxnm).
define <vscale x 4 x float> @fmaxnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnm_s_zero:
; CHECK:      movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

; fmaxnm with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmaxnm).
define <vscale x 2 x double> @fmaxnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnm_d_zero:
; CHECK:      movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMIN
;

; fmin with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmin).
define <vscale x 8 x half> @fmin_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmin_h_zero:
; CHECK:      movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

; fmin with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmin).
define <vscale x 4 x float> @fmin_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmin_s_zero:
; CHECK:      movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

; fmin with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmin).
define <vscale x 2 x double> @fmin_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmin_d_zero:
; CHECK:      movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINNM
;

; fminnm with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fminnm).
define <vscale x 8 x half> @fminnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnm_h_zero:
; CHECK:      movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

; fminnm with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fminnm).
define <vscale x 4 x float> @fminnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnm_s_zero:
; CHECK:      movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

; fminnm with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fminnm).
define <vscale x 2 x double> @fminnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnm_d_zero:
; CHECK:      movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMUL
;

; fmul with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmul).
define <vscale x 8 x half> @fmul_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmul_h_zero:
; CHECK:      movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

; fmul with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmul).
define <vscale x 4 x float> @fmul_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmul_s_zero:
; CHECK:      movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

; fmul with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fmul).
define <vscale x 2 x double> @fmul_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmul_d_zero:
; CHECK:      movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUB
;

; fsub with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fsub).
define <vscale x 8 x half> @fsub_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsub_h_zero:
; CHECK:      movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

; fsub with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fsub).
define <vscale x 4 x float> @fsub_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsub_s_zero:
; CHECK:      movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

; fsub with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fsub).
define <vscale x 2 x double> @fsub_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsub_d_zero:
; CHECK:      movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fsub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUBR
;

; fsubr with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fsubr).
define <vscale x 8 x half> @fsubr_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsubr_h_zero:
; CHECK:      movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a_z,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

; fsubr with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fsubr).
define <vscale x 4 x float> @fsubr_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsubr_s_zero:
; CHECK:      movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fsubr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a_z,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

; fsubr with the inactive lanes of %a zeroed: expect the zeroing pseudo (movprfx + fsubr).
define <vscale x 2 x double> @fsubr_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsubr_d_zero:
; CHECK:      movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fsubr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a_z,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

; Intrinsic declarations.
; NOTE(review): fabd/fdiv/fdivr/fmulx are declared but not exercised in this
; chunk — presumably shared with sibling tests of this family; confirm before
; removing.
declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdiv.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdiv.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdiv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdivr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdivr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdivr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmulx.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmulx.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmulx.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)