; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s
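; Check that the AArch64 across-vector add intrinsics (saddv/uaddv/faddv)
; lower to a single addv/addp/faddp reduction, and that redundant
; truncations and masks of the scalar result are folded away.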

define signext i8 @test_vaddv_s8(<8 x i8> %a1) {
; CHECK-LABEL: test_vaddv_s8:
; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a1)
  %0 = trunc i32 %vaddv.i to i8
  ret i8 %0
}

define signext i16 @test_vaddv_s16(<4 x i16> %a1) {
; CHECK-LABEL: test_vaddv_s16:
; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a1)
  %0 = trunc i32 %vaddv.i to i16
  ret i16 %0
}

define i32 @test_vaddv_s32(<2 x i32> %a1) {
; CHECK-LABEL: test_vaddv_s32:
; The ISA has no addv.2s, so this case is lowered to a pairwise addp instead.
; CHECK: addp.2s v[[REGNUM:[0-9]+]], v0, v0
; CHECK-NEXT: fmov w0, s[[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a1)
  ret i32 %vaddv.i
}

define i64 @test_vaddv_s64(<2 x i64> %a1) {
; CHECK-LABEL: test_vaddv_s64:
; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
; CHECK-NEXT: fmov x0, [[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %a1)
  ret i64 %vaddv.i
}

define zeroext i8 @test_vaddv_u8(<8 x i8> %a1) {
; CHECK-LABEL: test_vaddv_u8:
; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: fmov w0, s[[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a1)
  %0 = trunc i32 %vaddv.i to i8
  ret i8 %0
}

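; For the _masked variants below, the AND mask covers every bit the reduction
; result can set, so the 'and' is expected to fold away.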
define i32 @test_vaddv_u8_masked(<8 x i8> %a1) {
; CHECK-LABEL: test_vaddv_u8_masked:
; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: fmov w0, s[[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a1)
  %0 = and i32 %vaddv.i, 511 ; 0x1ff
  ret i32 %0
}

define zeroext i16 @test_vaddv_u16(<4 x i16> %a1) {
; CHECK-LABEL: test_vaddv_u16:
; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: fmov w0, s[[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a1)
  %0 = trunc i32 %vaddv.i to i16
  ret i16 %0
}

define i32 @test_vaddv_u16_masked(<4 x i16> %a1) {
; CHECK-LABEL: test_vaddv_u16_masked:
; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: fmov w0, s[[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a1)
  %0 = and i32 %vaddv.i, 3276799 ; 0x31ffff
  ret i32 %0
}

define i32 @test_vaddv_u32(<2 x i32> %a1) {
; CHECK-LABEL: test_vaddv_u32:
; The ISA has no addv.2s, so this case is lowered to a pairwise addp instead.
; CHECK: addp.2s v[[REGNUM:[0-9]+]], v0, v0
; CHECK-NEXT: fmov w0, s[[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> %a1)
  ret i32 %vaddv.i
}

define float @test_vaddv_f32(<2 x float> %a1) {
; CHECK-LABEL: test_vaddv_f32:
; CHECK: faddp.2s s0, v0
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call float @llvm.aarch64.neon.faddv.f32.v2f32(<2 x float> %a1)
  ret float %vaddv.i
}

define float @test_vaddv_v4f32(<4 x float> %a1) {
; CHECK-LABEL: test_vaddv_v4f32:
; CHECK: faddp.4s [[REGNUM:v[0-9]+]], v0, v0
; CHECK: faddp.2s s0, [[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call float @llvm.aarch64.neon.faddv.f32.v4f32(<4 x float> %a1)
  ret float %vaddv.i
}

define double @test_vaddv_f64(<2 x double> %a1) {
; CHECK-LABEL: test_vaddv_f64:
; CHECK: faddp.2d d0, v0
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call double @llvm.aarch64.neon.faddv.f64.v2f64(<2 x double> %a1)
  ret double %vaddv.i
}

define i64 @test_vaddv_u64(<2 x i64> %a1) {
; CHECK-LABEL: test_vaddv_u64:
; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
; CHECK-NEXT: fmov x0, [[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a1)
  ret i64 %vaddv.i
}

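; The reduction result is inserted back into a vector, so it should stay in
; the SIMD register file: no fmov to a GPR and no ins back.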
define <1 x i64> @test_vaddv_u64_to_vec(<2 x i64> %a1) {
; CHECK-LABEL: test_vaddv_u64_to_vec:
; CHECK: addp.2d d0, v0
; CHECK-NOT: fmov
; CHECK-NOT: ins
; CHECK: ret
entry:
  %vaddv.i = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a1)
  %vec = insertelement <1 x i64> undef, i64 %vaddv.i, i32 0
  ret <1 x i64> %vec
}

define signext i8 @test_vaddvq_s8(<16 x i8> %a1) {
; CHECK-LABEL: test_vaddvq_s8:
; CHECK: addv.16b b[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a1)
  %0 = trunc i32 %vaddv.i to i8
  ret i8 %0
}

define signext i16 @test_vaddvq_s16(<8 x i16> %a1) {
; CHECK-LABEL: test_vaddvq_s16:
; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a1)
  %0 = trunc i32 %vaddv.i to i16
  ret i16 %0
}

define i32 @test_vaddvq_s32(<4 x i32> %a1) {
; CHECK-LABEL: test_vaddvq_s32:
; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
; CHECK-NEXT: fmov w0, [[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a1)
  ret i32 %vaddv.i
}

define zeroext i8 @test_vaddvq_u8(<16 x i8> %a1) {
; CHECK-LABEL: test_vaddvq_u8:
; CHECK: addv.16b b[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: fmov w0, s[[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> %a1)
  %0 = trunc i32 %vaddv.i to i8
  ret i8 %0
}

define zeroext i16 @test_vaddvq_u16(<8 x i16> %a1) {
; CHECK-LABEL: test_vaddvq_u16:
; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: fmov w0, s[[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a1)
  %0 = trunc i32 %vaddv.i to i16
  ret i16 %0
}

define i32 @test_vaddvq_u32(<4 x i32> %a1) {
; CHECK-LABEL: test_vaddvq_u32:
; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
; CHECK-NEXT: fmov [[FMOVRES:w[0-9]+]], [[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a1)
  ret i32 %vaddv.i
}

declare i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32>)

declare i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16>)

declare i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8>)

declare i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32>)

declare i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16>)

declare i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8>)

declare i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64>)

declare i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32>)

declare i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16>)

declare i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8>)

declare i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32>)

declare i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64>)

declare i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16>)

declare i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8>)

declare float @llvm.aarch64.neon.faddv.f32.v2f32(<2 x float> %a1)
declare float @llvm.aarch64.neon.faddv.f32.v4f32(<4 x float> %a1)
declare double @llvm.aarch64.neon.faddv.f64.v2f64(<2 x double> %a1)