; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=generic | FileCheck %s

; Intrinsic declarations for the integer vector add reductions exercised below.
; Function Attrs: nounwind readnone
declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)

; A full 128-bit 16 x i8 add reduction should lower to a single ADDV.
define i8 @add_B(<16 x i8>* %arr)  {
; CHECK-LABEL: add_B
; CHECK: addv {{b[0-9]+}}, {{v[0-9]+}}.16b
  %bin.rdx = load <16 x i8>, <16 x i8>* %arr
  %r = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %bin.rdx)
  ret i8 %r
}

; An 8 x i16 add reduction should lower to a single ADDV.
define i16 @add_H(<8 x i16>* %arr)  {
; CHECK-LABEL: add_H
; CHECK: addv {{h[0-9]+}}, {{v[0-9]+}}.8h
  %bin.rdx = load <8 x i16>, <8 x i16>* %arr
  %r = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %bin.rdx)
  ret i16 %r
}

; A 4 x i32 add reduction should lower to a single ADDV.
define i32 @add_S(<4 x i32>* %arr)  {
; CHECK-LABEL: add_S
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
  %bin.rdx = load <4 x i32>, <4 x i32>* %arr
  %r = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
  ret i32 %r
}

; A 2 x i64 add reduction must NOT use ADDV (no ADDV form covers this case;
; it is lowered differently), so check that none is emitted.
define i64 @add_D(<2 x i64>* %arr)  {
; CHECK-LABEL: add_D
; CHECK-NOT: addv
  %bin.rdx = load <2 x i64>, <2 x i64>* %arr
  %r = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %bin.rdx)
  ret i64 %r
}

declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)

; A 256-bit (8 x i32) reduction is wider than one NEON register; after the
; vectors are combined, the final step should still be a single .4s ADDV.
; The abs-difference pattern (zext, sub, select of negation) feeds the reduce.
define i32 @oversized_ADDV_256(i8* noalias nocapture readonly %arg1, i8* noalias nocapture readonly %arg2) {
; CHECK-LABEL: oversized_ADDV_256
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %0 = bitcast i8* %arg1 to <8 x i8>*
  %1 = load <8 x i8>, <8 x i8>* %0, align 1
  %2 = zext <8 x i8> %1 to <8 x i32>
  %3 = bitcast i8* %arg2 to <8 x i8>*
  %4 = load <8 x i8>, <8 x i8>* %3, align 1
  %5 = zext <8 x i8> %4 to <8 x i32>
  %6 = sub nsw <8 x i32> %2, %5
  %7 = icmp slt <8 x i32> %6, zeroinitializer
  %8 = sub nsw <8 x i32> zeroinitializer, %6
  %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
  %r = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %9)
  ret i32 %r
}

declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)

; A 512-bit (16 x i32) reduction must also end in a single .4s ADDV after the
; wide vector is split and combined.
define i32 @oversized_ADDV_512(<16 x i32>* %arr)  {
; CHECK-LABEL: oversized_ADDV_512
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
  %bin.rdx = load <16 x i32>, <16 x i32>* %arr
  %r = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %bin.rdx)
  ret i32 %r
}
