; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
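; Test that the llvm.aarch64.neon.addp.* intrinsics select to a single
; pairwise add instruction: addp for integer vectors and faddp for
; floating-point vectors.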

declare <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8>, <8 x i8>)

define <8 x i8> @test_addp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; Using registers other than v0, v1 is possible, but would be odd.
; CHECK-LABEL: test_addp_v8i8:
  %tmp1 = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: addp v0.8b, v0.8b, v1.8b
  ret <8 x i8> %tmp1
}

declare <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8>, <16 x i8>)

define <16 x i8> @test_addp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK-LABEL: test_addp_v16i8:
  %tmp1 = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: addp v0.16b, v0.16b, v1.16b
  ret <16 x i8> %tmp1
}

declare <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16>, <4 x i16>)

define <4 x i16> @test_addp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK-LABEL: test_addp_v4i16:
  %tmp1 = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: addp v0.4h, v0.4h, v1.4h
  ret <4 x i16> %tmp1
}

declare <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16>, <8 x i16>)

define <8 x i16> @test_addp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK-LABEL: test_addp_v8i16:
  %tmp1 = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: addp v0.8h, v0.8h, v1.8h
  ret <8 x i16> %tmp1
}

declare <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32>, <2 x i32>)

define <2 x i32> @test_addp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK-LABEL: test_addp_v2i32:
  %tmp1 = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: addp v0.2s, v0.2s, v1.2s
  ret <2 x i32> %tmp1
}

declare <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32>, <4 x i32>)

define <4 x i32> @test_addp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: test_addp_v4i32:
  %tmp1 = call <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: addp v0.4s, v0.4s, v1.4s
  ret <4 x i32> %tmp1
}

declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>)

define <2 x i64> @test_addp_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; CHECK-LABEL: test_addp_v2i64:
  %val = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
; CHECK: addp v0.2d, v0.2d, v1.2d
  ret <2 x i64> %val
}

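; The same addp intrinsic handles floating-point element types; for those it
; should select to faddp instead.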
declare <2 x float> @llvm.aarch64.neon.addp.v2f32(<2 x float>, <2 x float>)
declare <4 x float> @llvm.aarch64.neon.addp.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.aarch64.neon.addp.v2f64(<2 x double>, <2 x double>)

define <2 x float> @test_faddp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
; CHECK-LABEL: test_faddp_v2f32:
  %val = call <2 x float> @llvm.aarch64.neon.addp.v2f32(<2 x float> %lhs, <2 x float> %rhs)
; CHECK: faddp v0.2s, v0.2s, v1.2s
  ret <2 x float> %val
}

define <4 x float> @test_faddp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
; CHECK-LABEL: test_faddp_v4f32:
  %val = call <4 x float> @llvm.aarch64.neon.addp.v4f32(<4 x float> %lhs, <4 x float> %rhs)
; CHECK: faddp v0.4s, v0.4s, v1.4s
  ret <4 x float> %val
}

define <2 x double> @test_faddp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
; CHECK-LABEL: test_faddp_v2f64:
  %val = call <2 x double> @llvm.aarch64.neon.addp.v2f64(<2 x double> %lhs, <2 x double> %rhs)
; CHECK: faddp v0.2d, v0.2d, v1.2d
  ret <2 x double> %val
}

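; An across-lanes add (saddv) of a two-element vector sums just one adjacent
; pair, so it should also select to a single addp.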
define i32 @test_vaddv.v2i32(<2 x i32> %a) {
; CHECK-LABEL: test_vaddv.v2i32
; CHECK: addp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %1 = tail call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a)
  ret i32 %1
}

declare i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32>)