; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=powerpc64le-unknown-unknown -ppc-asm-full-reg-names \
; RUN:   -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
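
; Check that the vector saturating add/subtract intrinsics (llvm.sadd.sat,
; llvm.ssub.sat, llvm.uadd.sat, llvm.usub.sat) on v16i8, v8i16, and v4i32
; operands are selected to the corresponding Altivec saturating instructions.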
define dso_local <16 x i8> @vectorsaddb(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vectorsaddb:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vaddsbs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %call
}

define dso_local <16 x i8> @vectorssubb(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vectorssubb:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsubsbs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %call
}

define dso_local <16 x i8> @vectoruaddb(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vectoruaddb:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vaddubs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %call
}

define dso_local <16 x i8> @vectorusubb(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: vectorusubb:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsububs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %call
}

define dso_local <8 x i16> @vectorsaddh(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vectorsaddh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vaddshs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %call
}

define dso_local <8 x i16> @vectorssubh(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vectorssubh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsubshs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %call
}

define dso_local <8 x i16> @vectoruaddh(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vectoruaddh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vadduhs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %call
}

define dso_local <8 x i16> @vectorusubh(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vectorusubh:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsubuhs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %call
}

define dso_local <4 x i32> @vectorsaddw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: vectorsaddw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vaddsws v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %call
}

define dso_local <4 x i32> @vectorssubw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: vectorssubw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsubsws v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %call
}

define dso_local <4 x i32> @vectoruaddw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: vectoruaddw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vadduws v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %call
}

define dso_local <4 x i32> @vectorusubw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: vectorusubw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsubuws v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %call = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %call
}

declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)