; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX512,AVX512BW
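; The check prefixes layer from common to specific: CHECK lines must match
; every RUN configuration, SSE/AVX lines match one ISA family, and the most
; specific prefixes (e.g. SSE2, AVX512BW) are used only where codegen
; diverges at that feature level.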

declare  i32 @llvm.uadd.sat.i32  (i32, i32)
declare  i64 @llvm.uadd.sat.i64  (i64, i64)
declare  <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)

; fold (uadd_sat x, undef) -> -1
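; undef may take any value, including one large enough to saturate, so the
; combiner is free to fold the whole operation to the all-ones constant
; (materialized with pcmpeqd/vpcmpeqd in the vector case below).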
define i32 @combine_undef_i32(i32 %a0) {
; CHECK-LABEL: combine_undef_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    retq
  %res = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 undef)
  ret i32 %res
}

define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
; SSE-LABEL: combine_undef_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_undef_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
  ret <8 x i16> %res
}

; fold (uadd_sat c1, c2) -> c3
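; With both operands constant the result folds at compile time: below,
; 4294967295 + 100 clamps to UINT32_MAX, and each v8i16 lane folds
; independently (e.g. 255 + 1 = 256, while 65535 + 65535 saturates to 65535).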
define i32 @combine_constfold_i32() {
; CHECK-LABEL: combine_constfold_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    retq
  %res = call i32 @llvm.uadd.sat.i32(i32 4294967295, i32 100)
  ret i32 %res
}

define <8 x i16> @combine_constfold_v8i16() {
; SSE-LABEL: combine_constfold_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [1,65535,256,65535,65535,65535,2,65535]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_constfold_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1,65535,256,65535,65535,65535,2,65535]
; AVX-NEXT:    retq
  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
  ret <8 x i16> %res
}

define <8 x i16> @combine_constfold_undef_v8i16() {
; SSE-LABEL: combine_constfold_undef_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_constfold_undef_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
; AVX-NEXT:    retq
  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
  ret <8 x i16> %res
}

; fold (uadd_sat c, x) -> (uadd_sat x, c)
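; uadd_sat is commutative, so a constant LHS is canonicalized to the RHS,
; where it can be handled as an immediate (scalar inc + cmov) or as a
; constant-pool memory operand (paddusw/vpaddusw below).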
define i32 @combine_constant_i32(i32 %a0) {
; CHECK-LABEL: combine_constant_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    incl %edi
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    cmovnel %edi, %eax
; CHECK-NEXT:    retq
  %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0)
  ret i32 %1
}

define <8 x i16> @combine_constant_v8i16(<8 x i16> %a0) {
; SSE-LABEL: combine_constant_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusw {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_constant_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
  ret <8 x i16> %1
}

; fold (uadd_sat x, 0) -> x
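; Adding zero can never overflow, so the saturating add is a no-op: the
; scalar case is just the ABI register move and the vector case needs no
; instructions at all.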
define i32 @combine_zero_i32(i32 %a0) {
; CHECK-LABEL: combine_zero_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0)
  ret i32 %1
}

define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
; CHECK-LABEL: combine_zero_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
  ret <8 x i16> %1
}

; fold (uadd_sat x, y) -> (add x, y) iff no overflow
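; When the combiner can prove the sum cannot exceed the type's maximum
; (here both operands are pre-shifted right, clearing their high bits),
; the saturating add relaxes to an ordinary add (leal/paddw below).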
define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
; CHECK-LABEL: combine_no_overflow_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $esi killed $esi def $rsi
; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
; CHECK-NEXT:    shrl $16, %edi
; CHECK-NEXT:    shrl $16, %esi
; CHECK-NEXT:    leal (%rsi,%rdi), %eax
; CHECK-NEXT:    retq
  %1 = lshr i32 %a0, 16
  %2 = lshr i32 %a1, 16
  %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2)
  ret i32 %3
}

define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: combine_no_overflow_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlw $10, %xmm0
; SSE-NEXT:    psrlw $10, %xmm1
; SSE-NEXT:    paddw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_no_overflow_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlw $10, %xmm0, %xmm0
; AVX-NEXT:    vpsrlw $10, %xmm1, %xmm1
; AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
  %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
  %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
  ret <8 x i16> %3
}