1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --check-prefix=CHECK --check-prefix=FASTINCDEC
3; RUN: llc < %s -mtriple=x86_64-- -mattr=slow-incdec | FileCheck %s --check-prefix=CHECK --check-prefix=SLOWINCDEC
4
define i32 @test_add_1_cmov_slt(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_add_1_cmov_slt:
; FASTINCDEC:       # %bb.0: # %entry
; FASTINCDEC-NEXT:    movl %esi, %eax
; FASTINCDEC-NEXT:    lock incq (%rdi)
; FASTINCDEC-NEXT:    cmovgl %edx, %eax
; FASTINCDEC-NEXT:    retq
;
; SLOWINCDEC-LABEL: test_add_1_cmov_slt:
; SLOWINCDEC:       # %bb.0: # %entry
; SLOWINCDEC-NEXT:    movl %esi, %eax
; SLOWINCDEC-NEXT:    lock addq $1, (%rdi)
; SLOWINCDEC-NEXT:    cmovgl %edx, %eax
; SLOWINCDEC-NEXT:    retq
entry:
  ; atomicrmw yields the value *before* the add, and old < 0 <=> old+1 <= 0,
  ; so the EFLAGS produced by `lock inc`/`lock add` feed the cmov directly
  ; (cmovg overwrites %a0 with %a1) with no separate test of the old value.
  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
  %tmp1 = icmp slt i64 %tmp0, 0
  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
  ret i32 %tmp2
}
25
define i32 @test_add_1_cmov_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_add_1_cmov_sge:
; FASTINCDEC:       # %bb.0: # %entry
; FASTINCDEC-NEXT:    movl %esi, %eax
; FASTINCDEC-NEXT:    lock incq (%rdi)
; FASTINCDEC-NEXT:    cmovlel %edx, %eax
; FASTINCDEC-NEXT:    retq
;
; SLOWINCDEC-LABEL: test_add_1_cmov_sge:
; SLOWINCDEC:       # %bb.0: # %entry
; SLOWINCDEC-NEXT:    movl %esi, %eax
; SLOWINCDEC-NEXT:    lock addq $1, (%rdi)
; SLOWINCDEC-NEXT:    cmovlel %edx, %eax
; SLOWINCDEC-NEXT:    retq
entry:
  ; Inverted form of the test above: old >= 0 <=> old+1 > 0, so the flags
  ; from the locked increment drive cmovle (selects %a1 on the opposite edge).
  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
  %tmp1 = icmp sge i64 %tmp0, 0
  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
  ret i32 %tmp2
}
46
define i32 @test_sub_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmov_sle:
; FASTINCDEC:       # %bb.0: # %entry
; FASTINCDEC-NEXT:    movl %esi, %eax
; FASTINCDEC-NEXT:    lock decq (%rdi)
; FASTINCDEC-NEXT:    cmovgel %edx, %eax
; FASTINCDEC-NEXT:    retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmov_sle:
; SLOWINCDEC:       # %bb.0: # %entry
; SLOWINCDEC-NEXT:    movl %esi, %eax
; SLOWINCDEC-NEXT:    lock subq $1, (%rdi)
; SLOWINCDEC-NEXT:    cmovgel %edx, %eax
; SLOWINCDEC-NEXT:    retq
entry:
  ; Subtract variant: old <= 0 <=> old-1 < 0, so the flags from the locked
  ; decrement feed cmovge directly (no re-test of the fetched value).
  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
  %tmp1 = icmp sle i64 %tmp0, 0
  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
  ret i32 %tmp2
}
67
define i32 @test_sub_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmov_sgt:
; FASTINCDEC:       # %bb.0: # %entry
; FASTINCDEC-NEXT:    movl %esi, %eax
; FASTINCDEC-NEXT:    lock decq (%rdi)
; FASTINCDEC-NEXT:    cmovll %edx, %eax
; FASTINCDEC-NEXT:    retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmov_sgt:
; SLOWINCDEC:       # %bb.0: # %entry
; SLOWINCDEC-NEXT:    movl %esi, %eax
; SLOWINCDEC-NEXT:    lock addq $-1, (%rdi)
; SLOWINCDEC-NEXT:    cmovll %edx, %eax
; SLOWINCDEC-NEXT:    retq
entry:
  ; old > 0 <=> old-1 >= 0, so cmovl (the inverse condition) off the locked
  ; decrement's flags picks %a1. Note SLOWINCDEC emits `addq $-1` here
  ; rather than `subq $1`.
  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
  %tmp1 = icmp sgt i64 %tmp0, 0
  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
  ret i32 %tmp2
}
88
89; FIXME: (setcc slt x, 0) gets combined into shr early.
define i8 @test_add_1_setcc_slt(i64* %p) #0 {
; CHECK-LABEL: test_add_1_setcc_slt:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    lock xaddq %rax, (%rdi)
; CHECK-NEXT:    shrq $63, %rax
; CHECK-NEXT:    # kill: def $al killed $al killed $rax
; CHECK-NEXT:    retq
entry:
  ; (icmp slt x, 0) is canonicalized into a sign-bit extract (shr $63)
  ; before ISel (see FIXME above), so both prefixes currently get
  ; xadd + shift instead of a flag-reusing inc/add + sets.
  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
  %tmp1 = icmp slt i64 %tmp0, 0
  %tmp2 = zext i1 %tmp1 to i8
  ret i8 %tmp2
}
104
define i8 @test_sub_1_setcc_sgt(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_setcc_sgt:
; FASTINCDEC:       # %bb.0: # %entry
; FASTINCDEC-NEXT:    lock decq (%rdi)
; FASTINCDEC-NEXT:    setge %al
; FASTINCDEC-NEXT:    retq
;
; SLOWINCDEC-LABEL: test_sub_1_setcc_sgt:
; SLOWINCDEC:       # %bb.0: # %entry
; SLOWINCDEC-NEXT:    lock addq $-1, (%rdi)
; SLOWINCDEC-NEXT:    setge %al
; SLOWINCDEC-NEXT:    retq
entry:
  ; setcc variant: old > 0 <=> old-1 >= 0, so setge reads the flags left
  ; by the locked decrement; the fetched value is never materialized.
  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
  %tmp1 = icmp sgt i64 %tmp0, 0
  %tmp2 = zext i1 %tmp1 to i8
  ret i8 %tmp2
}
123
define i32 @test_add_1_brcond_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_add_1_brcond_sge:
; FASTINCDEC:       # %bb.0: # %entry
; FASTINCDEC-NEXT:    lock incq (%rdi)
; FASTINCDEC-NEXT:    jle .LBB6_2
; FASTINCDEC-NEXT:  # %bb.1: # %t
; FASTINCDEC-NEXT:    movl %esi, %eax
; FASTINCDEC-NEXT:    retq
; FASTINCDEC-NEXT:  .LBB6_2: # %f
; FASTINCDEC-NEXT:    movl %edx, %eax
; FASTINCDEC-NEXT:    retq
;
; SLOWINCDEC-LABEL: test_add_1_brcond_sge:
; SLOWINCDEC:       # %bb.0: # %entry
; SLOWINCDEC-NEXT:    lock addq $1, (%rdi)
; SLOWINCDEC-NEXT:    jle .LBB6_2
; SLOWINCDEC-NEXT:  # %bb.1: # %t
; SLOWINCDEC-NEXT:    movl %esi, %eax
; SLOWINCDEC-NEXT:    retq
; SLOWINCDEC-NEXT:  .LBB6_2: # %f
; SLOWINCDEC-NEXT:    movl %edx, %eax
; SLOWINCDEC-NEXT:    retq
entry:
  ; Branch variant: old >= 0 <=> old+1 > 0, so the conditional branch (jle
  ; to %f) consumes the locked increment's flags directly.
  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
  %tmp1 = icmp sge i64 %tmp0, 0
  br i1 %tmp1, label %t, label %f
t:
  ret i32 %a0
f:
  ret i32 %a1
}
155
156; Also make sure we don't muck with condition codes that we should ignore.
157; No need to test unsigned comparisons, as they should all be simplified.
158
define i32 @test_add_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
; CHECK-LABEL: test_add_1_cmov_sle:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax
; CHECK-NEXT:    movl $1, %ecx
; CHECK-NEXT:    lock xaddq %rcx, (%rdi)
; CHECK-NEXT:    testq %rcx, %rcx
; CHECK-NEXT:    cmovgl %edx, %eax
; CHECK-NEXT:    retq
entry:
  ; Negative test: old <= 0 <=> old+1 <= 1, which no flag condition after an
  ; increment expresses, so the old value must be fetched with xadd and
  ; tested explicitly before the cmov.
  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
  %tmp1 = icmp sle i64 %tmp0, 0
  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
  ret i32 %tmp2
}
174
define i32 @test_add_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
; CHECK-LABEL: test_add_1_cmov_sgt:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax
; CHECK-NEXT:    movl $1, %ecx
; CHECK-NEXT:    lock xaddq %rcx, (%rdi)
; CHECK-NEXT:    testq %rcx, %rcx
; CHECK-NEXT:    cmovlel %edx, %eax
; CHECK-NEXT:    retq
entry:
  ; Negative test: old > 0 <=> old+1 > 1, again not a flag condition of the
  ; increment, so xadd + explicit test is the expected lowering.
  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
  %tmp1 = icmp sgt i64 %tmp0, 0
  %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
  ret i32 %tmp2
}
190
191; Test a result being used by more than just the comparison.
192
define i8 @test_add_1_setcc_sgt_reuse(i64* %p, i64* %p2) #0 {
; CHECK-LABEL: test_add_1_setcc_sgt_reuse:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl $1, %ecx
; CHECK-NEXT:    lock xaddq %rcx, (%rdi)
; CHECK-NEXT:    testq %rcx, %rcx
; CHECK-NEXT:    setg %al
; CHECK-NEXT:    movq %rcx, (%rsi)
; CHECK-NEXT:    retq
entry:
  ; The fetched value has a second use (the store), so it must be
  ; materialized via xadd; the flag-reuse transform does not apply.
  %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
  %tmp1 = icmp sgt i64 %tmp0, 0
  %tmp2 = zext i1 %tmp1 to i8
  store i64 %tmp0, i64* %p2
  ret i8 %tmp2
}
209
define i8 @test_sub_2_setcc_sgt(i64* %p) #0 {
; CHECK-LABEL: test_sub_2_setcc_sgt:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq $-2, %rax
; CHECK-NEXT:    lock xaddq %rax, (%rdi)
; CHECK-NEXT:    testq %rax, %rax
; CHECK-NEXT:    setg %al
; CHECK-NEXT:    retq
entry:
  ; Operand is 2, not 1: old > 0 <=> old-2 > -2, which the subtraction's
  ; flags cannot express, so the old value is fetched (xadd $-2) and tested.
  %tmp0 = atomicrmw sub i64* %p, i64 2 seq_cst
  %tmp1 = icmp sgt i64 %tmp0, 0
  %tmp2 = zext i1 %tmp1 to i8
  ret i8 %tmp2
}
224
225; TODO: It's possible to use "lock inc" here, but both cmovs need to be updated.
define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
; CHECK-LABEL: test_add_1_cmov_cmov:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    lock xaddq %rax, (%rdi)
; CHECK-NEXT:    testq %rax, %rax
; CHECK-NEXT:    movl $12, %eax
; CHECK-NEXT:    movl $34, %ecx
; CHECK-NEXT:    cmovsl %eax, %ecx
; CHECK-NEXT:    movb %cl, (%rsi)
; CHECK-NEXT:    movl $56, %ecx
; CHECK-NEXT:    movl $78, %eax
; CHECK-NEXT:    cmovsl %ecx, %eax
; CHECK-NEXT:    # kill: def $al killed $al killed $eax
; CHECK-NEXT:    retq
entry:
  ; One compare feeds two selects; currently the old value is fetched via
  ; xadd and tested once, with both cmovs keying on the sign flag (see the
  ; TODO above about switching this to `lock inc`).
  %add = atomicrmw add i64* %p, i64 1 seq_cst
  %cmp = icmp slt i64 %add, 0
  %s1 = select i1 %cmp, i8 12, i8 34
  store i8 %s1, i8* %q
  %s2 = select i1 %cmp, i8 56, i8 78
  ret i8 %s2
}
249
define i8 @test_sub_1_cmp_1_setcc_eq(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmp_1_setcc_eq:
; FASTINCDEC:       # %bb.0: # %entry
; FASTINCDEC-NEXT:    lock decq (%rdi)
; FASTINCDEC-NEXT:    sete %al
; FASTINCDEC-NEXT:    retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmp_1_setcc_eq:
; SLOWINCDEC:       # %bb.0: # %entry
; SLOWINCDEC-NEXT:    lock subq $1, (%rdi)
; SLOWINCDEC-NEXT:    sete %al
; SLOWINCDEC-NEXT:    retq
entry:
  ; Compare against the subtracted amount: old == 1 <=> old-1 == 0, so sete
  ; reads ZF straight from the locked decrement.
  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
  %tmp1 = icmp eq i64 %tmp0, 1
  %tmp2 = zext i1 %tmp1 to i8
  ret i8 %tmp2
}
268
define i8 @test_sub_1_cmp_1_setcc_ne(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmp_1_setcc_ne:
; FASTINCDEC:       # %bb.0: # %entry
; FASTINCDEC-NEXT:    lock decq (%rdi)
; FASTINCDEC-NEXT:    setne %al
; FASTINCDEC-NEXT:    retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmp_1_setcc_ne:
; SLOWINCDEC:       # %bb.0: # %entry
; SLOWINCDEC-NEXT:    lock subq $1, (%rdi)
; SLOWINCDEC-NEXT:    setne %al
; SLOWINCDEC-NEXT:    retq
entry:
  ; Inverse of the eq case: old != 1 <=> old-1 != 0, so setne reads ZF from
  ; the locked decrement.
  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
  %tmp1 = icmp ne i64 %tmp0, 1
  %tmp2 = zext i1 %tmp1 to i8
  ret i8 %tmp2
}
287
define i8 @test_sub_1_cmp_1_setcc_ugt(i64* %p) #0 {
; CHECK-LABEL: test_sub_1_cmp_1_setcc_ugt:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lock subq $1, (%rdi)
; CHECK-NEXT:    seta %al
; CHECK-NEXT:    retq
entry:
  ; Unsigned: old > 1 <=> sub $1 borrows nothing (CF=0) and leaves a nonzero
  ; result (ZF=0), i.e. seta. Both prefixes use `lock subq` because inc/dec
  ; do not update CF.
  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
  %tmp1 = icmp ugt i64 %tmp0, 1
  %tmp2 = zext i1 %tmp1 to i8
  ret i8 %tmp2
}
300
define i8 @test_sub_1_cmp_1_setcc_sle(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmp_1_setcc_sle:
; FASTINCDEC:       # %bb.0: # %entry
; FASTINCDEC-NEXT:    lock decq (%rdi)
; FASTINCDEC-NEXT:    setle %al
; FASTINCDEC-NEXT:    retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmp_1_setcc_sle:
; SLOWINCDEC:       # %bb.0: # %entry
; SLOWINCDEC-NEXT:    lock subq $1, (%rdi)
; SLOWINCDEC-NEXT:    setle %al
; SLOWINCDEC-NEXT:    retq
entry:
  ; Signed compare against the subtracted amount: old <= 1 <=> old-1 <= 0,
  ; so setle consumes the locked decrement's flags.
  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
  %tmp1 = icmp sle i64 %tmp0, 1
  %tmp2 = zext i1 %tmp1 to i8
  ret i8 %tmp2
}
319
define i8 @test_sub_3_cmp_3_setcc_eq(i64* %p) #0 {
; CHECK-LABEL: test_sub_3_cmp_3_setcc_eq:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lock subq $3, (%rdi)
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
entry:
  ; The fold generalizes beyond +/-1: old == 3 <=> old-3 == 0, so sete reads
  ; ZF from `lock subq $3` on both prefixes.
  %tmp0 = atomicrmw sub i64* %p, i64 3 seq_cst
  %tmp1 = icmp eq i64 %tmp0, 3
  %tmp2 = zext i1 %tmp1 to i8
  ret i8 %tmp2
}
332
define i8 @test_sub_3_cmp_3_setcc_uge(i64* %p) #0 {
; CHECK-LABEL: test_sub_3_cmp_3_setcc_uge:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lock subq $3, (%rdi)
; CHECK-NEXT:    setae %al
; CHECK-NEXT:    retq
entry:
  ; Unsigned: old >= 3 <=> `sub $3` does not borrow (CF=0), so setae reads
  ; the carry flag left by the locked subtraction.
  %tmp0 = atomicrmw sub i64* %p, i64 3 seq_cst
  %tmp1 = icmp uge i64 %tmp0, 3
  %tmp2 = zext i1 %tmp1 to i8
  ret i8 %tmp2
}
345
; nounwind keeps EH lowering from perturbing the exact instruction
; sequences the CHECK lines pin down.
attributes #0 = { nounwind }
347