; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32
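; 64-bit atomic operations have no single-instruction lowering on 32-bit x86,
; so each operation below is expected to be expanded into a lock cmpxchg8b
; loop, with the arithmetic performed on the two 32-bit register halves.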

@sc64 = external global i64

define void @atomic_fetch_add64() nounwind {
; X64-LABEL:   atomic_fetch_add64:
; X32-LABEL:   atomic_fetch_add64:
entry:
  %t1 = atomicrmw add  i64* @sc64, i64 1 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw add  i64* @sc64, i64 3 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw add  i64* @sc64, i64 5 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t4 = atomicrmw add  i64* @sc64, i64 %t3 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

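; Subtracting a constant is expected to be lowered as an add of the negated
; 64-bit value, hence the addl $-N / adcl $-1 patterns checked below.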
define void @atomic_fetch_sub64() nounwind {
; X64-LABEL:   atomic_fetch_sub64:
; X32-LABEL:   atomic_fetch_sub64:
  %t1 = atomicrmw sub  i64* @sc64, i64 1 acquire
; X32:       addl $-1
; X32:       adcl $-1
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw sub  i64* @sc64, i64 3 acquire
; X32:       addl $-3
; X32:       adcl $-1
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw sub  i64* @sc64, i64 5 acquire
; X32:       addl $-5
; X32:       adcl $-1
; X32:       lock
; X32:       cmpxchg8b
  %t4 = atomicrmw sub  i64* @sc64, i64 %t3 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

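; 4294967297 is 0x100000001, so the mask applied to each 32-bit half is 1,
; which is why an andl $1 is expected for each half.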
define void @atomic_fetch_and64() nounwind {
; X64-LABEL:   atomic_fetch_and64:
; X32-LABEL:   atomic_fetch_and64:
  %t1 = atomicrmw and  i64* @sc64, i64 3 acquire
; X32:       andl $3
; X32-NOT:       andl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw and  i64* @sc64, i64 4294967297 acquire
; X32:       andl $1
; X32:       andl $1
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw and  i64* @sc64, i64 %t2 acquire
; X32:       andl
; X32:       andl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

define void @atomic_fetch_or64() nounwind {
; X64-LABEL:   atomic_fetch_or64:
; X32-LABEL:   atomic_fetch_or64:
  %t1 = atomicrmw or   i64* @sc64, i64 3 acquire
; X32:       orl $3
; X32-NOT:       orl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw or   i64* @sc64, i64 4294967297 acquire
; X32:       orl $1
; X32:       orl $1
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw or   i64* @sc64, i64 %t2 acquire
; X32:       orl
; X32:       orl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

define void @atomic_fetch_xor64() nounwind {
; X64-LABEL:   atomic_fetch_xor64:
; X32-LABEL:   atomic_fetch_xor64:
  %t1 = atomicrmw xor  i64* @sc64, i64 3 acquire
; X32:       xorl
; X32-NOT:       xorl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw xor  i64* @sc64, i64 4294967297 acquire
; X32:       xorl $1
; X32:       xorl $1
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw xor  i64* @sc64, i64 %t2 acquire
; X32:       xorl
; X32:       xorl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

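; nand has no direct x86 encoding, so it is expected to be lowered to an and
; followed by a not on each 32-bit half before the cmpxchg8b loop.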
define void @atomic_fetch_nand64(i64 %x) nounwind {
; X64-LABEL:   atomic_fetch_nand64:
; X32-LABEL:   atomic_fetch_nand64:
  %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X32:       andl
; X32:       andl
; X32:       notl
; X32:       notl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

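; The signed and unsigned min/max variants are expected to compare the 64-bit
; value with a subl/sbbl pair and select each half with a cmov before the
; cmpxchg8b loop.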
define void @atomic_fetch_max64(i64 %x) nounwind {
; X64-LABEL:   atomic_fetch_max64:
; X32-LABEL:   atomic_fetch_max64:
  %t1 = atomicrmw max  i64* @sc64, i64 %x acquire
; X32:       subl
; X32:       subl
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

define void @atomic_fetch_min64(i64 %x) nounwind {
; X64-LABEL:   atomic_fetch_min64:
; X32-LABEL:   atomic_fetch_min64:
  %t1 = atomicrmw min  i64* @sc64, i64 %x acquire
; X32:       subl
; X32:       subl
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

define void @atomic_fetch_umax64(i64 %x) nounwind {
; X64-LABEL:   atomic_fetch_umax64:
; X32-LABEL:   atomic_fetch_umax64:
  %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
; X32:       subl
; X32:       subl
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

define void @atomic_fetch_umin64(i64 %x) nounwind {
; X64-LABEL:   atomic_fetch_umin64:
; X32-LABEL:   atomic_fetch_umin64:
  %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
; X32:       subl
; X32:       subl
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

define void @atomic_fetch_cmpxchg64() nounwind {
; X64-LABEL:   atomic_fetch_cmpxchg64:
; X32-LABEL:   atomic_fetch_cmpxchg64:
  %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

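; A 64-bit atomic store on 32-bit x86 is also expected to go through a lock
; cmpxchg8b loop here.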
define void @atomic_fetch_store64(i64 %x) nounwind {
; X64-LABEL:   atomic_fetch_store64:
; X32-LABEL:   atomic_fetch_store64:
  store atomic i64 %x, i64* @sc64 release, align 8
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

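; The xchg8b pattern below matches the cmpxchg8b mnemonic as a substring; the
; 64-bit exchange is expected to be implemented with a lock cmpxchg8b loop as
; well.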
define void @atomic_fetch_swap64(i64 %x) nounwind {
; X64-LABEL:   atomic_fetch_swap64:
; X32-LABEL:   atomic_fetch_swap64:
  %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X32:       lock
; X32:       xchg8b
  ret void
; X32:       ret
}