; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32

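; Test that 64-bit atomic read-modify-write operations are lowered correctly
; on i386, which has no native 8-byte RMW instructions: each operation should
; compute the new value in a 32-bit register pair and commit it with a LOCK
; CMPXCHG8B retry loop.
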
@sc64 = external global i64

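; 64-bit add splits into addl (low half) plus adcl (carry into the high half)
; before the CMPXCHG8B commit.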
define void @atomic_fetch_add64() nounwind {
; X32:   atomic_fetch_add64
entry:
  %t1 = atomicrmw add  i64* @sc64, i64 1 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw add  i64* @sc64, i64 3 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw add  i64* @sc64, i64 5 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t4 = atomicrmw add  i64* @sc64, i64 %t3 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

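; 64-bit sub splits into subl plus sbbl, propagating the borrow into the
; high half.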
define void @atomic_fetch_sub64() nounwind {
; X32:   atomic_fetch_sub64
  %t1 = atomicrmw sub  i64* @sc64, i64 1 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw sub  i64* @sc64, i64 3 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw sub  i64* @sc64, i64 5 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  %t4 = atomicrmw sub  i64* @sc64, i64 %t3 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

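; Bitwise and has no cross-half carry, so each half gets an independent andl.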
define void @atomic_fetch_and64() nounwind {
; X32:   atomic_fetch_and64
  %t1 = atomicrmw and  i64* @sc64, i64 3 acquire
; X32:       andl
; X32:       andl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw and  i64* @sc64, i64 5 acquire
; X32:       andl
; X32:       andl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw and  i64* @sc64, i64 %t2 acquire
; X32:       andl
; X32:       andl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

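; As with and, or is two independent orl instructions, one per half.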
define void @atomic_fetch_or64() nounwind {
; X32:   atomic_fetch_or64
  %t1 = atomicrmw or   i64* @sc64, i64 3 acquire
; X32:       orl
; X32:       orl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw or   i64* @sc64, i64 5 acquire
; X32:       orl
; X32:       orl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw or   i64* @sc64, i64 %t2 acquire
; X32:       orl
; X32:       orl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

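; xor likewise lowers to one xorl per half.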
define void @atomic_fetch_xor64() nounwind {
; X32:   atomic_fetch_xor64
  %t1 = atomicrmw xor  i64* @sc64, i64 3 acquire
; X32:       xorl
; X32:       xorl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw xor  i64* @sc64, i64 5 acquire
; X32:       xorl
; X32:       xorl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw xor  i64* @sc64, i64 %t2 acquire
; X32:       xorl
; X32:       xorl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

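; nand is and on both halves followed by notl on each result.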
define void @atomic_fetch_nand64(i64 %x) nounwind {
; X32:   atomic_fetch_nand64
  %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X32:       andl
; X32:       andl
; X32:       notl
; X32:       notl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

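; Signed max compares both halves (two cmpl) and uses conditional moves to
; select the larger value.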
define void @atomic_fetch_max64(i64 %x) nounwind {
; X32:   atomic_fetch_max64
  %t1 = atomicrmw max  i64* @sc64, i64 %x acquire
; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

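; Signed min: same compare-and-cmov pattern, selecting the smaller value.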
define void @atomic_fetch_min64(i64 %x) nounwind {
; X32:   atomic_fetch_min64
  %t1 = atomicrmw min  i64* @sc64, i64 %x acquire
; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

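; Unsigned max uses the same shape with unsigned compare conditions.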
define void @atomic_fetch_umax64(i64 %x) nounwind {
; X32:   atomic_fetch_umax64
  %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

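; Unsigned min, again via compares and conditional moves.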
define void @atomic_fetch_umin64(i64 %x) nounwind {
; X32:   atomic_fetch_umin64
  %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

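; A 64-bit cmpxchg maps directly onto a single LOCK CMPXCHG8B.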
define void @atomic_fetch_cmpxchg64() nounwind {
; X32:   atomic_fetch_cmpxchg64
  %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

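; An atomic 64-bit store cannot be done as two 32-bit stores; the checks
; below expect it to go through a CMPXCHG8B loop as well.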
define void @atomic_fetch_store64(i64 %x) nounwind {
; X32:   atomic_fetch_store64
  store atomic i64 %x, i64* @sc64 release, align 8
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

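; xchg of an i64 is also a CMPXCHG8B loop: i386 has no 8-byte xchg
; instruction.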
define void @atomic_fetch_swap64(i64 %x) nounwind {
; X32:   atomic_fetch_swap64
  %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}