; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64
; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32

@sc64 = external global i64

define void @atomic_fetch_add64() nounwind {
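; Adds with a dead result use the locked RMW forms directly: add of 1
; shrinks to incq and other immediates fold into addq. %t3 is live (it
; feeds %t4), so its old value must be fetched, which forces xaddq.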
; X64:   atomic_fetch_add64
entry:
  %t1 = atomicrmw add  i64* @sc64, i64 1 acquire
; X64:       lock
; X64:       incq
  %t2 = atomicrmw add  i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       addq $3
  %t3 = atomicrmw add  i64* @sc64, i64 5 acquire
; X64:       lock
; X64:       xaddq
  %t4 = atomicrmw add  i64* @sc64, i64 %t3 acquire
; X64:       lock
; X64:       addq
  ret void
; X64:       ret
}

define void @atomic_fetch_sub64() nounwind {
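; Subtraction mirrors add: 1 becomes decq, immediates fold into subq,
; and the live %t3 goes through xaddq (x86 has no fetch-and-sub, so the
; value is negated and added).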
; X64:   atomic_fetch_sub64
  %t1 = atomicrmw sub  i64* @sc64, i64 1 acquire
; X64:       lock
; X64:       decq
  %t2 = atomicrmw sub  i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       subq $3
  %t3 = atomicrmw sub  i64* @sc64, i64 5 acquire
; X64:       lock
; X64:       xaddq
  %t4 = atomicrmw sub  i64* @sc64, i64 %t3 acquire
; X64:       lock
; X64:       subq
  ret void
; X64:       ret
}

define void @atomic_fetch_and64() nounwind {
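; and with a dead result folds into lock andq. No x86 instruction
; returns the old value for and, so when %t2 is consumed by %t3 the
; backend builds a loop: andq in registers, then lock cmpxchgq to
; commit.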
; X64:   atomic_fetch_and64
  %t1 = atomicrmw and  i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       andq $3
  %t2 = atomicrmw and  i64* @sc64, i64 5 acquire
; X64:       andq
; X64:       lock
; X64:       cmpxchgq
  %t3 = atomicrmw and  i64* @sc64, i64 %t2 acquire
; X64:       lock
; X64:       andq
  ret void
; X64:       ret
}

define void @atomic_fetch_or64() nounwind {
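; Same shape as and64: lock orq when the result is dead, an
; orq + lock cmpxchgq retry loop when the old value of %t2 is needed.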
; X64:   atomic_fetch_or64
  %t1 = atomicrmw or   i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       orq $3
  %t2 = atomicrmw or   i64* @sc64, i64 5 acquire
; X64:       orq
; X64:       lock
; X64:       cmpxchgq
  %t3 = atomicrmw or   i64* @sc64, i64 %t2 acquire
; X64:       lock
; X64:       orq
  ret void
; X64:       ret
}

define void @atomic_fetch_xor64() nounwind {
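; xor follows the identical pattern with xorq.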
; X64:   atomic_fetch_xor64
  %t1 = atomicrmw xor  i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       xorq $3
  %t2 = atomicrmw xor  i64* @sc64, i64 5 acquire
; X64:       xorq
; X64:       lock
; X64:       cmpxchgq
  %t3 = atomicrmw xor  i64* @sc64, i64 %t2 acquire
; X64:       lock
; X64:       xorq
  ret void
; X64:       ret
}

define void @atomic_fetch_nand64(i64 %x) nounwind {
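; x86 has no nand at all, so every nand becomes a cmpxchg loop:
; and + not in registers, lock cmpxchgq to commit. The i386 lowering
; processes the two 32-bit halves (paired andl/notl) and commits with
; lock cmpxchg8b.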
; X64:   atomic_fetch_nand64
; X32:   atomic_fetch_nand64
  %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X64:       andq
; X64:       notq
; X64:       lock
; X64:       cmpxchgq
; X32:       andl
; X32:       andl
; X32:       notl
; X32:       notl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_max64(i64 %x) nounwind {
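; Signed max has no RMW instruction either: compare, pick the larger
; value with cmov, then retry through lock cmpxchgq. On i386 both
; halves are compared (two cmpl, a chain of cmovs) before
; lock cmpxchg8b.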
  %t1 = atomicrmw max  i64* @sc64, i64 %x acquire
; X64:       cmpq
; X64:       cmov
; X64:       lock
; X64:       cmpxchgq

; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_min64(i64 %x) nounwind {
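; min is the same compare/cmov/cmpxchg loop with the comparison
; direction reversed.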
  %t1 = atomicrmw min  i64* @sc64, i64 %x acquire
; X64:       cmpq
; X64:       cmov
; X64:       lock
; X64:       cmpxchgq

; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_umax64(i64 %x) nounwind {
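; umax repeats the pattern with an unsigned comparison.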
  %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
; X64:       cmpq
; X64:       cmov
; X64:       lock
; X64:       cmpxchgq

; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_umin64(i64 %x) nounwind {
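; umin: unsigned comparison, reversed direction.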
  %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
; X64:       cmpq
; X64:       cmov
; X64:       lock
; X64:       cmpxchgq

; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_cmpxchg64() nounwind {
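; cmpxchg maps directly onto the hardware primitive: lock cmpxchgq on
; x86-64 and lock cmpxchg8b for the 8-byte case on i386.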
  %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
; X64:       lock
; X64:       cmpxchgq
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_store64(i64 %x) nounwind {
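; x86's strong store ordering makes an aligned release store a plain
; movq: no lock prefix or fence is allowed to appear. The i386
; lowering has no suitable single 64-bit integer store here and uses a
; lock cmpxchg8b loop instead.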
  store atomic i64 %x, i64* @sc64 release, align 8
; X64-NOT:   lock
; X64:       movq
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_swap64(i64 %x) nounwind {
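; xchg with a memory operand is implicitly locked, so no explicit lock
; prefix should be emitted on x86-64; i386 again falls back to a
; lock cmpxchg8b loop.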
  %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X64-NOT:   lock
; X64:       xchgq
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}