; RUN: opt -instcombine -S -o - %s | FileCheck %s
; Check that we can replace `atomicrmw <op> LHS, 0` with `load atomic LHS`.
; This is possible when:
; - <op> LHS, 0 == LHS
; - the ordering of atomicrmw is compatible with a load (i.e., no release semantic)
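; For example, `atomicrmw add i32* %addr, i32 0 monotonic` in the first test
; below is expected to become `load atomic i32, i32* %addr monotonic, align 4`.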

; CHECK-LABEL: atomic_add_zero
; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
define i32 @atomic_add_zero(i32* %addr) {
  %res = atomicrmw add i32* %addr, i32 0 monotonic
  ret i32 %res
}

; CHECK-LABEL: atomic_or_zero
; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
define i32 @atomic_or_zero(i32* %addr) {
  %res = atomicrmw or i32* %addr, i32 0 monotonic
  ret i32 %res
}

; CHECK-LABEL: atomic_sub_zero
; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
define i32 @atomic_sub_zero(i32* %addr) {
  %res = atomicrmw sub i32* %addr, i32 0 monotonic
  ret i32 %res
}

; CHECK-LABEL: atomic_and_allones
; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
define i32 @atomic_and_allones(i32* %addr) {
  %res = atomicrmw and i32* %addr, i32 -1 monotonic
  ret i32 %res
}

; CHECK-LABEL: atomic_umin_uint_max
; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
define i32 @atomic_umin_uint_max(i32* %addr) {
  %res = atomicrmw umin i32* %addr, i32 -1 monotonic
  ret i32 %res
}

; CHECK-LABEL: atomic_umax_zero
; CHECK-NEXT: %res = load atomic i32, i32* %addr monotonic, align 4
; CHECK-NEXT: ret i32 %res
define i32 @atomic_umax_zero(i32* %addr) {
  %res = atomicrmw umax i32* %addr, i32 0 monotonic
  ret i32 %res
}

; CHECK-LABEL: atomic_min_smax_char
; CHECK-NEXT: %res = load atomic i8, i8* %addr monotonic, align 1
; CHECK-NEXT: ret i8 %res
define i8 @atomic_min_smax_char(i8* %addr) {
  %res = atomicrmw min i8* %addr, i8 127 monotonic
  ret i8 %res
}

; CHECK-LABEL: atomic_max_smin_char
; CHECK-NEXT: %res = load atomic i8, i8* %addr monotonic, align 1
; CHECK-NEXT: ret i8 %res
define i8 @atomic_max_smin_char(i8* %addr) {
  %res = atomicrmw max i8* %addr, i8 -128 monotonic
  ret i8 %res
}

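; Likewise for floating point: `fsub LHS, 0.0` and `fadd LHS, -0.0` leave LHS
; unchanged, so the monotonic forms below can become loads.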
; CHECK-LABEL: atomic_fsub_zero
; CHECK-NEXT: %res = load atomic float, float* %addr monotonic, align 4
; CHECK-NEXT: ret float %res
define float @atomic_fsub_zero(float* %addr) {
  %res = atomicrmw fsub float* %addr, float 0.0 monotonic
  ret float %res
}

; CHECK-LABEL: atomic_fadd_zero
; CHECK-NEXT: %res = load atomic float, float* %addr monotonic, align 4
; CHECK-NEXT: ret float %res
define float @atomic_fadd_zero(float* %addr) {
  %res = atomicrmw fadd float* %addr, float -0.0 monotonic
  ret float %res
}

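; With a release ordering the load replacement does not apply, but
; `fsub LHS, 0.0` is still canonicalized to `fadd LHS, -0.0`.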
; CHECK-LABEL: atomic_fsub_canon
; CHECK-NEXT: %res = atomicrmw fadd float* %addr, float -0.000000e+00 release
; CHECK-NEXT: ret float %res
define float @atomic_fsub_canon(float* %addr) {
  %res = atomicrmw fsub float* %addr, float 0.0 release
  ret float %res
}

; CHECK-LABEL: atomic_fadd_canon
; CHECK-NEXT: %res = atomicrmw fadd float* %addr, float -0.000000e+00 release
; CHECK-NEXT: ret float %res
define float @atomic_fadd_canon(float* %addr) {
  %res = atomicrmw fadd float* %addr, float -0.0 release
  ret float %res
}

; Can't replace a volatile w/a load; this would eliminate a volatile store.
; CHECK-LABEL: atomic_sub_zero_volatile
; CHECK-NEXT: %res = atomicrmw volatile sub i64* %addr, i64 0 acquire
; CHECK-NEXT: ret i64 %res
define i64 @atomic_sub_zero_volatile(i64* %addr) {
  %res = atomicrmw volatile sub i64* %addr, i64 0 acquire
  ret i64 %res
}


; Check that the transformation properly preserves the syncscope.
; CHECK-LABEL: atomic_syncscope
; CHECK-NEXT: %res = load atomic i16, i16* %addr syncscope("some_syncscope") acquire, align 2
; CHECK-NEXT: ret i16 %res
define i16 @atomic_syncscope(i16* %addr) {
  %res = atomicrmw or i16* %addr, i16 0 syncscope("some_syncscope") acquire
  ret i16 %res
}

; By eliminating the store part of the atomicrmw, we would get rid of the
; release semantic, which is incorrect.  We can canonicalize the operation.
; CHECK-LABEL: atomic_seq_cst
; CHECK-NEXT: %res = atomicrmw or i16* %addr, i16 0 seq_cst
; CHECK-NEXT: ret i16 %res
define i16 @atomic_seq_cst(i16* %addr) {
  %res = atomicrmw add i16* %addr, i16 0 seq_cst
  ret i16 %res
}

; Check that the transformation does not apply when the value is changed by
; the atomic operation (non-zero constant).
; CHECK-LABEL: atomic_add_non_zero
; CHECK-NEXT: %res = atomicrmw add i16* %addr, i16 2 monotonic
; CHECK-NEXT: ret i16 %res
define i16 @atomic_add_non_zero(i16* %addr) {
  %res = atomicrmw add i16* %addr, i16 2 monotonic
  ret i16 %res
}

; CHECK-LABEL: atomic_xor_zero
; CHECK-NEXT: %res = load atomic i16, i16* %addr monotonic, align 2
; CHECK-NEXT: ret i16 %res
define i16 @atomic_xor_zero(i16* %addr) {
  %res = atomicrmw xor i16* %addr, i16 0 monotonic
  ret i16 %res
}

; Check that the transformation does not apply when the ordering is
; incompatible with a load (release).  Do canonicalize.
; CHECK-LABEL: atomic_release
; CHECK-NEXT: %res = atomicrmw or i16* %addr, i16 0 release
; CHECK-NEXT: ret i16 %res
define i16 @atomic_release(i16* %addr) {
  %res = atomicrmw sub i16* %addr, i16 0 release
  ret i16 %res
}

; Check that the transformation does not apply when the ordering is
; incompatible with a load (acq_rel).  Do canonicalize.
; CHECK-LABEL: atomic_acq_rel
; CHECK-NEXT: %res = atomicrmw or i16* %addr, i16 0 acq_rel
; CHECK-NEXT: ret i16 %res
define i16 @atomic_acq_rel(i16* %addr) {
  %res = atomicrmw xor i16* %addr, i16 0 acq_rel
  ret i16 %res
}


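; Check that an atomicrmw whose constant operand saturates the operation (the
; stored value no longer depends on the loaded one) is canonicalized to xchg.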
; CHECK-LABEL: sat_or_allones
; CHECK-NEXT: %res = atomicrmw xchg i32* %addr, i32 -1 monotonic
; CHECK-NEXT: ret i32 %res
define i32 @sat_or_allones(i32* %addr) {
  %res = atomicrmw or i32* %addr, i32 -1 monotonic
  ret i32 %res
}

; CHECK-LABEL: sat_and_zero
; CHECK-NEXT: %res = atomicrmw xchg i32* %addr, i32 0 monotonic
; CHECK-NEXT: ret i32 %res
define i32 @sat_and_zero(i32* %addr) {
  %res = atomicrmw and i32* %addr, i32 0 monotonic
  ret i32 %res
}

; CHECK-LABEL: sat_umin_uint_min
; CHECK-NEXT: %res = atomicrmw xchg i32* %addr, i32 0 monotonic
; CHECK-NEXT: ret i32 %res
define i32 @sat_umin_uint_min(i32* %addr) {
  %res = atomicrmw umin i32* %addr, i32 0 monotonic
  ret i32 %res
}

; CHECK-LABEL: sat_umax_uint_max
; CHECK-NEXT: %res = atomicrmw xchg i32* %addr, i32 -1 monotonic
; CHECK-NEXT: ret i32 %res
define i32 @sat_umax_uint_max(i32* %addr) {
  %res = atomicrmw umax i32* %addr, i32 -1 monotonic
  ret i32 %res
}

; CHECK-LABEL: sat_min_smin_char
; CHECK-NEXT: %res = atomicrmw xchg i8* %addr, i8 -128 monotonic
; CHECK-NEXT: ret i8 %res
define i8 @sat_min_smin_char(i8* %addr) {
  %res = atomicrmw min i8* %addr, i8 -128 monotonic
  ret i8 %res
}

; CHECK-LABEL: sat_max_smax_char
; CHECK-NEXT: %res = atomicrmw xchg i8* %addr, i8 127 monotonic
; CHECK-NEXT: ret i8 %res
define i8 @sat_max_smax_char(i8* %addr) {
  %res = atomicrmw max i8* %addr, i8 127 monotonic
  ret i8 %res
}

; CHECK-LABEL: sat_fadd_nan
; CHECK-NEXT: %res = atomicrmw xchg double* %addr, double 0x7FF00000FFFFFFFF release
; CHECK-NEXT: ret double %res
define double @sat_fadd_nan(double* %addr) {
  %res = atomicrmw fadd double* %addr, double 0x7FF00000FFFFFFFF release
  ret double %res
}

; CHECK-LABEL: sat_fsub_nan
; CHECK-NEXT: %res = atomicrmw xchg double* %addr, double 0x7FF00000FFFFFFFF release
; CHECK-NEXT: ret double %res
define double @sat_fsub_nan(double* %addr) {
  %res = atomicrmw fsub double* %addr, double 0x7FF00000FFFFFFFF release
  ret double %res
}

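; If the result is also unused, the saturating operation reduces further to a
; plain atomic store of the constant.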
; CHECK-LABEL: sat_fsub_nan_unused
; CHECK-NEXT: store atomic double 0x7FF00000FFFFFFFF, double* %addr monotonic, align 8
; CHECK-NEXT: ret void
define void @sat_fsub_nan_unused(double* %addr) {
  atomicrmw fsub double* %addr, double 0x7FF00000FFFFFFFF monotonic
  ret void
}

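; An xchg whose result is unused is replaced with an atomic store when the
; ordering is compatible with a store.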
; CHECK-LABEL: xchg_unused_monotonic
; CHECK-NEXT: store atomic i32 0, i32* %addr monotonic, align 4
; CHECK-NEXT: ret void
define void @xchg_unused_monotonic(i32* %addr) {
  atomicrmw xchg i32* %addr, i32 0 monotonic
  ret void
}

; CHECK-LABEL: xchg_unused_release
; CHECK-NEXT: store atomic i32 -1, i32* %addr release, align 4
; CHECK-NEXT: ret void
define void @xchg_unused_release(i32* %addr) {
  atomicrmw xchg i32* %addr, i32 -1 release
  ret void
}

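; Check that the store replacement does not apply for seq_cst: dropping the
; load half of the RMW would lose its acquire semantics.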
; CHECK-LABEL: xchg_unused_seq_cst
; CHECK-NEXT: atomicrmw xchg i32* %addr, i32 0 seq_cst
; CHECK-NEXT: ret void
define void @xchg_unused_seq_cst(i32* %addr) {
  atomicrmw xchg i32* %addr, i32 0 seq_cst
  ret void
}

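; A volatile xchg must also be kept; replacing it with a store would eliminate
; the volatile load.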
; CHECK-LABEL: xchg_unused_volatile
; CHECK-NEXT: atomicrmw volatile xchg i32* %addr, i32 0 monotonic
; CHECK-NEXT: ret void
define void @xchg_unused_volatile(i32* %addr) {
  atomicrmw volatile xchg i32* %addr, i32 0 monotonic
  ret void
}

; CHECK-LABEL: sat_or_allones_unused
; CHECK-NEXT: store atomic i32 -1, i32* %addr monotonic, align 4
; CHECK-NEXT: ret void
define void @sat_or_allones_unused(i32* %addr) {
  atomicrmw or i32* %addr, i32 -1 monotonic
  ret void
}


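; Check that an atomicrmw with an undef operand is left untouched, whether or
; not its result is used.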
; CHECK-LABEL: undef_operand_unused
; CHECK-NEXT: atomicrmw or i32* %addr, i32 undef monotonic
; CHECK-NEXT: ret void
define void @undef_operand_unused(i32* %addr) {
  atomicrmw or i32* %addr, i32 undef monotonic
  ret void
}

; CHECK-LABEL: undef_operand_used
; CHECK-NEXT: %res = atomicrmw or i32* %addr, i32 undef monotonic
; CHECK-NEXT: ret i32 %res
define i32 @undef_operand_used(i32* %addr) {
  %res = atomicrmw or i32* %addr, i32 undef monotonic
  ret i32 %res
}