/* PR target/49244 */
/* { dg-do compile } */
/* { dg-options "-O2" } */
4
/* External helper used by f7/f8 to keep the branch live; never defined here
   (this is a compile-only test).  */
void bar (void);
6
/* Atomically set bit BIT of *A; return nonzero iff it was already set.
   Expected to match a single lock bts on x86.  */
__attribute__((noinline, noclone)) int
f1 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__sync_fetch_and_or (a, mask) & mask) != 0;
}
13
/* Same as f1 but via __atomic_fetch_or with relaxed ordering and the
   mask-test split across temporaries.  */
__attribute__((noinline, noclone)) int
f2 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  unsigned int t1 = __atomic_fetch_or (a, mask, __ATOMIC_RELAXED);
  unsigned int t2 = t1 & mask;
  return t2 != 0;
}
22
/* Long variant; returns nonzero iff the bit was previously CLEAR
   (inverted test, still a bts candidate).  */
__attribute__((noinline, noclone)) long int
f3 (long int *a, int bit)
{
  unsigned long int mask = (1ul << bit);
  return (__atomic_fetch_or (a, mask, __ATOMIC_SEQ_CST) & mask) == 0;
}
29
/* Constant-bit variant of f1 (bit 7).  */
__attribute__((noinline, noclone)) int
f4 (int *a)
{
  unsigned int mask = (1u << 7);
  return (__sync_fetch_and_or (a, mask) & mask) != 0;
}
36
/* Constant-bit __atomic_fetch_or, relaxed ordering (bit 13).  */
__attribute__((noinline, noclone)) int
f5 (int *a)
{
  unsigned int mask = (1u << 13);
  return (__atomic_fetch_or (a, mask, __ATOMIC_RELAXED) & mask) != 0;
}
43
/* Constant-bit __atomic_fetch_or, seq-cst ordering (bit 0).  */
__attribute__((noinline, noclone)) int
f6 (int *a)
{
  unsigned int mask = (1u << 0);
  return (__atomic_fetch_or (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
50
/* Atomically toggle bit BIT of *A; call bar() if it was previously set.
   Tests btc feeding a conditional branch.  */
__attribute__((noinline, noclone)) void
f7 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  if ((__sync_fetch_and_xor (a, mask) & mask) != 0)
    bar ();
}
58
/* As f7 but __atomic_fetch_xor, relaxed, and the inverted test
   (call bar() if the bit was previously clear).  */
__attribute__((noinline, noclone)) void
f8 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  if ((__atomic_fetch_xor (a, mask, __ATOMIC_RELAXED) & mask) == 0)
    bar ();
}
66
/* Atomically toggle bit BIT of *A; return nonzero iff it was set.  */
__attribute__((noinline, noclone)) int
f9 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__atomic_fetch_xor (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
73
/* Constant-bit __sync_fetch_and_xor (bit 7).  */
__attribute__((noinline, noclone)) int
f10 (int *a)
{
  unsigned int mask = (1u << 7);
  return (__sync_fetch_and_xor (a, mask) & mask) != 0;
}
80
/* Constant-bit __atomic_fetch_xor, relaxed ordering (bit 13).  */
__attribute__((noinline, noclone)) int
f11 (int *a)
{
  unsigned int mask = (1u << 13);
  return (__atomic_fetch_xor (a, mask, __ATOMIC_RELAXED) & mask) != 0;
}
87
/* Constant-bit __atomic_fetch_xor, seq-cst ordering (bit 0).  */
__attribute__((noinline, noclone)) int
f12 (int *a)
{
  unsigned int mask = (1u << 0);
  return (__atomic_fetch_xor (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
94
/* Atomically clear bit BIT of *A; return nonzero iff it was set.
   Expected to match a single lock btr on x86.  */
__attribute__((noinline, noclone)) int
f13 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__sync_fetch_and_and (a, ~mask) & mask) != 0;
}
101
/* As f13 but via __atomic_fetch_and, relaxed ordering.  */
__attribute__((noinline, noclone)) int
f14 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__atomic_fetch_and (a, ~mask, __ATOMIC_RELAXED) & mask) != 0;
}
108
/* As f14 with seq-cst ordering.  */
__attribute__((noinline, noclone)) int
f15 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__atomic_fetch_and (a, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
115
/* Constant-bit __sync_fetch_and_and (clear bit 7).  */
__attribute__((noinline, noclone)) int
f16 (int *a)
{
  unsigned int mask = (1u << 7);
  return (__sync_fetch_and_and (a, ~mask) & mask) != 0;
}
122
/* Constant-bit __atomic_fetch_and, relaxed ordering (clear bit 13).  */
__attribute__((noinline, noclone)) int
f17 (int *a)
{
  unsigned int mask = (1u << 13);
  return (__atomic_fetch_and (a, ~mask, __ATOMIC_RELAXED) & mask) != 0;
}
129
/* Constant-bit __atomic_fetch_and, seq-cst ordering (clear bit 0).  */
__attribute__((noinline, noclone)) int
f18 (int *a)
{
  unsigned int mask = (1u << 0);
  return (__atomic_fetch_and (a, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
136
/* xor_fetch (NEW value) variant: toggle bit BIT, return nonzero iff the
   bit is set AFTER the toggle.  */
__attribute__((noinline, noclone)) unsigned long int
f19 (unsigned long int *a, int bit)
{
  unsigned long int mask = (1ul << bit);
  return (__atomic_xor_fetch (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
143
/* Constant-bit xor_fetch with inverted test: nonzero iff bit 7 is clear
   AFTER the toggle.  */
__attribute__((noinline, noclone)) unsigned long int
f20 (unsigned long int *a)
{
  unsigned long int mask = (1ul << 7);
  return (__atomic_xor_fetch (a, mask, __ATOMIC_SEQ_CST) & mask) == 0;
}
150
/* As f1 but returning the raw masked value (0 or the mask itself) rather
   than a 0/1 flag.  */
__attribute__((noinline, noclone)) int
f21 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__sync_fetch_and_or (a, mask) & mask);
}
157
/* Raw masked NEW value after toggling bit 7 (0 or 128).  */
__attribute__((noinline, noclone)) unsigned long int
f22 (unsigned long int *a)
{
  unsigned long int mask = (1ul << 7);
  return (__atomic_xor_fetch (a, mask, __ATOMIC_SEQ_CST) & mask);
}
164
/* Raw masked OLD value before toggling bit 7 (0 or 128).  */
__attribute__((noinline, noclone)) unsigned long int
f23 (unsigned long int *a)
{
  unsigned long int mask = (1ul << 7);
  return (__atomic_fetch_xor (a, mask, __ATOMIC_SEQ_CST) & mask);
}
171
/* 16-bit operand variant of f4 (__sync_fetch_and_or on unsigned short).  */
__attribute__((noinline, noclone)) unsigned short int
f24 (unsigned short int *a)
{
  unsigned short int mask = (1u << 7);
  return (__sync_fetch_and_or (a, mask) & mask) != 0;
}
178
/* 16-bit operand variant using __atomic_fetch_or, seq-cst ordering.  */
__attribute__((noinline, noclone)) unsigned short int
f25 (unsigned short int *a)
{
  unsigned short int mask = (1u << 7);
  return (__atomic_fetch_or (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
185
/* { dg-final { scan-assembler-times "lock;?\[ \t\]*bts" 9 } } */
/* { dg-final { scan-assembler-times "lock;?\[ \t\]*btc" 10 } } */
/* { dg-final { scan-assembler-times "lock;?\[ \t\]*btr" 6 } } */