/* Linux-specific atomic operations for ARM EABI.
   Copyright (C) 2008-2020 Free Software Foundation, Inc.
   Contributed by CodeSourcery.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* Kernel helper for compare-and-exchange.  */
typedef int (__kernel_cmpxchg_t) (int oldval, int newval, int *ptr);
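
/* The helper returns 0 if *ptr was equal to oldval and the store of
   newval succeeded, and nonzero otherwise; it also includes the
   memory barriers the operation needs.  See the kernel's kuser
   helpers documentation for the exact contract.  */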

#define STR(X) #X
#define XSTR(X) STR(X)

#define KERNEL_CMPXCHG 0xffff0fc0

#if __FDPIC__
/* Non-FDPIC ABIs call __kernel_cmpxchg directly by casting its fixed
   address to a function pointer and dereferencing it.  Under FDPIC,
   function pointers designate function descriptors rather than code,
   so that cast would generate a broken call sequence.  That's why we
   have to wrap __kernel_cmpxchg and __kernel_dmb here: this way, the
   FDPIC call sequence works.  */
#define __kernel_cmpxchg __fdpic_cmpxchg
#else
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) KERNEL_CMPXCHG)
#endif

/* Kernel helper for memory barrier.  */
typedef void (__kernel_dmb_t) (void);

#define KERNEL_DMB 0xffff0fa0

#if __FDPIC__
#define __kernel_dmb __fdpic_dmb
#else
#define __kernel_dmb (*(__kernel_dmb_t *) KERNEL_DMB)
#endif

#if __FDPIC__
static int __fdpic_cmpxchg (int oldval, int newval, int *ptr)
{
  int result;

  asm volatile (
    "ldr ip, 1f\n\t"
    "bx ip\n\t"
    "1:\n\t"
    ".word " XSTR(KERNEL_CMPXCHG) "\n\t"
    : "=r" (result)
    : "r" (oldval), "r" (newval), "r" (ptr)
    : "r3", "memory");
  /* The result is actually returned by the kernel helper; returning
     it here avoids a warning about a missing return value.  */
  return result;
}

static void __fdpic_dmb (void)
{
  asm volatile (
    "ldr ip, 1f\n\t"
    "bx ip\n\t"
    "1:\n\t"
    ".word " XSTR(KERNEL_DMB) "\n\t"
    );
}

#endif

/* Note: we implement byte, short and int versions of atomic operations using
   the above kernel helpers; see linux-atomic-64bit.c for "long long" (64-bit)
   operations.  */

#define HIDDEN __attribute__ ((visibility ("hidden")))

#ifdef __ARMEL__
#define INVERT_MASK_1 0
#define INVERT_MASK_2 0
#else
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16
#endif

#define MASK_1 0xffu
#define MASK_2 0xffffu

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
  int HIDDEN \
  __sync_fetch_and_##OP##_4 (int *ptr, int val) \
  { \
    int failure, tmp; \
 \
    do { \
      tmp = *ptr; \
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
    } while (failure != 0); \
 \
    return tmp; \
  }
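
/* For illustration, FETCH_AND_OP_WORD (add, , +) expands to roughly:

     int HIDDEN
     __sync_fetch_and_add_4 (int *ptr, int val)
     {
       int failure, tmp;

       do {
         tmp = *ptr;
         failure = __kernel_cmpxchg (tmp, tmp + val, ptr);
       } while (failure != 0);

       return tmp;
     }

   i.e. a retry loop around the kernel helper that returns the value
   the word held before the operation.  */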

FETCH_AND_OP_WORD (add, , +)
FETCH_AND_OP_WORD (sub, , -)
FETCH_AND_OP_WORD (or, , |)
FETCH_AND_OP_WORD (and, , &)
FETCH_AND_OP_WORD (xor, , ^)
FETCH_AND_OP_WORD (nand, ~, &)

#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH

/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
   subword-sized quantities.  */

#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
  TYPE HIDDEN \
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val) \
  { \
    int *wordptr = (int *) ((unsigned int) ptr & ~3); \
    unsigned int mask, shift, oldval, newval; \
    int failure; \
 \
    shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
    mask = MASK_##WIDTH << shift; \
 \
    do { \
      oldval = *wordptr; \
      newval = ((PFX_OP (((oldval & mask) >> shift) \
                         INF_OP (unsigned int) val)) << shift) & mask; \
      newval |= oldval & ~mask; \
      failure = __kernel_cmpxchg (oldval, newval, wordptr); \
    } while (failure != 0); \
 \
    return (RETURN & mask) >> shift; \
  }
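
/* Worked example of the addressing arithmetic above: on a
   little-endian target (INVERT_MASK_2 == 0), a short at an address
   with (ptr & 3) == 2 gives shift == 16 and mask == 0xffff0000, so
   the operation is applied to the upper halfword of the containing
   word; on big-endian the XOR with INVERT_MASK_2 flips the shift so
   that the same byte lane is selected.  */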

SUBWORD_SYNC_OP (add, , +, short, 2, oldval)
SUBWORD_SYNC_OP (sub, , -, short, 2, oldval)
SUBWORD_SYNC_OP (or, , |, short, 2, oldval)
SUBWORD_SYNC_OP (and, , &, short, 2, oldval)
SUBWORD_SYNC_OP (xor, , ^, short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, oldval)

SUBWORD_SYNC_OP (add, , +, signed char, 1, oldval)
SUBWORD_SYNC_OP (sub, , -, signed char, 1, oldval)
SUBWORD_SYNC_OP (or, , |, signed char, 1, oldval)
SUBWORD_SYNC_OP (and, , &, signed char, 1, oldval)
SUBWORD_SYNC_OP (xor, , ^, signed char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, signed char, 1, oldval)

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
  int HIDDEN \
  __sync_##OP##_and_fetch_4 (int *ptr, int val) \
  { \
    int tmp, failure; \
 \
    do { \
      tmp = *ptr; \
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
    } while (failure != 0); \
 \
    return PFX_OP (tmp INF_OP val); \
  }
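
/* OP_AND_FETCH_WORD differs from FETCH_AND_OP_WORD only in its return
   value: it recomputes and returns the updated value rather than the
   original one.  The SUBWORD_SYNC_OP instantiations that follow pass
   RETURN == newval for the same reason, producing the subword
   __sync_<op>_and_fetch_{1,2} entry points.  */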

OP_AND_FETCH_WORD (add, , +)
OP_AND_FETCH_WORD (sub, , -)
OP_AND_FETCH_WORD (or, , |)
OP_AND_FETCH_WORD (and, , &)
OP_AND_FETCH_WORD (xor, , ^)
OP_AND_FETCH_WORD (nand, ~, &)

SUBWORD_SYNC_OP (add, , +, short, 2, newval)
SUBWORD_SYNC_OP (sub, , -, short, 2, newval)
SUBWORD_SYNC_OP (or, , |, short, 2, newval)
SUBWORD_SYNC_OP (and, , &, short, 2, newval)
SUBWORD_SYNC_OP (xor, , ^, short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, newval)

SUBWORD_SYNC_OP (add, , +, signed char, 1, newval)
SUBWORD_SYNC_OP (sub, , -, signed char, 1, newval)
SUBWORD_SYNC_OP (or, , |, signed char, 1, newval)
SUBWORD_SYNC_OP (and, , &, signed char, 1, newval)
SUBWORD_SYNC_OP (xor, , ^, signed char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, signed char, 1, newval)

int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int actual_oldval, fail;

  while (1)
    {
      actual_oldval = *ptr;

      if (__builtin_expect (oldval != actual_oldval, 0))
        return actual_oldval;

      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);

      if (__builtin_expect (!fail, 1))
        return oldval;
    }
}
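
/* Note that the plain load and early return above are only an
   optimization for the expected-failure path; the authoritative
   compare is still performed atomically by the kernel helper, and the
   loop retries if another thread changed *ptr in between.  */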

#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
  TYPE HIDDEN \
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
                                       TYPE newval) \
  { \
    int *wordptr = (int *)((unsigned int) ptr & ~3), fail; \
    unsigned int mask, shift, actual_oldval, actual_newval; \
 \
    shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
    mask = MASK_##WIDTH << shift; \
 \
    while (1) \
      { \
        actual_oldval = *wordptr; \
 \
        if (__builtin_expect (((actual_oldval & mask) >> shift) != \
                              ((unsigned int) oldval & MASK_##WIDTH), 0)) \
          return (actual_oldval & mask) >> shift; \
 \
        actual_newval = (actual_oldval & ~mask) \
                        | (((unsigned int) newval << shift) & mask); \
 \
        fail = __kernel_cmpxchg (actual_oldval, actual_newval, \
                                 wordptr); \
 \
        if (__builtin_expect (!fail, 1)) \
          return oldval; \
      } \
  }

SUBWORD_VAL_CAS (short, 2)
SUBWORD_VAL_CAS (signed char, 1)

typedef unsigned char bool;

bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int failure = __kernel_cmpxchg (oldval, newval, ptr);
  return (failure == 0);
}

#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
  bool HIDDEN \
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
                                        TYPE newval) \
  { \
    TYPE actual_oldval \
      = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval); \
    return (oldval == actual_oldval); \
  }

SUBWORD_BOOL_CAS (short, 2)
SUBWORD_BOOL_CAS (signed char, 1)

void HIDDEN
__sync_synchronize (void)
{
  __kernel_dmb ();
}

int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int failure, oldval;

  do {
    oldval = *ptr;
    failure = __kernel_cmpxchg (oldval, val, ptr);
  } while (failure != 0);

  return oldval;
}
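
/* GCC only requires __sync_lock_test_and_set to be an acquire
   barrier, but going through the kernel cmpxchg helper makes this
   implementation a full barrier as well.  */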

#define SUBWORD_TEST_AND_SET(TYPE, WIDTH) \
  TYPE HIDDEN \
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
  { \
    int failure; \
    unsigned int oldval, newval, shift, mask; \
    int *wordptr = (int *) ((unsigned int) ptr & ~3); \
 \
    shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
    mask = MASK_##WIDTH << shift; \
 \
    do { \
      oldval = *wordptr; \
      newval = (oldval & ~mask) \
               | (((unsigned int) val << shift) & mask); \
      failure = __kernel_cmpxchg (oldval, newval, wordptr); \
    } while (failure != 0); \
 \
    return (oldval & mask) >> shift; \
  }

SUBWORD_TEST_AND_SET (short, 2)
SUBWORD_TEST_AND_SET (signed char, 1)

#define SYNC_LOCK_RELEASE(TYPE, WIDTH) \
  void HIDDEN \
  __sync_lock_release_##WIDTH (TYPE *ptr) \
  { \
    /* All writes before this point must be seen before we release \
       the lock itself.  */ \
    __kernel_dmb (); \
    *ptr = 0; \
  }
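
/* The barrier-then-store sequence gives the release semantics that
   __sync_lock_release requires: earlier writes become visible before
   the zero that frees the lock.  */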

SYNC_LOCK_RELEASE (long long, 8)
SYNC_LOCK_RELEASE (int, 4)
SYNC_LOCK_RELEASE (short, 2)
SYNC_LOCK_RELEASE (char, 1)