//===-- atomic.c - Implement support functions for atomic operations.------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// atomic.c defines a set of functions for performing atomic accesses on
// arbitrary-sized memory locations. This design uses locks that should
// be fast in the uncontended case, for two reasons:
//
// 1) This code must work with C programs that do not link to anything
//    (including pthreads) and so it should not depend on any pthread
//    functions.
// 2) Atomic operations, rather than explicit mutexes, are most commonly used
//    on code where contended operations are rare.
//
// To avoid needing a per-object lock, this code allocates an array of
// locks and hashes the object pointers to find the one that it should use.
// For operations that must be atomic on two locations, the lower lock is
// always acquired first, to avoid deadlock.
//
//===----------------------------------------------------------------------===//
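//
// As a hedged sketch of how this file is reached (the exact lowering is up to
// the front end): for an atomic access that the compiler cannot or will not
// inline, e.g. a 3-byte _Atomic structure, Clang may emit a generic libcall
// such as
//   __atomic_load(3, &obj, &tmp, __ATOMIC_SEQ_CST);
// which resolves to __atomic_load_c below and takes the locked slow path.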

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "assembly.h"

// Clang objects if you redefine a builtin. This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(             \
    __atomic_compare_exchange)
#pragma redefine_extname __atomic_is_lock_free_c SYMBOL_NAME(                 \
    __atomic_is_lock_free)
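// The net effect, illustratively: each *_c definition below is exported under
// the corresponding libcall symbol, so callers link against, for example,
//   void __atomic_load(int size, void *src, void *dest, int model);
// (modulo any platform symbol prefix applied by SYMBOL_NAME).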

/// Number of locks. This allocates one page on 32-bit platforms, two on
/// 64-bit. This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
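// Worked example of the sizing claim above: with the default SPINLOCK_COUNT
// of 1 << 10 and the fallback uintptr_t Lock, the locks array occupies
// 1024 * 4 = 4096 bytes on a 32-bit target and 1024 * 8 = 8192 on a 64-bit one.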

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation. Falls back to spinlocks if none is
// defined. Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
// clang-format off
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
// clang-format on
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
                                             &old, 0, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock. In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock. This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock. In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif
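
// Sketch of the contract shared by the three implementations above: lock()
// acts as an acquire operation and unlock() as a release, so a guarded
// critical section such as
//   Lock *l = lock_for_pointer(p); lock(l); memcpy(dest, p, size); unlock(l);
// makes the guarded bytes visible to other threads just as a mutex would.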

/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits. We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use
  return locks + (hash & SPINLOCK_MASK);
}
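// Worked example (assuming 64-bit pointers and the default 10-bit mask):
// for ptr == 0x7f0012345678, hash >>= 4 gives 0x7f001234567, so
// low == 0x167; hash >>= 16 then gives 0x7f00123, and 0x7f00123 ^ 0x167
// == 0x7f00044, whose low 10 bits select locks[0x44].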

/// Macros for determining whether a size is lock free.
#define ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(size, p)                 \
  (__atomic_always_lock_free(size, p) ||                                      \
   (__atomic_always_lock_free(size, 0) && ((uintptr_t)p % size) == 0))
#define IS_LOCK_FREE_1(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(1, p)
#define IS_LOCK_FREE_2(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(2, p)
#define IS_LOCK_FREE_4(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(4, p)
#define IS_LOCK_FREE_8(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(8, p)
#define IS_LOCK_FREE_16(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(16, p)
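// For instance, IS_LOCK_FREE_8(p) holds when the compiler can prove 8-byte
// atomics on p are always lock-free, or when they are lock-free for any
// naturally aligned address and p is 8-byte aligned at run time.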

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define TRY_LOCK_FREE_CASE(n, type, ptr)                                      \
  case n:                                                                     \
    if (IS_LOCK_FREE_##n(ptr)) {                                              \
      LOCK_FREE_ACTION(type);                                                 \
    }                                                                         \
    break;
#ifdef __SIZEOF_INT128__
#define TRY_LOCK_FREE_CASE_16(p) TRY_LOCK_FREE_CASE(16, __uint128_t, p)
#else
#define TRY_LOCK_FREE_CASE_16(p) /* __uint128_t not available */
#endif

#define LOCK_FREE_CASES(ptr)                                                  \
  do {                                                                        \
    switch (size) {                                                           \
      TRY_LOCK_FREE_CASE(1, uint8_t, ptr)                                     \
      TRY_LOCK_FREE_CASE(2, uint16_t, ptr)                                    \
      TRY_LOCK_FREE_CASE(4, uint32_t, ptr)                                    \
      TRY_LOCK_FREE_CASE(8, uint64_t, ptr)                                    \
      TRY_LOCK_FREE_CASE_16(ptr) /* __uint128_t may not be supported */       \
    default:                                                                  \
      break;                                                                  \
    }                                                                         \
  } while (0)
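// Expanded by hand for one arm (n == 4), a TRY_LOCK_FREE_CASE inside
// LOCK_FREE_CASES is simply:
//   case 4:
//     if (IS_LOCK_FREE_4(ptr)) {
//       LOCK_FREE_ACTION(uint32_t);
//     }
//     break;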

/// Whether atomic operations for the given size (and alignment) are lock-free.
bool __atomic_is_lock_free_c(size_t size, void *ptr) {
#define LOCK_FREE_ACTION(type) return true;
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  return false;
}
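// Illustrative call site (hypothetical): C11's atomic_is_lock_free on an
// object the compiler cannot resolve statically may lower to
//   _Bool r = __atomic_is_lock_free(sizeof obj, &obj);
// which lands here via the redefine_extname pragma above.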

/// An atomic load operation. This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type)                                                \
  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model);           \
  return;
  LOCK_FREE_CASES(src);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// An atomic store operation. This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type)                                                \
  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model);             \
  return;
  LOCK_FREE_CASES(dest);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation. If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they differ, then this stores the current value from *ptr in *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type)                                                \
  return __c11_atomic_compare_exchange_strong(                                \
      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success,      \
      failure)
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
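// Hedged usage sketch of the semantics above (hypothetical caller):
//   long obj = 0, expected = 0, desired = 1;
//   if (__atomic_compare_exchange(sizeof(long), &obj, &expected, &desired,
//                                 __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) {
//     // swapped: obj is now 1
//   } else {
//     // not swapped: expected now holds the value observed in obj
//   }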

/// Performs an atomic exchange operation between two pointers. This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type)                                                \
  *(type *)old =                                                              \
      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model);       \
  return;
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES                                                       \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                  \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                 \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                 \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)                                 \
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES                                                       \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                  \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                 \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                 \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif

#define OPTIMISED_CASE(n, lockfree, type)                                     \
  type __atomic_load_##n(type *src, int model) {                              \
    if (lockfree(src))                                                        \
      return __c11_atomic_load((_Atomic(type) *)src, model);                  \
    Lock *l = lock_for_pointer(src);                                          \
    lock(l);                                                                  \
    type val = *src;                                                          \
    unlock(l);                                                                \
    return val;                                                               \
  }
OPTIMISED_CASES
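// For example, the n == 4 instantiation above defines, in effect:
//   uint32_t __atomic_load_4(uint32_t *src, int model);
// which the compiler may call for a 4-byte atomic load it does not inline.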
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                     \
  void __atomic_store_##n(type *dest, type val, int model) {                  \
    if (lockfree(dest)) {                                                     \
      __c11_atomic_store((_Atomic(type) *)dest, val, model);                  \
      return;                                                                 \
    }                                                                         \
    Lock *l = lock_for_pointer(dest);                                         \
    lock(l);                                                                  \
    *dest = val;                                                              \
    unlock(l);                                                                \
    return;                                                                   \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                     \
  type __atomic_exchange_##n(type *dest, type val, int model) {               \
    if (lockfree(dest))                                                       \
      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model);        \
    Lock *l = lock_for_pointer(dest);                                         \
    lock(l);                                                                  \
    type tmp = *dest;                                                         \
    *dest = val;                                                              \
    unlock(l);                                                                \
    return tmp;                                                               \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                     \
  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
                                     int success, int failure) {              \
    if (lockfree(ptr))                                                        \
      return __c11_atomic_compare_exchange_strong(                            \
          (_Atomic(type) *)ptr, expected, desired, success, failure);         \
    Lock *l = lock_for_pointer(ptr);                                          \
    lock(l);                                                                  \
    if (*ptr == *expected) {                                                  \
      *ptr = desired;                                                         \
      unlock(l);                                                              \
      return true;                                                            \
    }                                                                         \
    *expected = *ptr;                                                         \
    unlock(l);                                                                \
    return false;                                                             \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op)                             \
  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {        \
    if (lockfree(ptr))                                                        \
      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model);   \
    Lock *l = lock_for_pointer(ptr);                                          \
    lock(l);                                                                  \
    type tmp = *ptr;                                                          \
    *ptr = tmp op val;                                                        \
    unlock(l);                                                                \
    return tmp;                                                               \
  }
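// For instance, the add instantiation below for n == 8 defines, in effect:
//   uint64_t __atomic_fetch_add_8(uint64_t *ptr, uint64_t val, int model);
// returning, per fetch-op semantics, the value *ptr held before the addition.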

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE