//===-- atomic.c - Implement support functions for atomic operations.------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  atomic.c defines a set of functions for performing atomic accesses on
//  arbitrary-sized memory locations.  This design uses locks that should
//  be fast in the uncontended case, for two reasons:
//
//  1) This code must work with C programs that do not link to anything
//     (including pthreads) and so it should not depend on any pthread
//     functions.
//  2) Atomic operations, rather than explicit mutexes, are most commonly used
//     on code where contended operations are rare.
//
//  To avoid needing a per-object lock, this code allocates an array of
//  locks and hashes the object pointers to find the one that it should use.
//  For operations that must be atomic on two locations, the lower lock is
//  always acquired first, to avoid deadlock.
//
//===----------------------------------------------------------------------===//

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "assembly.h"

// We use __builtin_mem* here to avoid dependencies on libc-provided headers.
#define memcpy __builtin_memcpy
#define memcmp __builtin_memcmp

// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(              \
    __atomic_compare_exchange)
#pragma redefine_extname __atomic_is_lock_free_c SYMBOL_NAME(                  \
    __atomic_is_lock_free)
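
// Illustrative sketch: with the pragmas above, a generic call the compiler
// emits for an oversized atomic access, e.g.
//   __atomic_load(sizeof(struct Big), &src, &dst, __ATOMIC_SEQ_CST);
// binds to __atomic_load_c below, even though this file cannot define a
// function literally named __atomic_load.  (struct Big is hypothetical.)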

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
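
// Note that SPINLOCK_COUNT must remain a power of two so that
// (hash & SPINLOCK_MASK) below yields a valid index into the lock array.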

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#if defined(__FreeBSD__) || defined(__DragonFly__)
#include <errno.h>
// clang-format off
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
// clang-format on
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
                                             &old, 0, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash.
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object.
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use.
  return locks + (hash & SPINLOCK_MASK);
}
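
// Worked example (assuming the default SPINLOCK_COUNT of 1 << 10, so
// SPINLOCK_MASK == 0x3ff) for ptr == (void *)0x7fff12345678:
//   after hash >>= 4:   hash == 0x7fff1234567
//   low               == 0x7fff1234567 & 0x3ff == 0x167
//   after hash >>= 16:  hash == 0x7fff123
//   hash ^= low:        hash == 0x7fff123 ^ 0x167 == 0x7fff044
//   index             == 0x7fff044 & 0x3ff == 0x44
// so this pointer uses locks[0x44].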

/// Macros for determining whether a size is lock free.
#define ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(size, p)                  \
  (__atomic_always_lock_free(size, p) ||                                       \
   (__atomic_always_lock_free(size, 0) && ((uintptr_t)p % size) == 0))
#define IS_LOCK_FREE_1(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(1, p)
#define IS_LOCK_FREE_2(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(2, p)
#define IS_LOCK_FREE_4(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(4, p)
#define IS_LOCK_FREE_8(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(8, p)
#define IS_LOCK_FREE_16(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(16, p)

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define TRY_LOCK_FREE_CASE(n, type, ptr)                                       \
  case n:                                                                      \
    if (IS_LOCK_FREE_##n(ptr)) {                                               \
      LOCK_FREE_ACTION(type);                                                  \
    }                                                                          \
    break;
#ifdef __SIZEOF_INT128__
#define TRY_LOCK_FREE_CASE_16(p) TRY_LOCK_FREE_CASE(16, __uint128_t, p)
#else
#define TRY_LOCK_FREE_CASE_16(p) /* __uint128_t not available */
#endif

#define LOCK_FREE_CASES(ptr)                                                   \
  do {                                                                         \
    switch (size) {                                                            \
      TRY_LOCK_FREE_CASE(1, uint8_t, ptr)                                      \
      TRY_LOCK_FREE_CASE(2, uint16_t, ptr)                                     \
      TRY_LOCK_FREE_CASE(4, uint32_t, ptr)                                     \
      TRY_LOCK_FREE_CASE(8, uint64_t, ptr)                                     \
      TRY_LOCK_FREE_CASE_16(ptr) /* __uint128_t may not be supported */        \
    default:                                                                   \
      break;                                                                   \
    }                                                                          \
  } while (0)
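
// Users of LOCK_FREE_CASES define LOCK_FREE_ACTION(type) to the lock-free
// code they want for a matching size, expand LOCK_FREE_CASES, and #undef the
// macro again; when no case matches, control falls through to the locked
// fallback that follows the expansion.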

/// Whether atomic operations for the given size (and alignment) are lock-free.
bool __atomic_is_lock_free_c(size_t size, void *ptr) {
#define LOCK_FREE_ACTION(type) return true;
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  return false;
}
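
// This serves as the runtime fallback for the __atomic_is_lock_free builtin:
// when the compiler cannot fold the query at compile time (e.g. the size is
// not a constant), it emits a call that resolves here via the pragma above.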

/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model);            \
  return;
  LOCK_FREE_CASES(src);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}
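
// Note that on the locked path the load is a plain memcpy performed while
// holding the pointer's lock; mutual exclusion between the functions in this
// file is what provides atomicity, so the `model` argument is only consulted
// on the lock-free path.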

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model);              \
  return;
  LOCK_FREE_CASES(dest);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they are not, then this stores the current value from *ptr in
/// *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type)                                                 \
  return __c11_atomic_compare_exchange_strong(                                 \
      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success,       \
      failure)
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
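
// On the locked path the success/failure orders are likewise unused: the
// lock provides the ordering, and the comparison is a bytewise memcmp, which
// is how the generic (memory-based) compare-exchange is specified.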

/// Performs an atomic exchange operation between two pointers.  This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *(type *)old =                                                               \
      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model);        \
  return;
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)                                  \
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif
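
// OPTIMISED_CASES is used as an X-macro: each family of functions below
// defines OPTIMISED_CASE(n, lockfree, type) as a function template, expands
// OPTIMISED_CASES to stamp out one definition per size, then #undefs it.
// The load family, for instance, yields __atomic_load_1 through
// __atomic_load_8 (plus __atomic_load_16 when __uint128_t is available).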

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_load_##n(type *src, int model) {                               \
    if (lockfree(src))                                                         \
      return __c11_atomic_load((_Atomic(type) *)src, model);                   \
    Lock *l = lock_for_pointer(src);                                           \
    lock(l);                                                                   \
    type val = *src;                                                           \
    unlock(l);                                                                 \
    return val;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  void __atomic_store_##n(type *dest, type val, int model) {                   \
    if (lockfree(dest)) {                                                      \
      __c11_atomic_store((_Atomic(type) *)dest, val, model);                   \
      return;                                                                  \
    }                                                                          \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return;                                                                    \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_exchange_##n(type *dest, type val, int model) {                \
    if (lockfree(dest))                                                        \
      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model);         \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    type tmp = *dest;                                                          \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,  \
                                     int success, int failure) {               \
    if (lockfree(ptr))                                                         \
      return __c11_atomic_compare_exchange_strong(                             \
          (_Atomic(type) *)ptr, expected, desired, success, failure);          \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    if (*ptr == *expected) {                                                   \
      *ptr = desired;                                                          \
      unlock(l);                                                               \
      return true;                                                             \
    }                                                                          \
    *expected = *ptr;                                                          \
    unlock(l);                                                                 \
    return false;                                                              \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op)                              \
  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {         \
    if (lockfree(ptr))                                                         \
      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model);    \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    type tmp = *ptr;                                                           \
    *ptr = tmp op val;                                                         \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }
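
// For example, ATOMIC_RMW(4, IS_LOCK_FREE_4, uint32_t, add, +) expands to
// __atomic_fetch_add_4, which performs a lock-free C11 fetch-add when
// possible and otherwise falls back to a read-modify-write under the
// pointer's lock.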

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE