/*-------------------------------------------------------------------------
 *
 * atomics.c
 *	   Non-inline parts of the atomics implementation
 *
 * Portions Copyright (c) 2013-2020, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *	  src/backend/port/atomics.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "miscadmin.h"
#include "port/atomics.h"
#include "storage/spin.h"

#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
#ifdef WIN32
#error "barriers are required (and provided) on WIN32 platforms"
#endif
#include <signal.h>
#endif
26
#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
void
pg_spinlock_barrier(void)
{
	/*
	 * NB: we have to be reentrant here, as some barriers are placed in
	 * signal handlers.
	 *
	 * We use kill(0) for the fallback barrier because we assume that kernels
	 * on systems old enough to require fallback barrier support will include
	 * an appropriate barrier while checking the existence of the postmaster
	 * pid.
	 */
	(void) kill(PostmasterPid, 0);
}
#endif
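
/*
 * Sketch (not part of this file) of how the emulation above is assumed to
 * be wired up in src/include/port/atomics/fallback.h: when no native
 * barrier is available, pg_memory_barrier_impl is mapped to the function
 * above, so a pg_memory_barrier() in generic code ends up as a kill()
 * syscall on these platforms:
 *
 *     extern void pg_spinlock_barrier(void);
 *     #define pg_memory_barrier_impl pg_spinlock_barrier
 */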

#ifdef PG_HAVE_COMPILER_BARRIER_EMULATION
void
pg_extern_compiler_barrier(void)
{
	/*
	 * Do nothing: the call itself is the barrier.  Because this function is
	 * defined out-of-line, the compiler cannot see that the body is empty
	 * and must assume the call may read or write any global memory, which
	 * prevents it from reordering memory accesses across the call site.
	 */
}
#endif
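
/*
 * Illustrative effect on a caller (a sketch, not part of the build),
 * assuming pg_compiler_barrier() expands to the call above on these
 * platforms:
 *
 *     extern int g_flag;
 *
 *     g_flag = 1;
 *     pg_compiler_barrier();     -- opaque call; compiler must assume
 *                                -- the callee can observe g_flag
 *     g_flag = 2;
 *
 * Without the barrier the compiler may delete the dead store of 1; with it,
 * both stores must be emitted in order.  Note this constrains only the
 * compiler, not the CPU.
 */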


#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION

void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
					 "size mismatch of atomic_flag vs slock_t");

#ifndef HAVE_SPINLOCKS

	/*
	 * NB: If we're using semaphore-based TAS emulation, be careful to use a
	 * separate set of semaphores.  Otherwise we'd get in trouble if an
	 * atomic var were manipulated while a spinlock is held.
	 */
	s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
	SpinLockInit((slock_t *) &ptr->sema);
#endif

	ptr->value = false;
}

bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	uint32		oldval;

	SpinLockAcquire((slock_t *) &ptr->sema);
	oldval = ptr->value;
	ptr->value = true;
	SpinLockRelease((slock_t *) &ptr->sema);

	/* TAS semantics: true iff the flag was previously clear */
	return oldval == 0;
}
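
/*
 * Usage sketch (hypothetical caller, not part of this file): the TAS
 * semantics above make the flag usable as a run-once guard, where exactly
 * one backend wins:
 *
 *     static pg_atomic_flag once;     -- set up elsewhere with
 *                                     -- pg_atomic_init_flag(&once)
 *
 *     if (pg_atomic_test_set_flag(&once))
 *         do_one_time_setup();        -- hypothetical helper; only the
 *                                     -- first caller gets here
 */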

void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	SpinLockAcquire((slock_t *) &ptr->sema);
	ptr->value = false;
	SpinLockRelease((slock_t *) &ptr->sema);
}

bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	/*
	 * Deliberately read without taking the spinlock: this variant provides
	 * no barrier semantics and may return a stale value.  Returns true if
	 * the flag is not set.
	 */
	return ptr->value == 0;
}

#endif							/* PG_HAVE_ATOMIC_FLAG_SIMULATION */

#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
void
pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
{
	StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
					 "size mismatch of atomic_uint32 vs slock_t");

	/*
	 * If we're using semaphore-based atomics, be careful about nested usage
	 * of atomics while a spinlock is held.
	 */
#ifndef HAVE_SPINLOCKS
	s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
	SpinLockInit((slock_t *) &ptr->sema);
#endif
	ptr->value = val_;
}

void
pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	/*
	 * One might think that an unlocked write doesn't need to acquire the
	 * spinlock, but one would be wrong.  Even an unlocked write has to cause
	 * a concurrent pg_atomic_compare_exchange_u32() (et al) to fail.
	 */
	SpinLockAcquire((slock_t *) &ptr->sema);
	ptr->value = val;
	SpinLockRelease((slock_t *) &ptr->sema);
}
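
/*
 * Illustrative race (a sketch, not real code) if the write above skipped
 * the lock: the CAS below holds the spinlock across its compare and store,
 * so an unlocked store could slip in between and then be silently
 * overwritten by a CAS that still reports success:
 *
 *     Backend A (CAS)                 Backend B (unlocked write)
 *     acquire sema
 *     read value, == expected
 *                                     store val      -- no lock taken
 *     store newval, return true       -- B's write is lost
 *     release sema
 */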

bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	bool		ret;

	/*
	 * Do the atomic op under a spinlock.  It might look like we could just
	 * skip the cmpxchg if the lock isn't available, but that would only
	 * emulate a 'weak' compare-and-swap, i.e. one that allows spurious
	 * failures.  Since several algorithms rely on the strong variant, and it
	 * is efficiently implementable on most major architectures, let's
	 * emulate it here as well.
	 */
	SpinLockAcquire((slock_t *) &ptr->sema);

	/* perform compare/exchange logic */
	ret = ptr->value == *expected;
	*expected = ptr->value;
	if (ret)
		ptr->value = newval;

	/* and release lock */
	SpinLockRelease((slock_t *) &ptr->sema);

	return ret;
}
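
/*
 * Usage sketch (hypothetical caller, not part of this file) of why the
 * strong semantics matter: with a strong CAS, a single failed attempt
 * proves the value really differed, so a caller may branch on one call
 * without retrying:
 *
 *     uint32	expected = 0;
 *
 *     if (!pg_atomic_compare_exchange_u32(&lock_state, &expected, my_id))
 *         wait_for_owner(expected);   -- hypothetical names; safe only
 *                                     -- because failure cannot be spurious
 *
 * A weak CAS would force every such caller into a retry loop.
 */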

uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	uint32		oldval;

	SpinLockAcquire((slock_t *) &ptr->sema);
	oldval = ptr->value;
	ptr->value += add_;
	SpinLockRelease((slock_t *) &ptr->sema);
	return oldval;
}
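
/*
 * Note: the remaining u32 math ops need no fallback here; the generic layer
 * is assumed to derive them from fetch_add, e.g. (sketch along the lines of
 * src/include/port/atomics/generic.h):
 *
 *     static inline uint32
 *     pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr,
 *                                  int32 sub_)
 *     {
 *         return pg_atomic_fetch_add_u32_impl(ptr, -sub_);
 *     }
 */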

#endif							/* PG_HAVE_ATOMIC_U32_SIMULATION */


#ifdef PG_HAVE_ATOMIC_U64_SIMULATION

void
pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
{
	StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
					 "size mismatch of atomic_uint64 vs slock_t");

	/*
	 * If we're using semaphore-based atomics, be careful about nested usage
	 * of atomics while a spinlock is held.
	 */
#ifndef HAVE_SPINLOCKS
	s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
	SpinLockInit((slock_t *) &ptr->sema);
#endif
	ptr->value = val_;
}

bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	bool		ret;

	/*
	 * Do the atomic op under a spinlock.  It might look like we could just
	 * skip the cmpxchg if the lock isn't available, but that would only
	 * emulate a 'weak' compare-and-swap, i.e. one that allows spurious
	 * failures.  Since several algorithms rely on the strong variant, and it
	 * is efficiently implementable on most major architectures, let's
	 * emulate it here as well.
	 */
	SpinLockAcquire((slock_t *) &ptr->sema);

	/* perform compare/exchange logic */
	ret = ptr->value == *expected;
	*expected = ptr->value;
	if (ret)
		ptr->value = newval;

	/* and release lock */
	SpinLockRelease((slock_t *) &ptr->sema);

	return ret;
}
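
/*
 * Note: on platforms that need this fallback, even a plain 64-bit load may
 * tear.  The generic layer is assumed to route pg_atomic_read_u64() through
 * the CAS above, roughly (sketch along the lines of
 * src/include/port/atomics/generic.h):
 *
 *     static inline uint64
 *     pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
 *     {
 *         uint64	old = 0;
 *
 *         -- CAS against 0: whether it succeeds or fails, it always
 *         -- reports the old value back through 'old'
 *         pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);
 *         return old;
 *     }
 */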

uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	uint64		oldval;

	SpinLockAcquire((slock_t *) &ptr->sema);
	oldval = ptr->value;
	ptr->value += add_;
	SpinLockRelease((slock_t *) &ptr->sema);
	return oldval;
}

#endif							/* PG_HAVE_ATOMIC_U64_SIMULATION */