/*-------------------------------------------------------------------------
 *
 * atomics.c
 *	   Non-inline parts of the atomics implementation
 *
 * Portions Copyright (c) 2013-2016, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *	   src/backend/port/atomics.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "miscadmin.h"
#include "port/atomics.h"
#include "storage/spin.h"

#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
#ifdef WIN32
#error "barriers are required (and provided) on WIN32 platforms"
#endif
#include <sys/types.h>
#include <signal.h>
#endif

#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
void
pg_spinlock_barrier(void)
{
	/*
	 * NB: we have to be reentrant here, some barriers are placed in signal
	 * handlers.
	 *
	 * We use kill() with signal 0 for the fallback barrier as we assume
	 * that kernels on systems old enough to require fallback barrier
	 * support will include an appropriate barrier while checking the
	 * existence of the postmaster pid.
	 */
	(void) kill(PostmasterPid, 0);
}
#endif
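
/*
 * Illustrative sketch, not compiled: the ordering guarantee a full memory
 * barrier provides, whether implemented natively or via the kill() fallback
 * above.  The shared variables and both functions are hypothetical, for
 * exposition only.
 */
#if 0
static int	shared_data = 0;
static volatile bool data_ready = false;

static void
writer(void)
{
	shared_data = 42;			/* publish the payload */
	pg_memory_barrier();		/* order the store above before the flag */
	data_ready = true;			/* signal the reader */
}

static void
reader(void)
{
	if (data_ready)
	{
		pg_memory_barrier();	/* order the flag load before the data load */
		Assert(shared_data == 42);	/* payload is guaranteed visible */
	}
}
#endif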

#ifdef PG_HAVE_COMPILER_BARRIER_EMULATION
void
pg_extern_compiler_barrier(void)
{
	/*
	 * Do nothing.  Because the compiler cannot see into this out-of-line
	 * function, it has to assume the call may read or write any memory,
	 * which stops it from reordering accesses across the call site.
	 */
}
#endif
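
/*
 * Illustrative sketch, not compiled: what a compiler barrier prevents.
 * Without the barrier the compiler could hoist the load of stop_flag out
 * of the loop and spin forever on a cached value.  The variable and
 * function are hypothetical.
 */
#if 0
static bool stop_flag = false;

static void
busy_wait(void)
{
	while (!stop_flag)
	{
		/* forces stop_flag to be reloaded on every iteration */
		pg_compiler_barrier();
	}
}
#endif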


#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION

void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
					 "size mismatch of atomic_flag vs slock_t");

#ifndef HAVE_SPINLOCKS

	/*
	 * NB: If we're using semaphore based TAS emulation, be careful to use a
	 * separate set of semaphores.  Otherwise we'd get in trouble if an
	 * atomic var were manipulated while a spinlock is held.
	 */
	s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
	SpinLockInit((slock_t *) &ptr->sema);
#endif

	ptr->value = false;
}
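
/*
 * Illustrative sketch, not compiled: the nested pattern that using a
 * separate semaphore set makes safe.  If the emulated atomic shared its
 * semaphores with regular spinlocks, the inner operation below could block
 * on the very semaphore the outer lock holds.  The variables are
 * hypothetical.
 */
#if 0
static slock_t some_lock;
static pg_atomic_flag some_flag;

static void
nested_usage(void)
{
	SpinLockAcquire(&some_lock);
	/* atomic op while a spinlock is held: needs an independent semaphore */
	pg_atomic_clear_flag(&some_flag);
	SpinLockRelease(&some_lock);
}
#endif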

bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	uint32		oldval;

	SpinLockAcquire((slock_t *) &ptr->sema);
	oldval = ptr->value;
	ptr->value = true;
	SpinLockRelease((slock_t *) &ptr->sema);

	return oldval == 0;
}

void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	SpinLockAcquire((slock_t *) &ptr->sema);
	ptr->value = false;
	SpinLockRelease((slock_t *) &ptr->sema);
}

bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* unlocked read: true iff the flag is currently clear */
	return ptr->value == 0;
}

#endif							/* PG_HAVE_ATOMIC_FLAG_SIMULATION */
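
/*
 * Illustrative sketch, not compiled: the pg_atomic_flag API used as a
 * minimal test-and-set lock.  pg_atomic_test_set_flag() returns true only
 * for the caller that flips the flag from clear to set.  The variable and
 * functions are hypothetical.
 */
#if 0
static pg_atomic_flag my_lock;

static void
my_lock_init(void)
{
	pg_atomic_init_flag(&my_lock);	/* starts out clear, i.e. unlocked */
}

static void
my_lock_acquire(void)
{
	/* spin until we are the caller that sets the flag */
	while (!pg_atomic_test_set_flag(&my_lock))
		;
}

static void
my_lock_release(void)
{
	pg_atomic_clear_flag(&my_lock);
}
#endif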

#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
void
pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
{
	StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
					 "size mismatch of atomic_uint32 vs slock_t");

	/*
	 * If we're using semaphore based atomics, be careful about nested usage
	 * of atomics while a spinlock is held.
	 */
#ifndef HAVE_SPINLOCKS
	s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
	SpinLockInit((slock_t *) &ptr->sema);
#endif
	ptr->value = val_;
}

void
pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	/*
	 * One might think that an unlocked write doesn't need to acquire the
	 * spinlock, but one would be wrong.  Even an unlocked write has to
	 * cause a concurrent pg_atomic_compare_exchange_u32() (et al) to fail.
	 */
	SpinLockAcquire((slock_t *) &ptr->sema);
	ptr->value = val;
	SpinLockRelease((slock_t *) &ptr->sema);
}
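
/*
 * Illustrative sketch, not compiled: the interleaving the locked write
 * rules out.  If the write bypassed the lock, it could land between a
 * concurrent CAS's compare and its store:
 *
 *		backend A: CAS reads value == 0, decides to succeed
 *		backend B: plain write sets value to 7
 *		backend A: CAS stores 1, silently clobbering B's write
 *
 * Taking the spinlock in both paths serializes them.  Hypothetical
 * variable and function.
 */
#if 0
static pg_atomic_uint32 v;

static void
writer(void)
{
	pg_atomic_write_u32(&v, 7);	/* serialized against any concurrent CAS */
}
#endif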

bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	bool		ret;

	/*
	 * Do atomic op under a spinlock.  It might look like we could just skip
	 * the cmpxchg if the lock isn't available, but that'd just emulate a
	 * 'weak' compare and swap, i.e. one that allows spurious failures.
	 * Since several algorithms rely on a strong variant, and that is
	 * efficiently implementable on most major architectures, let's emulate
	 * it here as well.
	 */
	SpinLockAcquire((slock_t *) &ptr->sema);

	/* perform compare/exchange logic */
	ret = ptr->value == *expected;
	*expected = ptr->value;
	if (ret)
		ptr->value = newval;

	/* and release lock */
	SpinLockRelease((slock_t *) &ptr->sema);

	return ret;
}
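
/*
 * Illustrative sketch, not compiled: the canonical retry loop that relies
 * on the strong CAS semantics emulated above.  On failure, *expected is
 * refreshed with the current value, so the loop repeats only when another
 * backend really changed the variable; a weak CAS could also fail
 * spuriously.  Hypothetical variable and function.
 */
#if 0
static pg_atomic_uint32 shared_max;

static void
update_max(uint32 newval)
{
	uint32		oldval = pg_atomic_read_u32(&shared_max);

	while (oldval < newval)
	{
		/* on failure, oldval is updated to the value currently stored */
		if (pg_atomic_compare_exchange_u32(&shared_max, &oldval, newval))
			break;
	}
}
#endif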

uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	uint32		oldval;

	SpinLockAcquire((slock_t *) &ptr->sema);
	oldval = ptr->value;
	ptr->value += add_;
	SpinLockRelease((slock_t *) &ptr->sema);
	return oldval;
}

#endif							/* PG_HAVE_ATOMIC_U32_SIMULATION */
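
/*
 * Illustrative sketch, not compiled: a typical use of fetch-and-add,
 * handing out unique sequence numbers.  Because the pre-increment value is
 * returned, every caller gets a distinct number even under concurrency.
 * Hypothetical variable and function.
 */
#if 0
static pg_atomic_uint32 next_seqno;

static uint32
assign_seqno(void)
{
	/* returns the old value: 0, 1, 2, ... across all backends */
	return pg_atomic_fetch_add_u32(&next_seqno, 1);
}
#endif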