/* This header supplies atomic operations. So far, we rely on GCC's
 * atomic builtins. During configure, we check whether atomic operations are
 * available; if they are not, the necessary provisions are made to live
 * without them. Please note that you should only use the macros
 * here if you think you can actually live WITHOUT an explicit atomic operation,
 * because in their absence, we simply do the operation without atomicity.
 * Which, for word-aligned data types, usually (but only usually!) should work.
 *
 * We are using the functions described in
 * http://gcc.gnu.org/onlinedocs/gcc/Atomic-Builtins.html
11  *
12  * THESE MACROS MUST ONLY BE USED WITH WORD-SIZED DATA TYPES!
13  *
14  * Note: this file was obtained at 2015-12-16 from the rsyslog project.
15  *
16  * Copyright 2008-2015 Rainer Gerhards and Adiscon GmbH.
17  *
18  * This file is part of the rsyslog runtime library.
19  *
20  * Licensed under the Apache License, Version 2.0 (the "License");
21  * you may not use this file except in compliance with the License.
22  * You may obtain a copy of the License at
23  *
24  *       http://www.apache.org/licenses/LICENSE-2.0
25  *       -or-
26  *       see COPYING.ASL20 in the source distribution
27  *
28  * Unless required by applicable law or agreed to in writing, software
29  * distributed under the License is distributed on an "AS IS" BASIS,
30  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
31  * See the License for the specific language governing permissions and
32  * limitations under the License.
33  */
34 #ifndef FJ_INCLUDED_ATOMIC_H
35 #define FJ_INCLUDED_ATOMIC_H
36 
37 #ifdef HAVE_ATOMIC_BUILTINS
#	define ATOMIC_SUB(data, val, phlpmut) __sync_fetch_and_sub(data, val)
#	define ATOMIC_ADD(data, val) __sync_fetch_and_add(&(data), val)
#	define ATOMIC_INC(data, phlpmut) ((void) __sync_fetch_and_add(data, 1))
	/* "inc and fetch" must yield the NEW (incremented) value: the mutex
	 * fallback returns ++(*data). __sync_fetch_and_add returns the OLD
	 * value, so __sync_add_and_fetch is the correct builtin here.
	 */
#	define ATOMIC_INC_AND_FETCH_int(data, phlpmut) __sync_add_and_fetch(data, 1)
#	define ATOMIC_INC_AND_FETCH_unsigned(data, phlpmut) __sync_add_and_fetch(data, 1)
#	define ATOMIC_DEC(data, phlpmut) ((void) __sync_sub_and_fetch(data, 1))
#	define ATOMIC_DEC_AND_FETCH(data, phlpmut) __sync_sub_and_fetch(data, 1)
	/* AND-ing with all-ones leaves the value unchanged but returns it
	 * atomically -- an atomic read for platforms without a plain load builtin */
#	define ATOMIC_FETCH_32BIT(data, phlpmut) ((unsigned) __sync_fetch_and_and(data, 0xffffffff))
#	define ATOMIC_STORE_1_TO_32BIT(data) __sync_lock_test_and_set(&(data), 1)
#	define ATOMIC_STORE_0_TO_INT(data, phlpmut) __sync_fetch_and_and(data, 0)
#	define ATOMIC_STORE_1_TO_INT(data, phlpmut) __sync_fetch_and_or(data, 1)
	/* NOTE(review): OR only "stores" val correctly if data is known to be 0
	 * beforehand (data |= val, it does not overwrite) -- confirm callers
	 * zero the target first, or switch to __sync_lock_test_and_set. */
#	define ATOMIC_STORE_INT_TO_INT(data, val) __sync_fetch_and_or(&(data), (val))
#	define ATOMIC_CAS(data, oldVal, newVal, phlpmut) __sync_bool_compare_and_swap(data, (oldVal), (newVal))
#	define ATOMIC_CAS_time_t(data, oldVal, newVal, phlpmut) __sync_bool_compare_and_swap(data, (oldVal), (newVal))
	/* no trailing semicolon: the macro must stay usable inside expressions */
#	define ATOMIC_CAS_VAL(data, oldVal, newVal, phlpmut) __sync_val_compare_and_swap(data, (oldVal), (newVal))

	/* functions below are not needed if we have atomics */
#	define DEF_ATOMIC_HELPER_MUT(x)
#	define INIT_ATOMIC_HELPER_MUT(x)
#	define DESTROY_ATOMIC_HELPER_MUT(x)

	/* the following operations should preferably be done atomic, but it is
	 * not fatal if not -- that means we can live with some missed updates. So be
	 * sure to use these macros only if that really does not matter!
	 */
#	define PREFER_ATOMIC_INC(data) ((void) __sync_fetch_and_add(&(data), 1))
64 #else
	/* note that we gained practical proof that theoretical problems DO occur
	 * if we do not properly address them. See this blog post for details:
	 * http://blog.gerhards.net/2009/01/rsyslog-data-race-analysis.html
	 * The bottom line is that if there are no atomics available, we should NOT
	 * simply go ahead and do without them - use mutexes or other things.
	 * rgerhards, 2009-01-30
	 */
72 	#include <pthread.h>
#	define ATOMIC_INC(data, phlpmut) \
	do { \
		pthread_mutex_lock(phlpmut); \
		++(*(data)); \
		pthread_mutex_unlock(phlpmut); \
	} while(0)

	/* The do { ... } while(0) wrapper makes each of these macros expand to
	 * a single statement, so they remain safe in un-braced if/else bodies
	 * (a bare { ... }; block would break "if(x) ATOMIC_INC(p, m); else ...").
	 */
#	define ATOMIC_STORE_0_TO_INT(data, hlpmut) \
	do { \
		pthread_mutex_lock(hlpmut); \
		*(data) = 0; \
		pthread_mutex_unlock(hlpmut); \
	} while(0)

#	define ATOMIC_STORE_1_TO_INT(data, hlpmut) \
	do { \
		pthread_mutex_lock(hlpmut); \
		*(data) = 1; \
		pthread_mutex_unlock(hlpmut); \
	} while(0)
90 
91 	static inline int
ATOMIC_CAS(int * data,int oldVal,int newVal,pthread_mutex_t * phlpmut)92 	ATOMIC_CAS(int *data, int oldVal, int newVal, pthread_mutex_t *phlpmut) {
93 		int bSuccess;
94 		pthread_mutex_lock(phlpmut);
95 		if(*data == oldVal) {
96 			*data = newVal;
97 			bSuccess = 1;
98 		} else {
99 			bSuccess = 0;
100 		}
101 		pthread_mutex_unlock(phlpmut);
102 		return(bSuccess);
103 	}
104 
105 	static inline int
ATOMIC_CAS_time_t(time_t * data,time_t oldVal,time_t newVal,pthread_mutex_t * phlpmut)106 	ATOMIC_CAS_time_t(time_t *data, time_t oldVal, time_t newVal, pthread_mutex_t *phlpmut) {
107 		int bSuccess;
108 		pthread_mutex_lock(phlpmut);
109 		if(*data == oldVal) {
110 			*data = newVal;
111 			bSuccess = 1;
112 		} else {
113 			bSuccess = 0;
114 		}
115 		pthread_mutex_unlock(phlpmut);
116 		return(bSuccess);
117 	}
118 
119 
120 	static inline int
ATOMIC_CAS_VAL(int * data,int oldVal,int newVal,pthread_mutex_t * phlpmut)121 	ATOMIC_CAS_VAL(int *data, int oldVal, int newVal, pthread_mutex_t *phlpmut) {
122 		int val;
123 		pthread_mutex_lock(phlpmut);
124 		if(*data == oldVal) {
125 			*data = newVal;
126 		}
127 		val = *data;
128 		pthread_mutex_unlock(phlpmut);
129 		return(val);
130 	}
131 
#	define ATOMIC_DEC(data, phlpmut) /* mutex-protected decrement, result discarded; do-while keeps it a single statement */ \
	do { \
		pthread_mutex_lock(phlpmut); \
		--(*(data)); \
		pthread_mutex_unlock(phlpmut); \
	} while(0)
137 
138 	static inline int
ATOMIC_INC_AND_FETCH_int(int * data,pthread_mutex_t * phlpmut)139 	ATOMIC_INC_AND_FETCH_int(int *data, pthread_mutex_t *phlpmut) {
140 		int val;
141 		pthread_mutex_lock(phlpmut);
142 		val = ++(*data);
143 		pthread_mutex_unlock(phlpmut);
144 		return(val);
145 	}
146 
147 	static inline unsigned
ATOMIC_INC_AND_FETCH_unsigned(unsigned * data,pthread_mutex_t * phlpmut)148 	ATOMIC_INC_AND_FETCH_unsigned(unsigned *data, pthread_mutex_t *phlpmut) {
149 		unsigned val;
150 		pthread_mutex_lock(phlpmut);
151 		val = ++(*data);
152 		pthread_mutex_unlock(phlpmut);
153 		return(val);
154 	}
155 
156 	static inline int
ATOMIC_DEC_AND_FETCH(int * data,pthread_mutex_t * phlpmut)157 	ATOMIC_DEC_AND_FETCH(int *data, pthread_mutex_t *phlpmut) {
158 		int val;
159 		pthread_mutex_lock(phlpmut);
160 		val = --(*data);
161 		pthread_mutex_unlock(phlpmut);
162 		return(val);
163 	}
164 
165 	static inline int
ATOMIC_FETCH_32BIT(int * data,pthread_mutex_t * phlpmut)166 	ATOMIC_FETCH_32BIT(int *data, pthread_mutex_t *phlpmut) {
167 		int val;
168 		pthread_mutex_lock(phlpmut);
169 		val = (*data);
170 		pthread_mutex_unlock(phlpmut);
171 		return(val);
172 	}
173 
174 	static inline void
ATOMIC_SUB(int * data,int val,pthread_mutex_t * phlpmut)175 	ATOMIC_SUB(int *data, int val, pthread_mutex_t *phlpmut) {
176 		pthread_mutex_lock(phlpmut);
177 		(*data) -= val;
178 		pthread_mutex_unlock(phlpmut);
179 	}
180 #	define DEF_ATOMIC_HELPER_MUT(x)  pthread_mutex_t x;
181 #	define INIT_ATOMIC_HELPER_MUT(x) pthread_mutex_init(&(x), NULL);
182 #	define DESTROY_ATOMIC_HELPER_MUT(x) pthread_mutex_destroy(&(x));
183 
184 #	define PREFER_ATOMIC_INC(data) ((void) ++data)
185 
186 #endif
187 
/* we need to handle 64bit atomics separately as some platforms have
 * 32 bit atomics, but not 64 bit ones... -- rgerhards, 2010-12-01
 */
#if 0 /* currently disabled, we don't need it now and don't have the data types present */
192 #ifdef HAVE_ATOMIC_BUILTINS64
193 #	define ATOMIC_INC_uint64(data, phlpmut) ((void) __sync_fetch_and_add(data, 1))
194 #	define ATOMIC_DEC_unit64(data, phlpmut) ((void) __sync_sub_and_fetch(data, 1))
195 #	define ATOMIC_INC_AND_FETCH_uint64(data, phlpmut) __sync_fetch_and_add(data, 1)
196 
197 #	define DEF_ATOMIC_HELPER_MUT64(x)
198 #	define INIT_ATOMIC_HELPER_MUT64(x)
199 #	define DESTROY_ATOMIC_HELPER_MUT64(x)
200 #else
201 #	define ATOMIC_INC_uint64(data, phlpmut)  { \
202 		pthread_mutex_lock(phlpmut); \
203 		++(*(data)); \
204 		pthread_mutex_unlock(phlpmut); \
205 	}
206 #	define ATOMIC_DEC_uint64(data, phlpmut)  { \
207 		pthread_mutex_lock(phlpmut); \
208 		--(*(data)); \
209 		pthread_mutex_unlock(phlpmut); \
210 	}
211 
212 	static inline unsigned
213 	ATOMIC_INC_AND_FETCH_uint64(uint64 *data, pthread_mutex_t *phlpmut) {
214 		uint64 val;
215 		pthread_mutex_lock(phlpmut);
216 		val = ++(*data);
217 		pthread_mutex_unlock(phlpmut);
218 		return(val);
219 	}
220 
221 #	define DEF_ATOMIC_HELPER_MUT64(x)  pthread_mutex_t x;
222 #	define INIT_ATOMIC_HELPER_MUT64(x) pthread_mutex_init(&(x), NULL)
223 #	define DESTROY_ATOMIC_HELPER_MUT64(x) pthread_mutex_destroy(&(x))
224 #endif /* #ifdef HAVE_ATOMIC_BUILTINS64 */
225 #endif
226 
#endif /* #ifndef FJ_INCLUDED_ATOMIC_H */
228