/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
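
/*
 * All three barriers above are implemented as a LOCKed add of zero to
 * the top of the stack: a locked read-modify-write is a full fence on
 * x86, and unlike the SSE2 mfence/lfence/sfence instructions it is
 * available on every i386-class CPU.
 */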

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
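
/*
 * Example usage (illustrative sketch; "foo_count" and "foo_flags" are
 * hypothetical variables):
 *
 *	static volatile u_int foo_count, foo_flags;
 *
 *	atomic_add_int(&foo_count, 1);		atomically: foo_count += 1
 *	atomic_clear_int(&foo_flags, 0x04);	atomically: foo_flags &= ~0x04
 */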

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile so that the compiler cannot remove it
 * as dead code.  GCC aggressively reorders operations, so the operations
 * meant to act as memory barriers also need a "memory" clobber to keep
 * memory accesses from being moved across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p));				\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory");					\
}							\
struct __hack
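
/*
 * For example, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) below expands
 * to atomic_add_int() and atomic_add_barr_int(); the latter adds a
 * compiler memory barrier and backs the _acq_/_rel_ aliases further down.
 */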

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
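
/*
 * Typical compare-and-set loop (illustrative sketch; "foo_val" is a
 * hypothetical variable): re-read and retry until the update wins.
 *
 *	static volatile u_int foo_val;
 *	u_int old;
 *
 *	do {
 *		old = foo_val;
 *	} while (atomic_cmpset_int(&foo_val, old, old + 1) == 0);
 */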

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}
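
/*
 * Example: because atomic_fetchadd_int() returns the value *before* the
 * add, it can hand out ticket-style sequence numbers (illustrative
 * sketch; "next_ticket" is a hypothetical variable):
 *
 *	static volatile u_int next_ticket;
 *	u_int my_ticket;
 *
 *	my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */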

#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. one with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we only need to take care of the compiler.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__asm __volatile("" : : : "memory");		\
	return (tmp);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("" : : : "memory");		\
	*p = v;						\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

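/*
 * The acquire load below (ab)uses CMPXCHG: with %eax serving as both the
 * compare value and the output, CMPXCHG always leaves the old contents
 * of *p in %eax and at worst rewrites *p with the value it already holds,
 * so the LOCKed instruction behaves as a serializing load.
 */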
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}
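
/*
 * The casts above are safe because u_long and u_int are both 32 bits
 * wide on i386.
 */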

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}
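
/*
 * Example: atomically fetch and reset a word of pending event bits
 * (illustrative sketch; "foo_pending" is a hypothetical variable):
 *
 *	static volatile u_int foo_pending;
 *	u_int events;
 *
 *	events = atomic_readandclear_int(&foo_pending);
 */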

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
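
/*
 * A LOCKed instruction on x86 is already a full memory barrier, so the
 * _acq_/_rel_ forms of cmpset map to the plain operation; the other
 * _acq_/_rel_ aliases only add the compiler-level barrier provided by
 * the _barr_ variants.
 */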

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
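
/*
 * Example: lock-free push of a node onto a singly-linked list with
 * atomic_cmpset_ptr() (illustrative sketch; "struct foo" and "foo_list"
 * are hypothetical, and the loop is safe against ABA because it only
 * pushes):
 *
 *	struct foo *oldhead;
 *
 *	do {
 *		oldhead = foo_list;
 *		new->next = oldhead;
 *	} while (atomic_cmpset_ptr(&foo_list, oldhead, new) == 0);
 */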

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */