/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x180

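/*
 * __mbk(): kernel-mode Store/Load barrier.  The locked no-op add to the
 * per-CPU monitor buffer (reached through %fs, which the i386 kernel
 * uses for per-CPU data) orders earlier stores before later loads
 * without touching any shared cache line; see the memory-model comment
 * further down.
 */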
static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

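/*
 * __mbu(): user-mode counterpart of __mbk().  There is no per-CPU area
 * addressable from user space, so the locked no-op add targets the word
 * at the top of the stack instead.
 */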
static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to keep the compiler from removing the
 * instructions.  GCC aggressively reorders operations, so a "memory"
 * clobber is also required to keep it from moving accesses across the
 * barrier variants.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
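/*
 * The trailing "struct __hack" (no semicolon inside the macro body)
 * exists only so that each ATOMIC_ASM() use site must be terminated
 * with a semicolon, like an ordinary declaration.
 */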

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

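/*
 * Illustrative sketch only (not part of this header): these primitives
 * are enough to build a trivial spin lock, e.g.
 *
 *	volatile u_int lk = 0;
 *
 *	while (atomic_cmpset_acq_int(&lk, 0, 1) == 0)
 *		;				spin until 0 -> 1 succeeds
 *	 ... critical section ...
 *	atomic_store_rel_int(&lk, 0);		release
 */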
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

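/*
 * atomic_fcmpset_int() behaves like atomic_cmpset_int(), except that on
 * failure the value actually observed in *dst is written back through
 * *expect (cmpxchgl leaves it in %eax, which is bound to *expect), so
 * the caller can retry without a separate reload.
 */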
static __inline int
atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_fcmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (*expect)		/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

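/*
 * Atomically set bit (v & 0x1f) in the 32-bit word at *p and return the
 * bit's previous value.  The mask keeps btsl from addressing memory
 * beyond *p when a bit number >= 32 is passed in.
 */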
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

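/*
 * As above, but atomically clear bit (v & 0x1f) in *p and return its
 * previous value.
 */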
static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

#if defined(_KERNEL)
#if defined(SMP)
#define	__storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define	__storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
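/*
 * On such CPUs the 64-bit *_i386 variants below get their atomicity the
 * only way available: interrupts are disabled around the two 32-bit
 * accesses (pushfl/cli ... popfl restores the caller's interrupt
 * state), which is sufficient on a uniprocessor.
 */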
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

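/*
 * A locked cmpxchg8b is used to read 64 bits atomically.  %edx:%eax is
 * first made equal to %ecx:%ebx, so if the compare succeeds the same
 * value is stored back (memory is unchanged), and if it fails the
 * memory operand is loaded into %edx:%eax; either way %edx:%eax ends up
 * holding the current contents of *p.
 */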
static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

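/*
 * The store and swap below loop on a locked cmpxchg8b: %ecx:%ebx holds
 * the new value, and each failed attempt leaves the freshly observed
 * contents of *p in %edx:%eax, so the next iteration compares against
 * the up-to-date value until the exchange succeeds.
 */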
static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

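/*
 * Same loop as the store above; on return v (bound to %edx:%eax) holds
 * the value that was in *p before the successful exchange.
 */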
static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

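/*
 * The exported 64-bit operations dispatch at run time on CPUID_CX8
 * (cmpxchg8b support), so the same kernel works on both i486- and
 * i586-class processors.
 */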
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}
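
/*
 * Note: the atomic_fcmpset_acq/rel_long aliases further down refer to
 * an atomic_fcmpset_long(), which is not defined above.  A minimal
 * wrapper in the same style as atomic_cmpset_long() would look like the
 * following (assumed by the editor, not taken from the original file).
 */
static __inline int
atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src)
{

	return (atomic_fcmpset_int((volatile u_int *)dst, (u_int *)expect,
	    (u_int)src));
}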

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

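/*
 * xchgl with a memory operand asserts the bus lock implicitly, so no
 * MPLOCKED prefix is needed here.
 */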
static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define	atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))

#endif /* !WANT_FUNCTIONS */

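/*
 * mb(), rmb() and wmb() all expand to the locked no-op add above; a
 * full Store/Load barrier is strong enough to serve as either a read
 * or a write barrier as well.
 */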
#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */