/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
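
/*
 * For example (an illustrative sketch; the variable and function names
 * are hypothetical), a producer that publishes data with release
 * semantics, paired with a consumer that reads the flag with acquire
 * semantics, needs no explicit *mb() fences:
 *
 *	producer:
 *		data = compute();
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			cpu_spinwait();
 *		consume(data);
 */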

#include <sys/atomic_common.h>

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
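
/*
 * Illustrative use (the softc and flag names are hypothetical): state
 * bits may be set and cleared without a lock, since each operation
 * above is a single atomic read-modify-write:
 *
 *	atomic_set_int(&sc->flags, FLAG_BUSY);
 *	...
 *	atomic_clear_int(&sc->flags, FLAG_BUSY);
 */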

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
int	atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL) || defined(KLD_MODULE)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to prevent the compiler from removing
 * the code.  GCC aggressively reorders operations around memory
 * accesses, so a memory clobber is required for the barrier variants
 * to prevent such reordering.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE)				\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg %3,%1 ;	"			\
	"	sete	%0 ;		"		\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg %3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);

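/*
 * A typical retry loop, sketched for illustration (the variable names
 * are hypothetical): on failure, fcmpset stores the value it found
 * back through "expect", so the loop needs no explicit re-read of the
 * destination, unlike a loop built on cmpset:
 *
 *	u_int old, new;
 *
 *	old = *counter;
 *	do {
 *		new = old + 1;
 *	} while (atomic_fcmpset_int(counter, &old, new) == 0);
 */
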
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

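/*
 * For example (illustrative; the names are hypothetical), fetchadd
 * yields a race-free ticket dispenser, since concurrent callers each
 * observe a distinct previous value:
 *
 *	my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */
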
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

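/*
 * Sketch (illustrative only; "word" is hypothetical): because the
 * previous state of the bit is returned, testandset/testandclear can
 * implement a minimal bit spin lock, with bit 0 as the lock bit:
 *
 *	while (atomic_testandset_long(&word, 0) != 0)
 *		cpu_spinwait();
 *	... critical section ...
 *	atomic_testandclear_long(&word, 0);
 */
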
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

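/*
 * For example (an illustrative Dekker-style handshake; the flag names
 * are hypothetical), each thread stores its own flag and then loads
 * the other's; without a Store/Load barrier between the store and the
 * load, both threads could observe 0 and enter together:
 *
 *	flag_me = 1;
 *	atomic_thread_fence_seq_cst();
 *	if (flag_other == 0)
 *		... this thread may enter ...
 */
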
#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x100

#if defined(SMP) || defined(KLD_MODULE)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "er",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "er", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "er",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "er",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
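
/*
 * E.g. (illustrative; the names are hypothetical), draining an
 * accumulated statistics counter in a single atomic step:
 *
 *	total += atomic_readandclear_long(&stats);
 */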

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */