/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
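
/*
 * Example (an illustrative sketch, not part of this header): instead of
 * pairing wmb() with rmb(), publish data with a release store and
 * consume it with an acquire load.  The variables "data" and "ready"
 * are hypothetical; the atomic_*() functions are declared below.
 *
 *	static u_int data;
 *	static volatile u_int ready;
 *
 *	static void
 *	producer(u_int v)
 *	{
 *		data = v;
 *		atomic_store_rel_int(&ready, 1);
 *	}
 *
 *	static u_int
 *	consumer(void)
 *	{
 *		while (atomic_load_acq_int(&ready) == 0)
 *			continue;
 *		return (data);
 *	}
 */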

#ifdef _KERNEL
/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x100
#endif
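
/*
 * An illustrative sketch (an assumption, not the verbatim check) of the
 * kind of assertion in amd64/vm_machdep.c that keeps the open-coded
 * value honest; "struct pcpu" comes from sys/pcpu.h:
 *
 *	_Static_assert(OFFSETOF_MONITORBUF ==
 *	    offsetof(struct pcpu, pc_monitorbuf),
 *	    "OFFSETOF_MONITORBUF is out of sync with struct pcpu");
 */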

#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
#include <sys/atomic_san.h>
#else
#include <sys/atomic_common.h>

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
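
/*
 * Example (an illustrative sketch): a shared flags word and a
 * statistics counter maintained with the plain, unordered operations
 * above.  The names "flags", "count" and FLAG_BUSY are hypothetical.
 *
 *	static volatile u_int flags, count;
 *	#define	FLAG_BUSY	0x01
 *
 *	atomic_set_int(&flags, FLAG_BUSY);	(flags |= FLAG_BUSY)
 *	atomic_add_int(&count, 1);		(count += 1)
 *	atomic_clear_int(&flags, FLAG_BUSY);	(flags &= ~FLAG_BUSY)
 */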

#if !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
int	atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !__GNUCLIKE_ASM */

/*
 * Always use lock prefixes.  The result is slightly less optimal for
 * UP systems, but it matters less now, and sometimes UP is emulated
 * over SMP.
 *
 * The assembly is volatilized so the compiler cannot delete it.  GCC
 * aggressively reorders operations, so a "memory" clobber is necessary
 * to keep other memory accesses from being moved across the barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
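
/*
 * For reference, the instantiation ATOMIC_ASM(set, int, "orl %1,%0",
 * "ir", v) below expands to approximately:
 *
 *	static __inline void
 *	atomic_set_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock; orl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 *
 * plus an atomic_set_barr_int() variant whose only difference is the
 * additional "memory" clobber, which keeps the compiler from moving
 * other memory accesses across the barrier version.
 */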

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE)				\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	" lock; cmpxchg %3,%1 ;	"			\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=@cce" (res),		/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	" lock; cmpxchg %3,%1 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=@cce" (res),		/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);
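
/*
 * Example (an illustrative sketch): the usual fcmpset retry loop.  On
 * failure, fcmpset reloads *expect from *dst, so the caller does not
 * have to re-read the old value by hand on every iteration.  The
 * function "bounded_inc" and its semantics are hypothetical.
 *
 *	static __inline void
 *	bounded_inc(volatile u_int *p, u_int limit)
 *	{
 *		u_int old;
 *
 *		old = *p;
 *		do {
 *			if (old >= limit)
 *				return;
 *		} while (!atomic_fcmpset_int(p, &old, old + 1));
 *	}
 */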

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	" lock; xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	" lock;	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
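
/*
 * Example (an illustrative sketch): handing out unique, monotonically
 * increasing ticket numbers; "next_ticket" and "alloc_ticket" are
 * hypothetical.  Because fetchadd returns the previous value, no two
 * callers can observe the same ticket.
 *
 *	static volatile u_int next_ticket;
 *
 *	static __inline u_int
 *	alloc_ticket(void)
 *	{
 *		return (atomic_fetchadd_int(&next_ticket, 1));
 *	}
 */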

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock;	btsl	%2,%1 ;		"
	"# atomic_testandset_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock;	btsq	%2,%1 ;		"
	"# atomic_testandset_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock;	btrl	%2,%1 ;		"
	"# atomic_testandclear_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock;	btrq	%2,%1 ;		"
	"# atomic_testandclear_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}
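
/*
 * Example (an illustrative sketch): a minimal try-lock built from the
 * functions above, using bit 0 of a hypothetical "lock" word.  The
 * return value is the previous state of the bit, so 0 from testandset
 * means the caller has just taken the lock.  A production lock would
 * also need the acquire/release ordering discussed below.
 *
 *	static __inline int
 *	example_trylock(volatile u_int *lock)
 *	{
 *		return (atomic_testandset_int(lock, 0) == 0);
 *	}
 *
 *	static __inline void
 *	example_unlock(volatile u_int *lock)
 *	{
 *		atomic_testandclear_int(lock, 0);
 *	}
 */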

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 */

static __inline void
__storeload_barrier(void)
{
#if defined(_KERNEL)
	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
#else /* !_KERNEL */
	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
#endif /* _KERNEL */
}

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
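
/*
 * Example (an illustrative sketch): the store-followed-by-load case
 * from the comment above, in the style of Dekker's algorithm.  Without
 * the sequentially consistent fence, the load of "other" could be
 * satisfied before the store to "mine" became globally visible, and
 * both threads could enter the critical section.  The variables are
 * hypothetical.
 *
 *	static volatile u_int mine, other;
 *
 *	mine = 1;
 *	atomic_thread_fence_seq_cst();
 *	if (other == 0) {
 *		(critical section)
 *	}
 */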

#endif /* !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "er",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "er", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "er",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "er",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
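
/*
 * Example (an illustrative sketch): draining a word of pending event
 * bits in one shot; "pending" is hypothetical.
 * atomic_readandclear_int(), defined below as atomic_swap_int(p, 0),
 * returns the old contents and leaves zero behind atomically, so no
 * event is lost or handled twice.
 *
 *	static volatile u_int pending;
 *	u_int ev;
 *
 *	ev = atomic_readandclear_int(&pending);
 *	(handle each bit set in ev)
 */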

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
#define	atomic_testandset_acq_long	atomic_testandset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long
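
/*
 * Example (an illustrative sketch): a lock-free stack push using
 * atomic_fcmpset_rel_ptr(); "struct node", "head" and "push" are
 * hypothetical, and the matching pop is omitted because it must also
 * deal with the ABA problem.
 *
 *	struct node {
 *		struct node	*next;
 *	};
 *
 *	static void
 *	push(volatile u_long *head, struct node *n)
 *	{
 *		u_long old;
 *
 *		old = atomic_load_acq_ptr(head);
 *		do {
 *			n->next = (struct node *)old;
 *		} while (!atomic_fcmpset_rel_ptr(head, &old, (u_long)n));
 *	}
 */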

#endif /* !WANT_FUNCTIONS */

#endif /* !SAN_NEEDS_INTERCEPTORS || SAN_RUNTIME */

#endif /* !_MACHINE_ATOMIC_H_ */