/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/atomic.h,v 1.9.2.1 2000/07/07 00:38:47 obrien Exp $
 */
#ifndef _CPU_ATOMIC_H_
#define _CPU_ATOMIC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif

/*
 * Various simple arithmetic operations on memory, each atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long*)(P)); *(u_long*)(P) = 0;)
 * atomic_readandclear_int(P)	(return (*(u_int*)(P)); *(u_int*)(P) = 0;)
 */
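
/*
 * Illustrative sketch (not part of the API): typical use of the basic
 * operations above on a hypothetical flags word.  FLAG_BUSY is an
 * assumed example constant, not defined by this header.
 *
 *	static volatile u_int flags;
 *
 *	atomic_set_int(&flags, FLAG_BUSY);	(atomically: flags |= FLAG_BUSY)
 *	atomic_clear_int(&flags, FLAG_BUSY);	(atomically: flags &= ~FLAG_BUSY)
 */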

/*
 * locked bus cycle
 * lock elision (backwards compatible)
 */
#define MPLOCKED	"lock ; "
#define XACQUIRE	"repne; "	/* lock elision */
#define XRELEASE	"repe; "	/* lock elision */

/*
 * The asm statements are marked volatile to demarcate potential
 * before-and-after side effects should an interrupt or SMP collision
 * occur.  The primary atomic instructions are MP safe; the nonlocked
 * instructions are local-interrupt-safe (so we don't depend on a C
 * 'X |= Y' generating an atomic instruction).
 *
 * +m - memory is read and written (=m - memory is only written)
 * iq - integer constant or %ax/%bx/%cx/%dx (ir = int constant or any reg)
 *	(Note: byte instructions only work on %ax,%bx,%cx, or %dx).  iq
 *	is good enough for our needs so don't get fancy.
 * r  - any register.
 *
 * NOTE: 64-bit immediate values are not supported for most x86-64
 *	 instructions so we have to use "r".
 */

/* egcs 1.1.2+ version */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
			 : "+m" (*p)			\
			 : CONS (V));			\
}							\
static __inline void					\
atomic_##NAME##_##TYPE##_xacquire(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(XACQUIRE MPLOCKED OP		\
			 : "+m" (*p)			\
			 : CONS (V));			\
}							\
static __inline void					\
atomic_##NAME##_##TYPE##_xrelease(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(XRELEASE MPLOCKED OP		\
			 : "+m" (*p)			\
			 : CONS (V));			\
}							\
static __inline void					\
atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(OP				\
			 : "+m" (*p)			\
			 : CONS (V));			\
}

/* egcs 1.1.2+ version */
ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",   v)
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq",  ~v)
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",   v)
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",   v)

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "iq",   v)
ATOMIC_ASM(clear,    short, "andw %w1,%0", "iq",  ~v)
ATOMIC_ASM(add,	     short, "addw %w1,%0", "iq",   v)
ATOMIC_ASM(subtract, short, "subw %w1,%0", "iq",   v)

ATOMIC_ASM(set,	     int,   "orl %1,%0",  "iq",   v)
ATOMIC_ASM(clear,    int,   "andl %1,%0", "iq",  ~v)
ATOMIC_ASM(add,	     int,   "addl %1,%0", "iq",   v)
ATOMIC_ASM(subtract, int,   "subl %1,%0", "iq",   v)

ATOMIC_ASM(set,	     long,  "orq %1,%0",  "r",   v)
ATOMIC_ASM(clear,    long,  "andq %1,%0", "r",  ~v)
ATOMIC_ASM(add,	     long,  "addq %1,%0", "r",   v)
ATOMIC_ASM(subtract, long,  "subq %1,%0", "r",   v)

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

/*
 * atomic_poll_acquire_int(P)	Returns non-zero on success, 0 if the lock
 *				has already been acquired.
 * atomic_poll_release_int(P)
 *
 * These are used for IPIQ interlocks between CPUs.
 * Both the acquisition and release must be cache-synchronizing instructions.
 */

static __inline int
atomic_swap_int(volatile int *addr, int value)
{
	__asm __volatile("xchgl %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline long
atomic_swap_long(volatile long *addr, long value)
{
	__asm __volatile("xchgq %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline void *
atomic_swap_ptr(volatile void **addr, void *value)
{
	__asm __volatile("xchgq %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}
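
/*
 * Illustrative sketch (an assumption, not part of this header): a
 * trivial exchange-based spinlock built on atomic_swap_int().  A
 * return value of 0 means the lock word was free and the caller now
 * owns it.  'mylock' is hypothetical; cpu_pause() is the usual
 * spin-wait hint.
 *
 *	static volatile int mylock;
 *
 *	while (atomic_swap_int(&mylock, 1) != 0)
 *		cpu_pause();
 *	... critical section ...
 *	atomic_swap_int(&mylock, 0);
 */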

static __inline int
atomic_poll_acquire_int(volatile u_int *p)
{
	u_int data;

	__asm __volatile(MPLOCKED "btsl $0,%0; setnc %%al; andl $255,%%eax"
			 : "+m" (*p), "=a" (data));
	return(data);
}

static __inline void
atomic_poll_release_int(volatile u_int *p)
{
	__asm __volatile(MPLOCKED "btrl $0,%0" : "+m" (*p));
}
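
/*
 * Illustrative sketch: polling an IPIQ-style interlock with the
 * functions above.  'ipiq_lock' is a hypothetical u_int.
 *
 *	while (atomic_poll_acquire_int(&ipiq_lock) == 0)
 *		cpu_pause();
 *	... issue the IPI ...
 *	atomic_poll_release_int(&ipiq_lock);
 */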

/*
 * These functions operate on a 32 bit interrupt interlock which is defined
 * as follows:
 *
 *	bit 0-29	interrupt handler wait counter
 *	bit 30		interrupt handler disabled bit
 *	bit 31		interrupt handler currently running bit (1 = run)
 *
 * atomic_intr_cond_test(P)	Determine if the interlock is in an
 *				acquired state.  Returns 0 if it is not
 *				acquired, non-zero if it is. (not MPLOCKed)
 *
 * atomic_intr_cond_try(P)	Attempt to set bit 31 to acquire the
 *				interlock.  If we are unable to set bit 31
 *				we return 1, otherwise we return 0.
 *
 * atomic_intr_cond_enter(P, func, arg)
 *				Attempt to set bit 31 to acquire the
 *				interlock.  If we are unable to set bit 31,
 *				the wait counter is incremented and func(arg)
 *				is called in a loop until we are able to set
 *				bit 31.  Once we set bit 31, the wait counter
 *				is decremented.
 *
 * atomic_intr_cond_exit(P, func, arg)
 *				Clear bit 31.  If the wait counter is still
 *				non-zero call func(arg) once.
 *
 * atomic_intr_handler_disable(P)
 *				Set bit 30, indicating that the interrupt
 *				handler has been disabled.  Must be called
 *				after the hardware is disabled.
 *
 *				Returns bit 31 indicating whether a serialized
 *				accessor is active (typically the interrupt
 *				handler is running).  0 == not active,
 *				non-zero == active.
 *
 * atomic_intr_handler_enable(P)
 *				Clear bit 30, indicating that the interrupt
 *				handler has been enabled.  Must be called
 *				before the hardware is actually enabled.
 *
 * atomic_intr_handler_is_enabled(P)
 *				Returns bit 30; 0 indicates that the handler
 *				is enabled, non-zero indicates that it is
 *				disabled.  The wait counter portion of
 *				the field is ignored. (not MPLOCKed)
 *
 * atomic_intr_cond_inc(P)	Increment the wait counter by 1.
 * atomic_intr_cond_dec(P)	Decrement the wait counter by 1.
 */
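
/*
 * Illustrative sketch (hypothetical names): serializing against an
 * interrupt handler with the interlock above.  wait_func() is a
 * caller-supplied contention callback; wakeup_func() is a second
 * hypothetical callback invoked once on exit if waiters remain.
 *
 *	static __atomic_intr_t intr_lock;
 *
 *	static void
 *	wait_func(void *arg)
 *	{
 *		cpu_pause();
 *	}
 *
 *	atomic_intr_cond_enter(&intr_lock, wait_func, NULL);
 *	... serialized work ...
 *	atomic_intr_cond_exit(&intr_lock, wakeup_func, NULL);
 */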

static __inline void
atomic_intr_init(__atomic_intr_t *p)
{
	*p = 0;
}

static __inline int
atomic_intr_handler_disable(__atomic_intr_t *p)
{
	int data;

	__asm __volatile(MPLOCKED "orl $0x40000000,%1; movl %1,%%eax; " \
				  "andl $0x80000000,%%eax" \
				  : "=a"(data), "+m"(*p));
	return(data);
}

static __inline void
atomic_intr_handler_enable(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "andl $0xBFFFFFFF,%0" : "+m" (*p));
}

static __inline int
atomic_intr_handler_is_enabled(__atomic_intr_t *p)
{
	int data;

	__asm __volatile("movl %1,%%eax; andl $0x40000000,%%eax" \
			 : "=a"(data) : "m"(*p));
	return(data);
}

static __inline void
atomic_intr_cond_inc(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "incl %0" : "+m" (*p));
}

static __inline void
atomic_intr_cond_dec(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "decl %0" : "+m" (*p));
}

static __inline void
atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "btsl $31,%0; jnc 3f; " \
			 MPLOCKED "incl %0; " \
			 "1: ;" \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "jmp 1b; " \
			 "2: ;" \
			 MPLOCKED "decl %0; " \
			 "3: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
		/* YYY the function call may clobber even more registers? */
}

/*
 * Attempt to enter the interrupt condition variable.  Returns zero on
 * success, 1 on failure.
 */
static __inline int
atomic_intr_cond_try(__atomic_intr_t *p)
{
	int ret;

	__asm __volatile("subl %%eax,%%eax; "			\
			 MPLOCKED "btsl $31,%0; jnc 2f; "	\
			 "movl $1,%%eax;"			\
			 "2: ;"					\
			 : "+m" (*p), "=&a"(ret)		\
			 : : "cx", "dx");
	return (ret);
}

static __inline int
atomic_intr_cond_test(__atomic_intr_t *p)
{
	return((int)(*p & 0x80000000));
}

static __inline void
atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "btrl $31,%0; " \
			 "testl $0x3FFFFFFF,%0; jz 1f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "1: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
		/* YYY the function call may clobber even more registers? */
}

/*
 * Atomic compare and set
 *
 *	if (*_dst == _old) *_dst = _new (for each supported operand size)
 *
 * The cmpset functions return 0 on failure and non-zero on success;
 * atomic_cmpxchg_int() returns the previous contents of *_dst instead.
 * The inlines are designed to allow the compiler to optimize the common
 * case where the caller calls these functions from inside a conditional.
 */
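
/*
 * Illustrative sketch: the classic compare-and-set retry loop, here
 * or-ing a bit into *p with atomic_cmpset_int().  SOME_BIT is a
 * hypothetical constant; the ATOMIC_ASM operations above already
 * handle this particular case directly.
 *
 *	u_int old;
 *
 *	do {
 *		old = *p;
 *	} while (atomic_cmpset_int(p, old, old | SOME_BIT) == 0);
 */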

static __inline int
atomic_cmpxchg_int(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res);
}

static __inline int
atomic_cmpxchg_long_test(volatile u_long *_dst, u_long _old, u_long _new)
{
	u_long res = _old;	/* must be 64 bits: cmpxchgq compares %rax */

	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; "
				  " setz %%al;"
				  " movsbq %%al,%%rax" \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return ((int)res);
}

static __inline int
atomic_cmpset_short(volatile u_short *_dst, u_short _old, u_short _new)
{
	u_short res = _old;

	__asm __volatile(MPLOCKED "cmpxchgw %w2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline int
atomic_fcmpset_char(volatile u_char *_dst, u_char *_old, u_char _new)
{
	u_char res = *_old;

	__asm __volatile(MPLOCKED "cmpxchgb %2,%0; " \
			 : "+m" (*_dst),		/* 0 */
			   "+a" (*_old)			/* 1 */
			 : "r" (_new)			/* 2 */
			 : "memory", "cc");
	return (res == *_old);
}

static __inline int
atomic_fcmpset_short(volatile u_short *_dst, u_short *_old, u_short _new)
{
	u_short res = *_old;

	__asm __volatile(MPLOCKED "cmpxchgw %2,%0; " \
			 : "+m" (*_dst),		/* 0 */
			   "+a" (*_old)			/* 1 */
			 : "r" (_new)			/* 2 */
			 : "memory", "cc");
	return (res == *_old);
}

static __inline int
atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline int
atomic_fcmpset_int(volatile u_int *_dst, u_int *_old, u_int _new)
{
	u_int res = *_old;

	__asm __volatile(MPLOCKED "cmpxchgl %2,%0; " \
			 : "+m" (*_dst),		/* 0 */
			   "+a" (*_old)			/* 1 */
			 : "r" (_new)			/* 2 */
			 : "memory", "cc");
	return (res == *_old);
}
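
/*
 * Illustrative sketch: on failure atomic_fcmpset_int() reloads *_old
 * with the current contents of *_dst, so the retry loop below avoids
 * the extra re-read that the cmpset loop needs.  SOME_BIT is again a
 * hypothetical constant.
 *
 *	u_int old = *p;
 *
 *	while (atomic_fcmpset_int(p, &old, old | SOME_BIT) == 0)
 *		;	(old already holds the fresh value)
 */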

static __inline int
atomic_cmpset_int_xacquire(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(XACQUIRE MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline int
atomic_cmpset_int_xrelease(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(XRELEASE MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline int
atomic_cmpset_long(volatile u_long *_dst, u_long _old, u_long _new)
{
	u_long res = _old;

	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline int
atomic_fcmpset_long(volatile u_long *_dst, u_long *_old, u_long _new)
{
	u_long res = *_old;

	__asm __volatile(MPLOCKED "cmpxchgq %2,%0; " \
			 : "+m" (*_dst),		/* 0 */
			   "+a" (*_old)			/* 1 */
			 : "r" (_new)			/* 2 */
			 : "memory", "cc");
	return (res == *_old);
}

static __inline int
atomic_cmpset_long_xacquire(volatile u_long *_dst, u_long _old, u_long _new)
{
	u_long res = _old;

	__asm __volatile(XACQUIRE MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline int
atomic_cmpset_long_xrelease(volatile u_long *_dst, u_long _old, u_long _new)
{
	u_long res = _old;

	__asm __volatile(XRELEASE MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline void *
atomic_cas_ptr(volatile void *p, void *e, void *n)
{
	__asm __volatile(MPLOCKED " cmpxchgq %2, %1"
	    : "=a" (n), "=m" (*(volatile unsigned long *)p)
	    : "r" (n), "a" (e), "m" (*(volatile unsigned long *)p));

	return (n);
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *_p, u_int _v)
{
	__asm __volatile(MPLOCKED "xaddl %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

static __inline u_int
atomic_fetchadd_int_xacquire(volatile u_int *_p, u_int _v)
{
	__asm __volatile(XACQUIRE MPLOCKED "xaddl %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

static __inline u_int
atomic_fetchadd_int_xrelease(volatile u_int *_p, u_int _v)
{
	__asm __volatile(XRELEASE MPLOCKED "xaddl %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *_p, u_long _v)
{
	__asm __volatile(MPLOCKED "xaddq %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

static __inline u_long
atomic_fetchadd_long_xacquire(volatile u_long *_p, u_long _v)
{
	__asm __volatile(XACQUIRE MPLOCKED "xaddq %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

static __inline u_long
atomic_fetchadd_long_xrelease(volatile u_long *_p, u_long _v)
{
	__asm __volatile(XRELEASE MPLOCKED "xaddq %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}
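
/*
 * Illustrative sketch: because the fetchadd functions return the value
 * prior to the addition, they are a natural fit for handing out unique
 * ids or tickets.  'next_id' is hypothetical.
 *
 *	static volatile u_int next_id;
 *
 *	u_int my_id = atomic_fetchadd_int(&next_id, 1);
 */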

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_long v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x3f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_long v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x3f)		/* 2 */
	: "cc");
	return (res);
}
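
/*
 * Illustrative sketch: atomic_testandset_int() returns the previous
 * state of the bit, so a 0 return means this caller flipped the bit
 * from 0 to 1 and owns it.  LOCK_BIT and 'word' are hypothetical.
 *
 *	while (atomic_testandset_int(&word, LOCK_BIT) != 0)
 *		cpu_pause();
 *	... critical section ...
 *	atomic_testandclear_int(&word, LOCK_BIT);
 */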

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res; /* accumulator can be anything */	\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p));			/* 2 */		\
}							\
struct __hack

ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,  "cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long, "cmpxchgq %0,%1",  "xchgq %1,%0");
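
/*
 * Illustrative sketch: pairing atomic_store_rel_int() with
 * atomic_load_acq_int() to publish data from one cpu to another.
 * 'payload' and 'ready' are hypothetical.
 *
 *	producer:
 *		payload = compute();
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			cpu_pause();
 *		use(payload);
 */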

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

/* Acquire and release variants are identical to the normal ones. */
#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* cpumask_t is 64 bits on x86-64 */
#define	atomic_set_cpumask		atomic_set_long
#define	atomic_clear_cpumask		atomic_clear_long
#define	atomic_cmpset_cpumask		atomic_cmpset_long
#define	atomic_store_rel_cpumask	atomic_store_rel_long
#define	atomic_load_acq_cpumask		atomic_load_acq_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on 64-bit quad words. */
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_add_64		atomic_add_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_set_64		atomic_set_long
#define	atomic_clear_64		atomic_clear_long

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_long((volatile u_long *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_long((volatile u_long *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new)				\
	atomic_cmpset_long((volatile u_long *)(dst), (u_long)(old),	\
				(u_long)(new))
#define	atomic_fcmpset_ptr(dst, old, new)				\
	atomic_fcmpset_long((volatile u_long *)(dst), (u_long *)(old),	\
				(u_long)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new)				\
	atomic_cmpset_acq_long((volatile u_long *)(dst), (u_long)(old), \
				(u_long)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new)				\
	atomic_cmpset_rel_long((volatile u_long *)(dst), (u_long)(old), \
				(u_long)(new))
#define	atomic_readandclear_ptr(p)					\
	atomic_readandclear_long((volatile u_long *)(p))
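
/*
 * Illustrative sketch: a lock-free push onto a singly linked list
 * using atomic_cmpset_ptr().  'struct node', 'n', and 'list_head'
 * are hypothetical.
 *
 *	do {
 *		n->next = list_head;
 *	} while (atomic_cmpset_ptr(&list_head, n->next, n) == 0);
 */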

#endif /* ! _CPU_ATOMIC_H_ */