/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/atomic.h,v 1.9.2.1 2000/07/07 00:38:47 obrien Exp $
 * $DragonFly: src/sys/cpu/i386/include/atomic.h,v 1.25 2008/06/26 23:06:50 dillon Exp $
 */
#ifndef _CPU_ATOMIC_H_
#define _CPU_ATOMIC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif

/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long*)(P)); *(u_long*)(P) = 0;)
 * atomic_readandclear_int(P)	(return (*(u_int*)(P)); *(u_int*)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built, or if user code is using these functions.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
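/*
 * A minimal usage sketch (editorial, not part of the original header;
 * the variable name is hypothetical).  Each call expands to a single
 * read-modify-write instruction, LOCK-prefixed on SMP builds, so the
 * update is safe against interrupts and other cpus:
 *
 *	static volatile u_int my_flags;		hypothetical
 *
 *	atomic_set_int(&my_flags, 0x0001);	my_flags |= 0x0001
 *	atomic_clear_int(&my_flags, 0x0001);	my_flags &= ~0x0001
 *	atomic_add_int(&my_flags, 1);		my_flags += 1
 *	atomic_subtract_int(&my_flags, 1);	my_flags -= 1
 */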
#if defined(KLD_MODULE)

#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
	extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
	extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);

#else /* !KLD_MODULE */

#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED	"lock ; "
#else
#define MPLOCKED
#endif

/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.  The primary
 * atomic instructions are MP safe, the nonlocked instructions are
 * local-interrupt-safe (so we don't depend on C 'X |= Y' generating an
 * atomic instruction).
 *
 * +m - memory is read and written (=m - memory is only written)
 *
 * iq - integer constant or %ax/%bx/%cx/%dx (ir = int constant or any reg)
 *	(Note: byte instructions only work on %ax,%bx,%cx, or %dx).  iq
 *	is good enough for our needs so don't get fancy.
 *
 * r  - any register.
 *
 * NOTE: 64-bit immediate values are not supported for most x86-64
 *	 instructions so we have to use "r".
 */

/* egcs 1.1.2+ version */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
			 : "+m" (*p)			\
			 : CONS (V));			\
}							\
static __inline void					\
atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(OP				\
			 : "+m" (*p)			\
			 : CONS (V));			\
}

#endif /* KLD_MODULE */

/* egcs 1.1.2+ version */
ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v)
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v)
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v)
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v)

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "iq",  v)
ATOMIC_ASM(clear,    short, "andw %w1,%0", "iq", ~v)
ATOMIC_ASM(add,	     short, "addw %w1,%0", "iq",  v)
ATOMIC_ASM(subtract, short, "subw %w1,%0", "iq",  v)

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "iq",  v)
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "iq", ~v)
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "iq",  v)
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "iq",  v)

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "r",   v)
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "r",  ~v)
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "r",   v)
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "r",   v)

#if defined(KLD_MODULE)

u_long	atomic_readandclear_long(volatile u_long *addr);
u_int	atomic_readandclear_int(volatile u_int *addr);

#else /* !KLD_MODULE */

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
		" xchgq %1,%0 ;		"
		"# atomic_readandclear_long"
		: "+r" (res),		/* 0 */
		  "=m" (*addr)		/* 1 */
		: "m" (*addr));

	return (res);
}

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
		" xchgl %1,%0 ;		"
		"# atomic_readandclear_int"
		: "+r" (res),		/* 0 */
		  "=m" (*addr)		/* 1 */
		: "m" (*addr));

	return (res);
}

#endif /* KLD_MODULE */
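/*
 * Usage sketch (editorial; names are hypothetical).  The XCHG-based
 * readandclear is useful for draining a word of pending-event bits
 * exactly once, since fetching the old bits and zeroing the word
 * happen in one atomic step:
 *
 *	static volatile u_int pending;		hypothetical
 *
 *	u_int bits = atomic_readandclear_int(&pending);
 *	while (bits) {
 *		... handle lowest set bit ...
 *		bits &= bits - 1;		clear lowest set bit
 *	}
 */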
/*
 * atomic_poll_acquire_int(P)	Returns non-zero on success, 0 if the lock
 *				has already been acquired.
 * atomic_poll_release_int(P)
 *
 * These support the NDIS driver and are also used for IPIQ interlocks
 * between cpus.  Both the acquisition and release must be
 * cache-synchronizing instructions.
 */

#if defined(KLD_MODULE)

extern int atomic_swap_int(volatile int *addr, int value);
extern int atomic_poll_acquire_int(volatile u_int *p);
extern void atomic_poll_release_int(volatile u_int *p);

#else

static __inline int
atomic_swap_int(volatile int *addr, int value)
{
	__asm __volatile("xchgl %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline
int
atomic_poll_acquire_int(volatile u_int *p)
{
	u_int data;

	__asm __volatile(MPLOCKED "btsl $0,%0; setnc %%al; andl $255,%%eax"
			 : "+m" (*p), "=a" (data));
	return(data);
}

static __inline
void
atomic_poll_release_int(volatile u_int *p)
{
	__asm __volatile(MPLOCKED "btrl $0,%0" : "+m" (*p));
}

#endif

/*
 * These functions operate on a 32 bit interrupt interlock which is defined
 * as follows:
 *
 *	bit 0-30	interrupt handler disabled bits (counter)
 *	bit 31		interrupt handler currently running bit (1 = run)
 *
 * atomic_intr_cond_test(P)	Determine if the interlock is in an
 *				acquired state.  Returns 0 if it is not
 *				acquired, non-zero if it is.
 *
 * atomic_intr_cond_try(P)
 *				Increment the request counter and attempt to
 *				set bit 31 to acquire the interlock.  If
 *				we are unable to set bit 31 the request
 *				counter is decremented and we return 1,
 *				otherwise we return 0.
 *
 * atomic_intr_cond_enter(P, func, arg)
 *				Increment the request counter and attempt to
 *				set bit 31 to acquire the interlock.  If
 *				we are unable to set bit 31 func(arg) is
 *				called in a loop until we are able to set
 *				bit 31.
 *
 * atomic_intr_cond_exit(P, func, arg)
 *				Decrement the request counter and clear bit
 *				31.  If the request counter is still non-zero
 *				call func(arg) once.
 *
 * atomic_intr_handler_disable(P)
 *				Set bit 30, indicating that the interrupt
 *				handler has been disabled.  Must be called
 *				after the hardware is disabled.
 *
 *				Returns bit 31 indicating whether a serialized
 *				accessor is active (typically the interrupt
 *				handler is running).  0 == not active,
 *				non-zero == active.
 *
 * atomic_intr_handler_enable(P)
 *				Clear bit 30, indicating that the interrupt
 *				handler has been enabled.  Must be called
 *				before the hardware is actually enabled.
 *
 * atomic_intr_handler_is_enabled(P)
 *				Returns bit 30, 0 indicates that the handler
 *				is enabled, non-zero indicates that it is
 *				disabled.  The request counter portion of
 *				the field is ignored.
 */
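/*
 * Usage sketch (editorial; the lock variable and the contention helper
 * are hypothetical).  A serializer typically brackets its critical
 * section with cond_enter/cond_exit, supplying a helper that spins,
 * blocks, or kicks the other side while the interlock is held:
 *
 *	static __atomic_intr_t intr_lock;	hypothetical
 *
 *	static void
 *	contended(void *arg)			hypothetical helper
 *	{
 *		cpu_pause();
 *	}
 *
 *	atomic_intr_cond_enter(&intr_lock, contended, NULL);
 *	... serialized work ...
 *	atomic_intr_cond_exit(&intr_lock, contended, NULL);
 */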
#if defined(KLD_MODULE)

void atomic_intr_init(__atomic_intr_t *p);
int atomic_intr_handler_disable(__atomic_intr_t *p);
void atomic_intr_handler_enable(__atomic_intr_t *p);
int atomic_intr_handler_is_enabled(__atomic_intr_t *p);
int atomic_intr_cond_test(__atomic_intr_t *p);
int atomic_intr_cond_try(__atomic_intr_t *p);
void atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg);
void atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg);

#else

static __inline
void
atomic_intr_init(__atomic_intr_t *p)
{
	*p = 0;
}

static __inline
int
atomic_intr_handler_disable(__atomic_intr_t *p)
{
	int data;

	__asm __volatile(MPLOCKED "orl $0x40000000,%1; movl %1,%%eax; " \
				  "andl $0x80000000,%%eax" \
			 : "=a"(data) , "+m"(*p));
	return(data);
}

static __inline
void
atomic_intr_handler_enable(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "andl $0xBFFFFFFF,%0" : "+m" (*p));
}

static __inline
int
atomic_intr_handler_is_enabled(__atomic_intr_t *p)
{
	int data;

	__asm __volatile("movl %1,%%eax; andl $0x40000000,%%eax" \
			 : "=a"(data) : "m"(*p));
	return(data);
}

static __inline
void
atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "incl %0; " \
			 "1: ;" \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "jmp 1b; " \
			 "2: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
		/* YYY the function call may clobber even more registers? */
}

/*
 * Attempt to enter the interrupt condition variable.  Returns zero on
 * success, 1 on failure.
 */
static __inline
int
atomic_intr_cond_try(__atomic_intr_t *p)
{
	int ret;

	__asm __volatile(MPLOCKED "incl %0; " \
			 "1: ;" \
			 "subl %%eax,%%eax; " \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 MPLOCKED "decl %0; " \
			 "movl $1,%%eax;" \
			 "2: ;" \
			 : "+m" (*p), "=&a" (ret) \
			 : : "cx", "dx");
	return (ret);
}

static __inline
int
atomic_intr_cond_test(__atomic_intr_t *p)
{
	return((int)(*p & 0x80000000));
}

static __inline
void
atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "decl %0; " \
			 MPLOCKED "btrl $31,%0; " \
			 "testl $0x3FFFFFFF,%0; jz 1f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "1: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
		/* YYY the function call may clobber even more registers? */
}

#endif

/*
 * Atomic compare and set
 *
 * if (*_dst == _old) *_dst = _new (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.  The inline is designed to
 * allow the compiler to optimize the common case where the caller calls
 * these functions from inside a conditional.
 */
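/*
 * Usage sketch (editorial; the counter is hypothetical).  The usual
 * pattern is a compare-and-set loop which recomputes the new value
 * from a fresh snapshot until the swap succeeds:
 *
 *	static volatile u_int counter;		hypothetical
 *
 *	u_int old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (atomic_cmpset_int(&counter, old, new) == 0);
 *
 * (atomic_fetchadd_int(&counter, 1), declared below, performs the same
 * increment in a single instruction and returns the previous value.)
 */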
#if defined(KLD_MODULE)

extern int atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new);
extern long atomic_cmpset_long(volatile u_long *_dst, u_long _exp, u_long _src);
extern u_int atomic_fetchadd_int(volatile u_int *_p, u_int _v);

#else

static __inline int
atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline long
atomic_cmpset_long(volatile u_long *_dst, u_long _old, u_long _new)
{
	u_long res = _old;

	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *_p, u_int _v)
{
	__asm __volatile(MPLOCKED "xaddl %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

#endif	/* KLD_MODULE */

#if defined(KLD_MODULE)

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)	\
extern u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p); \
extern void	atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);

#else /* !KLD_MODULE */

#if defined(_KERNEL) && !defined(SMP)
/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we don't need any memory barriers.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
			 : "=a" (res),	/* 0 */		\
			   "=m" (*p)	/* 1 */		\
			 : "m" (*p)	/* 2 */		\
			 : "memory");			\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
			 : "=m" (*p),	/* 0 */		\
			   "+r" (v)	/* 1 */		\
			 : "m" (*p));	/* 2 */		\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* !KLD_MODULE */

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgq %0,%1",  "xchgq %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD
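/*
 * Usage sketch (editorial; names are hypothetical).  The load_acq and
 * store_rel variants pair up to publish data: the release store makes
 * the payload visible no later than the flag, and the acquire load
 * orders the flag check before any subsequent reads of the payload:
 *
 *	static int payload;			hypothetical
 *	static volatile u_int ready;		hypothetical
 *
 *	producer:
 *		payload = 42;
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			;
 *		... payload is now valid ...
 */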
/*
 * Acquire and release variants are identical to the normal ones.
 */
#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* cpumask_t is 64-bits on x86-64 */
#define	atomic_set_cpumask		atomic_set_long
#define	atomic_clear_cpumask		atomic_clear_long
#define	atomic_cmpset_cpumask		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_long((volatile u_long *)(p), (u_long)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_long((volatile u_long *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_long((volatile u_long *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_long((volatile u_long *)(p))
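/*
 * Usage sketch (editorial; the node type and list head are
 * hypothetical).  The pointer variants make simple lock-free
 * structures straightforward, e.g. pushing onto a singly-linked
 * stack with atomic_cmpset_ptr():
 *
 *	struct node { struct node *next; };	hypothetical
 *	static struct node *head;		hypothetical
 *
 *	static void
 *	push(struct node *n)
 *	{
 *		do {
 *			n->next = head;
 *		} while (!atomic_cmpset_ptr(&head, n->next, n));
 *	}
 */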
#endif /* ! _CPU_ATOMIC_H_ */