/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/atomic.h,v 1.9.2.1 2000/07/07 00:38:47 obrien Exp $
 */
#ifndef _CPU_ATOMIC_H_
#define _CPU_ATOMIC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif

/*
 * Various simple arithmetic operations on memory which are atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 */
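/*
 * Illustrative sketch (not part of this header): a driver maintaining a
 * shared flags word and an interrupt counter might use the operations
 * above as follows.  The variable names here are hypothetical.
 *
 *	static u_int  sc_flags;
 *	static u_long sc_intr_count;
 *	u_long count;
 *
 *	atomic_set_int(&sc_flags, 0x0001);	(mark busy)
 *	atomic_add_long(&sc_intr_count, 1);
 *	atomic_clear_int(&sc_flags, 0x0001);	(mark idle)
 *	count = atomic_readandclear_long(&sc_intr_count);
 */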
/*
 * The above functions are expanded inline in the statically-linked
 * kernel and lock prefixes are generated.
 *
 * Kernel modules call real functions which are built into the kernel.
 */
#if defined(KLD_MODULE)

#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)				\
	extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
	extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);

int	atomic_testandset_int(volatile u_int *p, u_int v);

#else /* !KLD_MODULE */

#define MPLOCKED	"lock ; "

/*
 * The assembly is volatilized to demarcate potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.  The primary
 * atomic instructions are MP safe, the nonlocked instructions are
 * local-interrupt-safe (so we don't depend on C 'X |= Y' generating an
 * atomic instruction).
 *
 * +m - memory is read and written (=m - memory is only written)
 * iq - integer constant or %al/%bl/%cl/%dl (ir = int constant or any reg)
 *	(Note: byte instructions only work on %al, %bl, %cl, or %dl).  iq
 *	is good enough for our needs so don't get fancy.
 * r  - any register.
 *
 * NOTE: 64-bit immediate values are not supported for most x86-64
 * instructions so we have to use "r".
 */

/* egcs 1.1.2+ version */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
			 : "+m" (*p)			\
			 : CONS (V));			\
}							\
static __inline void					\
atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(OP				\
			 : "+m" (*p)			\
			 : CONS (V));			\
}

#endif /* KLD_MODULE */

/* egcs 1.1.2+ version */
ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v)
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v)
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v)
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v)

ATOMIC_ASM(set,      short, "orw %w1,%0",  "iq",  v)
ATOMIC_ASM(clear,    short, "andw %w1,%0", "iq", ~v)
ATOMIC_ASM(add,      short, "addw %w1,%0", "iq",  v)
ATOMIC_ASM(subtract, short, "subw %w1,%0", "iq",  v)

ATOMIC_ASM(set,      int,   "orl %1,%0",   "iq",  v)
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "iq", ~v)
ATOMIC_ASM(add,      int,   "addl %1,%0",  "iq",  v)
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "iq",  v)

ATOMIC_ASM(set,      long,  "orq %1,%0",   "r",   v)
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "r",  ~v)
ATOMIC_ASM(add,      long,  "addq %1,%0",  "r",   v)
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "r",   v)

#if defined(KLD_MODULE)

u_long	atomic_readandclear_long(volatile u_long *addr);
u_int	atomic_readandclear_int(volatile u_int *addr);

#else /* !KLD_MODULE */

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),		/* 0 */
	  "=m" (*addr)		/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),		/* 0 */
	  "=m" (*addr)		/* 1 */
	: "m" (*addr));

	return (res);
}

#endif /* KLD_MODULE */

/*
 * atomic_poll_acquire_int(P)	Returns non-zero on success, 0 if the lock
 *				has already been acquired.
 * atomic_poll_release_int(P)
 *
 * These support the NDIS driver and are also used for IPIQ interlocks
 * between CPUs.  Both the acquisition and release must be
 * cache-synchronizing instructions.
 */
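/*
 * Illustrative sketch (not part of this header): polling for an interlock
 * with the functions declared below.  "ipiq_interlock" and spin_briefly()
 * are hypothetical placeholders for the caller's own state and back-off.
 *
 *	static u_int ipiq_interlock;
 *
 *	while (atomic_poll_acquire_int(&ipiq_interlock) == 0)
 *		spin_briefly();
 *	... critical work ...
 *	atomic_poll_release_int(&ipiq_interlock);
 */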
#if defined(KLD_MODULE)

extern int atomic_swap_int(volatile int *addr, int value);
extern long atomic_swap_long(volatile long *addr, long value);
extern void *atomic_swap_ptr(volatile void **addr, void *value);
extern int atomic_poll_acquire_int(volatile u_int *p);
extern void atomic_poll_release_int(volatile u_int *p);

#else

static __inline int
atomic_swap_int(volatile int *addr, int value)
{
	__asm __volatile("xchgl %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline long
atomic_swap_long(volatile long *addr, long value)
{
	__asm __volatile("xchgq %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline void *
atomic_swap_ptr(volatile void **addr, void *value)
{
	__asm __volatile("xchgq %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline int
atomic_poll_acquire_int(volatile u_int *p)
{
	u_int data;

	__asm __volatile(MPLOCKED "btsl $0,%0; setnc %%al; andl $255,%%eax"
	    : "+m" (*p), "=a" (data));
	return (data);
}

static __inline void
atomic_poll_release_int(volatile u_int *p)
{
	__asm __volatile(MPLOCKED "btrl $0,%0" : "+m" (*p));
}

#endif

/*
 * These functions operate on a 32 bit interrupt interlock which is defined
 * as follows:
 *
 *	bit 0-29	interrupt handler wait counter
 *	bit 30		interrupt handler disabled bit
 *	bit 31		interrupt handler currently running bit (1 = run)
 *
 * atomic_intr_cond_test(P)	Determine if the interlock is in an
 *				acquired state.  Returns 0 if it is not
 *				acquired, non-zero if it is. (not MPLOCKed)
 *
 * atomic_intr_cond_try(P)	Attempt to set bit 31 to acquire the
 *				interlock.  If we are unable to set bit 31
 *				we return 1, otherwise we return 0.
 *
 * atomic_intr_cond_enter(P, func, arg)
 *				Attempt to set bit 31 to acquire the
 *				interlock.  If we are unable to set bit 31,
 *				the wait counter is incremented and func(arg)
 *				is called in a loop until we are able to set
 *				bit 31.  Once we set bit 31, the wait counter
 *				is decremented.
 *
 * atomic_intr_cond_exit(P, func, arg)
 *				Clear bit 31.  If the wait counter is still
 *				non-zero call func(arg) once.
 *
 * atomic_intr_handler_disable(P)
 *				Set bit 30, indicating that the interrupt
 *				handler has been disabled.  Must be called
 *				after the hardware is disabled.
 *
 *				Returns bit 31 indicating whether a serialized
 *				accessor is active (typically the interrupt
 *				handler is running).  0 == not active,
 *				non-zero == active.
 *
 * atomic_intr_handler_enable(P)
 *				Clear bit 30, indicating that the interrupt
 *				handler has been enabled.  Must be called
 *				before the hardware is actually enabled.
 *
 * atomic_intr_handler_is_enabled(P)
 *				Returns bit 30, 0 indicates that the handler
 *				is enabled, non-zero indicates that it is
 *				disabled.  The request counter portion of
 *				the field is ignored. (not MPLOCKed)
 *
 * atomic_intr_cond_inc(P)	Increment the wait counter by 1.
 * atomic_intr_cond_dec(P)	Decrement the wait counter by 1.
 */
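/*
 * Illustrative sketch (not part of this header): serializing a code path
 * against an interrupt handler using the interlock described above.  The
 * names "intr_lock", some_wait_func() and some_wakeup_func() are
 * hypothetical; any void (*)(void *) callbacks may be supplied.
 *
 *	static __atomic_intr_t intr_lock;
 *
 *	atomic_intr_init(&intr_lock);
 *	...
 *	atomic_intr_cond_enter(&intr_lock, some_wait_func, NULL);
 *	... code serialized against the interrupt handler ...
 *	atomic_intr_cond_exit(&intr_lock, some_wakeup_func, NULL);
 */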
#if defined(KLD_MODULE)

void	atomic_intr_init(__atomic_intr_t *p);
int	atomic_intr_handler_disable(__atomic_intr_t *p);
void	atomic_intr_handler_enable(__atomic_intr_t *p);
int	atomic_intr_handler_is_enabled(__atomic_intr_t *p);
int	atomic_intr_cond_test(__atomic_intr_t *p);
int	atomic_intr_cond_try(__atomic_intr_t *p);
void	atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg);
void	atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg);
void	atomic_intr_cond_inc(__atomic_intr_t *p);
void	atomic_intr_cond_dec(__atomic_intr_t *p);

#else

static __inline void
atomic_intr_init(__atomic_intr_t *p)
{
	*p = 0;
}

static __inline int
atomic_intr_handler_disable(__atomic_intr_t *p)
{
	int data;

	__asm __volatile(MPLOCKED "orl $0x40000000,%1; movl %1,%%eax; " \
			 "andl $0x80000000,%%eax" \
			 : "=a" (data), "+m" (*p));
	return (data);
}

static __inline void
atomic_intr_handler_enable(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "andl $0xBFFFFFFF,%0" : "+m" (*p));
}

static __inline int
atomic_intr_handler_is_enabled(__atomic_intr_t *p)
{
	int data;

	__asm __volatile("movl %1,%%eax; andl $0x40000000,%%eax" \
			 : "=a" (data) : "m" (*p));
	return (data);
}

static __inline void
atomic_intr_cond_inc(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "incl %0" : "+m" (*p));
}

static __inline void
atomic_intr_cond_dec(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "decl %0" : "+m" (*p));
}

static __inline void
atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "btsl $31,%0; jnc 3f; " \
			 MPLOCKED "incl %0; " \
			 "1: ;" \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "jmp 1b; " \
			 "2: ;" \
			 MPLOCKED "decl %0; " \
			 "3: ;" \
			 : "+m" (*p) \
			 : "r" (func), "m" (arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
	/* YYY the function call may clobber even more registers? */
}

/*
 * Attempt to enter the interrupt condition variable.  Returns zero on
 * success, 1 on failure.
 */
static __inline int
atomic_intr_cond_try(__atomic_intr_t *p)
{
	int ret;

	__asm __volatile("subl %%eax,%%eax; " \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 "movl $1,%%eax; " \
			 "2: ;" \
			 : "+m" (*p), "=&a" (ret) \
			 : : "cx", "dx");
	return (ret);
}

static __inline int
atomic_intr_cond_test(__atomic_intr_t *p)
{
	return ((int)(*p & 0x80000000));
}

static __inline void
atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "btrl $31,%0; " \
			 "testl $0x3FFFFFFF,%0; jz 1f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "1: ;" \
			 : "+m" (*p) \
			 : "r" (func), "m" (arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
	/* YYY the function call may clobber even more registers? */
}

#endif

/*
 * Atomic compare and set
 *
 *	if (*_dst == _old) *_dst = _new (32 and 64 bit words)
 *
 * Returns 0 on failure, non-zero on success.  The inline is designed to
 * allow the compiler to optimize the common case where the caller calls
 * these functions from inside a conditional.
 */
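/*
 * Illustrative sketch (not part of this header): the canonical
 * compare-and-set retry loop built on atomic_cmpset_int(), here used to
 * atomically OR bits into a word.  The function name is hypothetical.
 *
 *	static __inline void
 *	example_or_int(volatile u_int *p, u_int bits)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *p;
 *		} while (atomic_cmpset_int(p, old, old | bits) == 0);
 *	}
 */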
#if defined(KLD_MODULE)

extern int atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new);
extern long atomic_cmpset_long(volatile u_long *_dst, u_long _old, u_long _new);
extern u_int atomic_fetchadd_int(volatile u_int *_p, u_int _v);
extern u_long atomic_fetchadd_long(volatile u_long *_p, u_long _v);

#else

static __inline int
atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline long
atomic_cmpset_long(volatile u_long *_dst, u_long _old, u_long _new)
{
	u_long res = _old;

	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *_p, u_int _v)
{
	__asm __volatile(MPLOCKED "xaddl %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *_p, u_long _v)
{
	__asm __volatile(MPLOCKED "xaddq %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),		/* 0 */
	  "+m" (*p)		/* 1 */
	: "Ir" (v & 0x1f)	/* 2 */
	: "cc");
	return (res);
}

#endif /* KLD_MODULE */

#if defined(KLD_MODULE)

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)				\
extern u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
extern void	atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);

#else /* !KLD_MODULE */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
			 : "=a" (res),	/* 0 */		\
			   "=m" (*p)	/* 1 */		\
			 : "m" (*p)	/* 2 */		\
			 : "memory");			\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
			 : "=m" (*p),	/* 0 */		\
			   "+r" (v)	/* 1 */		\
			 : "m" (*p));	/* 2 */		\
}							\
struct __hack

#endif /* !KLD_MODULE */

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgq %0,%1",  "xchgq %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD
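/*
 * Illustrative sketch (not part of this header): because
 * atomic_fetchadd_int() returns the previous value, it is a natural fit
 * for handing out unique ids or ticket numbers.  The names here are
 * hypothetical.
 *
 *	static u_int next_id;
 *
 *	static __inline u_int
 *	example_alloc_id(void)
 *	{
 *		return (atomic_fetchadd_int(&next_id, 1));
 *	}
 */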
/* Acquire and release variants are identical to the normal ones. */
#define atomic_set_acq_char		atomic_set_char
#define atomic_set_rel_char		atomic_set_char
#define atomic_clear_acq_char		atomic_clear_char
#define atomic_clear_rel_char		atomic_clear_char
#define atomic_add_acq_char		atomic_add_char
#define atomic_add_rel_char		atomic_add_char
#define atomic_subtract_acq_char	atomic_subtract_char
#define atomic_subtract_rel_char	atomic_subtract_char

#define atomic_set_acq_short		atomic_set_short
#define atomic_set_rel_short		atomic_set_short
#define atomic_clear_acq_short		atomic_clear_short
#define atomic_clear_rel_short		atomic_clear_short
#define atomic_add_acq_short		atomic_add_short
#define atomic_add_rel_short		atomic_add_short
#define atomic_subtract_acq_short	atomic_subtract_short
#define atomic_subtract_rel_short	atomic_subtract_short

#define atomic_set_acq_int		atomic_set_int
#define atomic_set_rel_int		atomic_set_int
#define atomic_clear_acq_int		atomic_clear_int
#define atomic_clear_rel_int		atomic_clear_int
#define atomic_add_acq_int		atomic_add_int
#define atomic_add_rel_int		atomic_add_int
#define atomic_subtract_acq_int		atomic_subtract_int
#define atomic_subtract_rel_int		atomic_subtract_int
#define atomic_cmpset_acq_int		atomic_cmpset_int
#define atomic_cmpset_rel_int		atomic_cmpset_int

#define atomic_set_acq_long		atomic_set_long
#define atomic_set_rel_long		atomic_set_long
#define atomic_clear_acq_long		atomic_clear_long
#define atomic_clear_rel_long		atomic_clear_long
#define atomic_add_acq_long		atomic_add_long
#define atomic_add_rel_long		atomic_add_long
#define atomic_subtract_acq_long	atomic_subtract_long
#define atomic_subtract_rel_long	atomic_subtract_long
#define atomic_cmpset_acq_long		atomic_cmpset_long
#define atomic_cmpset_rel_long		atomic_cmpset_long
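/*
 * Illustrative sketch (not part of this header): publishing data with a
 * release store and consuming it with an acquire load.  The names
 * "data_ready" and "shared" are hypothetical.
 *
 *	static u_int data_ready;
 *	static struct example shared;
 *
 *	producer:
 *		shared = ...;				(fill in the data)
 *		atomic_store_rel_int(&data_ready, 1);	(then publish)
 *
 *	consumer:
 *		while (atomic_load_acq_int(&data_ready) == 0)
 *			;
 *		... safe to read "shared" here ...
 */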
/* cpumask_t is 64-bits on x86-64 */
#define atomic_set_cpumask		atomic_set_long
#define atomic_clear_cpumask		atomic_clear_long
#define atomic_cmpset_cpumask		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define atomic_set_8		atomic_set_char
#define atomic_set_acq_8	atomic_set_acq_char
#define atomic_set_rel_8	atomic_set_rel_char
#define atomic_clear_8		atomic_clear_char
#define atomic_clear_acq_8	atomic_clear_acq_char
#define atomic_clear_rel_8	atomic_clear_rel_char
#define atomic_add_8		atomic_add_char
#define atomic_add_acq_8	atomic_add_acq_char
#define atomic_add_rel_8	atomic_add_rel_char
#define atomic_subtract_8	atomic_subtract_char
#define atomic_subtract_acq_8	atomic_subtract_acq_char
#define atomic_subtract_rel_8	atomic_subtract_rel_char
#define atomic_load_acq_8	atomic_load_acq_char
#define atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16		atomic_set_short
#define atomic_set_acq_16	atomic_set_acq_short
#define atomic_set_rel_16	atomic_set_rel_short
#define atomic_clear_16		atomic_clear_short
#define atomic_clear_acq_16	atomic_clear_acq_short
#define atomic_clear_rel_16	atomic_clear_rel_short
#define atomic_add_16		atomic_add_short
#define atomic_add_acq_16	atomic_add_acq_short
#define atomic_add_rel_16	atomic_add_rel_short
#define atomic_subtract_16	atomic_subtract_short
#define atomic_subtract_acq_16	atomic_subtract_acq_short
#define atomic_subtract_rel_16	atomic_subtract_rel_short
#define atomic_load_acq_16	atomic_load_acq_short
#define atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32		atomic_set_int
#define atomic_set_acq_32	atomic_set_acq_int
#define atomic_set_rel_32	atomic_set_rel_int
#define atomic_clear_32		atomic_clear_int
#define atomic_clear_acq_32	atomic_clear_acq_int
#define atomic_clear_rel_32	atomic_clear_rel_int
#define atomic_add_32		atomic_add_int
#define atomic_add_acq_32	atomic_add_acq_int
#define atomic_add_rel_32	atomic_add_rel_int
#define atomic_subtract_32	atomic_subtract_int
#define atomic_subtract_acq_32	atomic_subtract_acq_int
#define atomic_subtract_rel_32	atomic_subtract_rel_int
#define atomic_load_acq_32	atomic_load_acq_int
#define atomic_store_rel_32	atomic_store_rel_int
#define atomic_cmpset_32	atomic_cmpset_int
#define atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define atomic_readandclear_32	atomic_readandclear_int
#define atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on 64-bit quad words. */
#define atomic_load_acq_64	atomic_load_acq_long
#define atomic_store_rel_64	atomic_store_rel_long
#define atomic_swap_64		atomic_swap_long
#define atomic_fetchadd_64	atomic_fetchadd_long

/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
	atomic_set_long((volatile u_long *)(p), (u_long)(v))
#define atomic_set_acq_ptr(p, v) \
	atomic_set_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_set_rel_ptr(p, v) \
	atomic_set_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_clear_ptr(p, v) \
	atomic_clear_long((volatile u_long *)(p), (u_long)(v))
#define atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_add_ptr(p, v) \
	atomic_add_long((volatile u_long *)(p), (u_long)(v))
#define atomic_add_acq_ptr(p, v) \
	atomic_add_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_add_rel_ptr(p, v) \
	atomic_add_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_subtract_ptr(p, v) \
	atomic_subtract_long((volatile u_long *)(p), (u_long)(v))
#define atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_load_acq_ptr(p) \
	atomic_load_acq_long((volatile u_long *)(p))
#define atomic_store_rel_ptr(p, v) \
	atomic_store_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define atomic_readandclear_ptr(p) \
	atomic_readandclear_long((volatile u_long *)(p))
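/*
 * Illustrative sketch (not part of this header): a lock-free push onto a
 * singly-linked list using atomic_cmpset_ptr().  The structure and
 * function names are hypothetical.  Note that a matching lock-free pop
 * would additionally have to deal with the classic ABA problem; push
 * alone does not.
 *
 *	struct example_node { struct example_node *next; };
 *	static struct example_node *example_head;
 *
 *	static __inline void
 *	example_push(struct example_node *n)
 *	{
 *		do {
 *			n->next = example_head;
 *		} while (atomic_cmpset_ptr(&example_head, n->next, n) == 0);
 *	}
 */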
#endif /* ! _CPU_ATOMIC_H_ */