/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/atomic.h,v 1.9.2.1 2000/07/07 00:38:47 obrien Exp $
 * $DragonFly: src/sys/cpu/i386/include/atomic.h,v 1.25 2008/06/26 23:06:50 dillon Exp $
 */
#ifndef _CPU_ATOMIC_H_
#define _CPU_ATOMIC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif

/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long*)(P)); *(u_long*)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built, or if user code is using these functions.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
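/*
 * Illustrative usage (a minimal sketch only; "my_flags" and the bit
 * values are hypothetical, not part of this header's API):
 *
 *	static volatile u_int my_flags;
 *
 *	atomic_set_int(&my_flags, 0x0001);	...my_flags |= 0x0001
 *	atomic_clear_int(&my_flags, 0x0001);	...my_flags &= ~0x0001
 *	atomic_add_int(&my_flags, 1);		...my_flags += 1
 *
 * Each call compiles down to a single instruction on the target word,
 * lock-prefixed in SMP kernel or userland builds.
 */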
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, V)			\
	extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
	extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);
#else /* !KLD_MODULE */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED	"lock ; "
#else
#define MPLOCKED
#endif

/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.  The primary
 * atomic instructions are MP safe, the nonlocked instructions are
 * local-interrupt-safe (so we don't depend on C 'X |= Y' generating an
 * atomic instruction).
 *
 * +m - memory is read and written (=m - memory is only written)
 * iq - integer constant or %ax/%bx/%cx/%dx (ir = int constant or any reg)
 *	(Note: byte instructions only work on %ax,%bx,%cx, or %dx).  iq
 *	is good enough for our needs so don't get fancy.
 */

/* egcs 1.1.2+ version */
#define ATOMIC_ASM(NAME, TYPE, OP, V)			\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
			 : "+m" (*p)			\
			 : "iq" (V));			\
}							\
static __inline void					\
atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(OP				\
			 : "+m" (*p)			\
			 : "iq" (V));			\
}

#endif /* KLD_MODULE */

/* egcs 1.1.2+ version */
ATOMIC_ASM(set,      char,  "orb %b1,%0",  v)
ATOMIC_ASM(clear,    char,  "andb %b1,%0", ~v)
ATOMIC_ASM(add,      char,  "addb %b1,%0", v)
ATOMIC_ASM(subtract, char,  "subb %b1,%0", v)

ATOMIC_ASM(set,      short, "orw %w1,%0",  v)
ATOMIC_ASM(clear,    short, "andw %w1,%0", ~v)
ATOMIC_ASM(add,      short, "addw %w1,%0", v)
ATOMIC_ASM(subtract, short, "subw %w1,%0", v)

ATOMIC_ASM(set,      int,   "orl %1,%0",  v)
ATOMIC_ASM(clear,    int,   "andl %1,%0", ~v)
ATOMIC_ASM(add,      int,   "addl %1,%0", v)
ATOMIC_ASM(subtract, int,   "subl %1,%0", v)

ATOMIC_ASM(set,      long,  "orq %1,%0",  v)
ATOMIC_ASM(clear,    long,  "andq %1,%0", ~v)
ATOMIC_ASM(add,      long,  "addq %1,%0", v)
ATOMIC_ASM(subtract, long,  "subq %1,%0", v)

#if defined(KLD_MODULE)
u_long	atomic_readandclear_long(volatile u_long *addr);
#else /* !KLD_MODULE */
static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"#	atomic_readandclear_long"
			: "+r" (res),		/* 0 */
			  "=m" (*addr)		/* 1 */
			: "m" (*addr));

	return (res);
}
#endif /* KLD_MODULE */
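/*
 * For reference, a sketch of what one of the invocations above expands
 * to in a non-module SMP kernel build, e.g.
 * ATOMIC_ASM(add, int, "addl %1,%0", v):
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; " "addl %1,%0"
 *				 : "+m" (*p)
 *				 : "iq" (v));
 *	}
 *
 * plus an identical atomic_add_int_nonlocked() without the lock prefix.
 */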
/*
 * atomic_poll_acquire_int(P)	Returns non-zero on success, 0 if the lock
 *				has already been acquired.
 * atomic_poll_release_int(P)
 *
 * These support the NDIS driver and are also used for IPIQ interlocks
 * between cpus.  Both the acquisition and release must be
 * cache-synchronizing instructions.
 */

#if defined(KLD_MODULE)

extern int atomic_swap_int(volatile int *addr, int value);
extern int atomic_poll_acquire_int(volatile u_int *p);
extern void atomic_poll_release_int(volatile u_int *p);

#else

static __inline int
atomic_swap_int(volatile int *addr, int value)
{
	__asm __volatile("xchgl %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline
int
atomic_poll_acquire_int(volatile u_int *p)
{
	u_int data;

	__asm __volatile(MPLOCKED "btsl $0,%0; setnc %%al; andl $255,%%eax" : "+m" (*p), "=a" (data));
	return(data);
}

static __inline
void
atomic_poll_release_int(volatile u_int *p)
{
	__asm __volatile(MPLOCKED "btrl $0,%0" : "+m" (*p));
}

#endif
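/*
 * Illustrative usage (a sketch only; "my_interlock" is hypothetical):
 *
 *	static volatile u_int my_interlock;
 *
 *	while (atomic_poll_acquire_int(&my_interlock) == 0)
 *		;			...spin, or back off and retry
 *	...critical section...
 *	atomic_poll_release_int(&my_interlock);
 */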
/*
 * These functions operate on a 32 bit interrupt interlock which is defined
 * as follows:
 *
 *	bit 0-30	interrupt handler disabled bits (counter)
 *	bit 31		interrupt handler currently running bit (1 = run)
 *
 * atomic_intr_cond_test(P)	Determine if the interlock is in an
 *				acquired state.  Returns 0 if it is not
 *				acquired, non-zero if it is.
 *
 * atomic_intr_cond_try(P)
 *				Increment the request counter and attempt to
 *				set bit 31 to acquire the interlock.  If
 *				we are unable to set bit 31 the request
 *				counter is decremented and we return 1,
 *				otherwise we return 0.
 *
 * atomic_intr_cond_enter(P, func, arg)
 *				Increment the request counter and attempt to
 *				set bit 31 to acquire the interlock.  If
 *				we are unable to set bit 31 func(arg) is
 *				called in a loop until we are able to set
 *				bit 31.
 *
 * atomic_intr_cond_exit(P, func, arg)
 *				Decrement the request counter and clear bit
 *				31.  If the request counter is still non-zero
 *				call func(arg) once.
 *
 * atomic_intr_handler_disable(P)
 *				Set bit 30, indicating that the interrupt
 *				handler has been disabled.  Must be called
 *				after the hardware is disabled.
 *
 *				Returns bit 31 indicating whether a serialized
 *				accessor is active (typically the interrupt
 *				handler is running).  0 == not active,
 *				non-zero == active.
 *
 * atomic_intr_handler_enable(P)
 *				Clear bit 30, indicating that the interrupt
 *				handler has been enabled.  Must be called
 *				before the hardware is actually enabled.
 *
 * atomic_intr_handler_is_enabled(P)
 *				Returns bit 30, 0 indicates that the handler
 *				is enabled, non-zero indicates that it is
 *				disabled.  The request counter portion of
 *				the field is ignored.
 */

#if defined(KLD_MODULE)

void atomic_intr_init(__atomic_intr_t *p);
int atomic_intr_handler_disable(__atomic_intr_t *p);
void atomic_intr_handler_enable(__atomic_intr_t *p);
int atomic_intr_handler_is_enabled(__atomic_intr_t *p);
int atomic_intr_cond_test(__atomic_intr_t *p);
int atomic_intr_cond_try(__atomic_intr_t *p);
void atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg);
void atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg);

#else

static __inline
void
atomic_intr_init(__atomic_intr_t *p)
{
	*p = 0;
}

static __inline
int
atomic_intr_handler_disable(__atomic_intr_t *p)
{
	int data;

	__asm __volatile(MPLOCKED "orl $0x40000000,%1; movl %1,%%eax; " \
			 "andl $0x80000000,%%eax" \
			 : "=a"(data) , "+m"(*p));
	return(data);
}

static __inline
void
atomic_intr_handler_enable(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "andl $0xBFFFFFFF,%0" : "+m" (*p));
}

static __inline
int
atomic_intr_handler_is_enabled(__atomic_intr_t *p)
{
	int data;

	__asm __volatile("movl %1,%%eax; andl $0x40000000,%%eax" \
			 : "=a"(data) : "m"(*p));
	return(data);
}

static __inline
void
atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "incl %0; " \
			 "1: ;" \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "jmp 1b; " \
			 "2: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
	/* YYY the function call may clobber even more registers? */
}

/*
 * Attempt to enter the interrupt condition variable.  Returns zero on
 * success, 1 on failure.
 */
static __inline
int
atomic_intr_cond_try(__atomic_intr_t *p)
{
	int ret;

	__asm __volatile(MPLOCKED "incl %0; " \
			 "1: ;" \
			 "subl %%eax,%%eax; " \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 MPLOCKED "decl %0; " \
			 "movl $1,%%eax;" \
			 "2: ;"
			 : "+m" (*p), "=a"(ret)
#ifdef __clang__
			 : : "ax", "cx", "dx");
#else
			 : : "cx", "dx");
#endif
	return (ret);
}

static __inline
int
atomic_intr_cond_test(__atomic_intr_t *p)
{
	return((int)(*p & 0x80000000));
}

static __inline
void
atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "decl %0; " \
			 MPLOCKED "btrl $31,%0; " \
			 "testl $0x3FFFFFFF,%0; jz 1f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "1: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
	/* YYY the function call may clobber even more registers? */
}

#endif
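/*
 * Illustrative usage of the interlock (a sketch only; the serializer
 * and callback names are hypothetical):
 *
 *	static __atomic_intr_t my_serializer;
 *
 *	atomic_intr_cond_enter(&my_serializer, my_block_func, arg);
 *	...serialized code...
 *	atomic_intr_cond_exit(&my_serializer, my_unblock_func, arg);
 *
 * my_block_func(arg) is called in a loop while another accessor holds
 * bit 31; my_unblock_func(arg) is called once on exit if the request
 * counter is still non-zero.
 */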
/*
 * Atomic compare and set
 *
 * if (*_dst == _old) *_dst = _new (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.  The inline is designed to
 * allow the compiler to optimize the common case where the caller calls
 * these functions from inside a conditional.
 */
#if defined(KLD_MODULE)

extern int atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new);
extern long atomic_cmpset_long(volatile u_long *_dst, u_long _exp, u_long _src);
extern u_int atomic_fetchadd_int(volatile u_int *_p, u_int _v);

#else

static __inline int
atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline long
atomic_cmpset_long(volatile u_long *_dst, u_long _old, u_long _new)
{
	u_long res = _old;

	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *_p, u_int _v)
{
	__asm __volatile(MPLOCKED "xaddl %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

#endif /* KLD_MODULE */

#if defined(KLD_MODULE)

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
extern u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p); \
extern void	atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);

#else /* !KLD_MODULE */

#if defined(_KERNEL) && !defined(SMP)
/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we don't need any memory barriers.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
			 : "=a" (res),		/* 0 */	\
			   "=m" (*p)		/* 1 */	\
			 : "m" (*p)		/* 2 */	\
			 : "memory");			\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
			 : "=m" (*p),		/* 0 */	\
			   "+r" (v)		/* 1 */	\
			 : "m" (*p));		/* 2 */	\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* !KLD_MODULE */

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgq %0,%1",  "xchgq %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

/* Acquire and release variants are identical to the normal ones. */
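/*
 * Illustrative cmpset/fetchadd usage (a sketch only; "counter" and the
 * update expression are hypothetical):
 *
 *	static volatile u_int counter;
 *	u_int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old * 2 + 1;	...any read-modify-write update
 *	} while (atomic_cmpset_int(&counter, old, new) == 0);
 *
 * For plain increments, atomic_fetchadd_int(&counter, 1) returns the
 * pre-increment value using a single locked xadd instruction.
 */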
#define atomic_set_acq_char		atomic_set_char
#define atomic_set_rel_char		atomic_set_char
#define atomic_clear_acq_char		atomic_clear_char
#define atomic_clear_rel_char		atomic_clear_char
#define atomic_add_acq_char		atomic_add_char
#define atomic_add_rel_char		atomic_add_char
#define atomic_subtract_acq_char	atomic_subtract_char
#define atomic_subtract_rel_char	atomic_subtract_char

#define atomic_set_acq_short		atomic_set_short
#define atomic_set_rel_short		atomic_set_short
#define atomic_clear_acq_short		atomic_clear_short
#define atomic_clear_rel_short		atomic_clear_short
#define atomic_add_acq_short		atomic_add_short
#define atomic_add_rel_short		atomic_add_short
#define atomic_subtract_acq_short	atomic_subtract_short
#define atomic_subtract_rel_short	atomic_subtract_short

#define atomic_set_acq_int		atomic_set_int
#define atomic_set_rel_int		atomic_set_int
#define atomic_clear_acq_int		atomic_clear_int
#define atomic_clear_rel_int		atomic_clear_int
#define atomic_add_acq_int		atomic_add_int
#define atomic_add_rel_int		atomic_add_int
#define atomic_subtract_acq_int		atomic_subtract_int
#define atomic_subtract_rel_int		atomic_subtract_int
#define atomic_cmpset_acq_int		atomic_cmpset_int
#define atomic_cmpset_rel_int		atomic_cmpset_int

#define atomic_set_acq_long		atomic_set_long
#define atomic_set_rel_long		atomic_set_long
#define atomic_clear_acq_long		atomic_clear_long
#define atomic_clear_rel_long		atomic_clear_long
#define atomic_add_acq_long		atomic_add_long
#define atomic_add_rel_long		atomic_add_long
#define atomic_subtract_acq_long	atomic_subtract_long
#define atomic_subtract_rel_long	atomic_subtract_long
#define atomic_cmpset_acq_long		atomic_cmpset_long
#define atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define atomic_set_8		atomic_set_char
#define atomic_set_acq_8	atomic_set_acq_char
#define atomic_set_rel_8	atomic_set_rel_char
#define atomic_clear_8		atomic_clear_char
#define atomic_clear_acq_8	atomic_clear_acq_char
#define atomic_clear_rel_8	atomic_clear_rel_char
#define atomic_add_8		atomic_add_char
#define atomic_add_acq_8	atomic_add_acq_char
#define atomic_add_rel_8	atomic_add_rel_char
#define atomic_subtract_8	atomic_subtract_char
#define atomic_subtract_acq_8	atomic_subtract_acq_char
#define atomic_subtract_rel_8	atomic_subtract_rel_char
#define atomic_load_acq_8	atomic_load_acq_char
#define atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16		atomic_set_short
#define atomic_set_acq_16	atomic_set_acq_short
#define atomic_set_rel_16	atomic_set_rel_short
#define atomic_clear_16		atomic_clear_short
#define atomic_clear_acq_16	atomic_clear_acq_short
#define atomic_clear_rel_16	atomic_clear_rel_short
#define atomic_add_16		atomic_add_short
#define atomic_add_acq_16	atomic_add_acq_short
#define atomic_add_rel_16	atomic_add_rel_short
#define atomic_subtract_16	atomic_subtract_short
#define atomic_subtract_acq_16	atomic_subtract_acq_short
#define atomic_subtract_rel_16	atomic_subtract_rel_short
#define atomic_load_acq_16	atomic_load_acq_short
#define atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32		atomic_set_int
#define atomic_set_acq_32	atomic_set_acq_int
#define atomic_set_rel_32	atomic_set_rel_int
#define atomic_clear_32		atomic_clear_int
#define atomic_clear_acq_32	atomic_clear_acq_int
#define atomic_clear_rel_32	atomic_clear_rel_int
#define atomic_add_32		atomic_add_int
#define atomic_add_acq_32	atomic_add_acq_int
#define atomic_add_rel_32	atomic_add_rel_int
#define atomic_subtract_32	atomic_subtract_int
#define atomic_subtract_acq_32	atomic_subtract_acq_int
#define atomic_subtract_rel_32	atomic_subtract_rel_int
#define atomic_load_acq_32	atomic_load_acq_int
#define atomic_store_rel_32	atomic_store_rel_int
#define atomic_cmpset_32	atomic_cmpset_int
#define atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define atomic_readandclear_32	atomic_readandclear_int
#define atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on pointers. */
#define atomic_set_ptr		atomic_set_long
#define atomic_set_acq_ptr	atomic_set_acq_long
#define atomic_set_rel_ptr	atomic_set_rel_long
#define atomic_clear_ptr	atomic_clear_long
#define atomic_clear_acq_ptr	atomic_clear_acq_long
#define atomic_clear_rel_ptr	atomic_clear_rel_long
#define atomic_add_ptr		atomic_add_long
#define atomic_add_acq_ptr	atomic_add_acq_long
#define atomic_add_rel_ptr	atomic_add_rel_long
#define atomic_subtract_ptr	atomic_subtract_long
#define atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define atomic_load_acq_ptr	atomic_load_acq_long
#define atomic_store_rel_ptr	atomic_store_rel_long
#define atomic_cmpset_ptr	atomic_cmpset_long
#define atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define atomic_readandclear_ptr	atomic_readandclear_long

#endif /* ! _CPU_ATOMIC_H_ */