/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/atomic.h,v 1.9.2.1 2000/07/07 00:38:47 obrien Exp $
 */
#ifndef _CPU_ATOMIC_H_
#define _CPU_ATOMIC_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif

/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long*)(P)); *(u_long*)(P) = 0;)
 * atomic_readandclear_int(P)	(return (*(u_int*)(P)); *(u_int*)(P) = 0;)
 */
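/*
 * Example (illustrative sketch only, not part of this header): using the
 * basic atomics to maintain a shared flags word.  The names 'my_flags'
 * and MYFLAG_DIRTY are hypothetical.
 *
 *	static volatile u_int my_flags;
 *	#define MYFLAG_DIRTY	0x0001
 *
 *	atomic_set_int(&my_flags, MYFLAG_DIRTY);	// set bit, SMP-safe
 *	atomic_clear_int(&my_flags, MYFLAG_DIRTY);	// clear bit, SMP-safe
 */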
/*
 * The above functions are expanded inline in the statically-linked
 * kernel and lock prefixes are generated.
 *
 * Kernel modules call real functions which are built into the kernel.
 */
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)				\
	extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
	extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);

int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);

#else /* !KLD_MODULE */
#define MPLOCKED	"lock ; "

/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.  The primary
 * atomic instructions are MP safe, the nonlocked instructions are
 * local-interrupt-safe (so we don't depend on C 'X |= Y' generating an
 * atomic instruction).
 *
 * +m - memory is read and written (=m - memory is only written)
 * iq - integer constant or %ax/%bx/%cx/%dx (ir = int constant or any reg)
 *	(Note: byte instructions only work on %ax,%bx,%cx, or %dx).  iq
 *	is good enough for our needs so don't get fancy.
 * r  - any register.
 *
 * NOTE: 64-bit immediate values are not supported for most x86-64
 *	 instructions so we have to use "r".
 */

/* egcs 1.1.2+ version */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
static __inline void						\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
	__asm __volatile(MPLOCKED OP				\
			 : "+m" (*p)				\
			 : CONS (V));				\
}								\
static __inline void						\
atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v)\
{								\
	__asm __volatile(OP					\
			 : "+m" (*p)				\
			 : CONS (V));				\
}

#endif /* KLD_MODULE */

/* egcs 1.1.2+ version */
ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq", v)
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v)
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq", v)
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq", v)

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "iq", v)
ATOMIC_ASM(clear,    short, "andw %w1,%0", "iq", ~v)
ATOMIC_ASM(add,	     short, "addw %w1,%0", "iq", v)
ATOMIC_ASM(subtract, short, "subw %w1,%0", "iq", v)

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "iq", v)
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "iq", ~v)
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "iq", v)
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "iq", v)

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "r",  v)
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "r",  ~v)
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "r",  v)
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "r",  v)

#if defined(KLD_MODULE)

u_long	atomic_readandclear_long(volatile u_long *addr);
u_int	atomic_readandclear_int(volatile u_int *addr);

#else /* !KLD_MODULE */

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

#endif /* KLD_MODULE */
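/*
 * Example (illustrative sketch only, not part of this header): a
 * "pending work" pattern.  Producers set bits; the consumer atomically
 * drains all pending bits in one step so that a bit set concurrently
 * with the drain can never be lost.  The names are hypothetical.
 *
 *	static volatile u_int pending_mask;
 *
 *	// producer (e.g. from an interrupt handler)
 *	atomic_set_int(&pending_mask, 1 << unit);
 *
 *	// consumer: fetch and clear the whole mask in one atomic step
 *	u_int work = atomic_readandclear_int(&pending_mask);
 *	while (work) {
 *		process(ffs(work) - 1);
 *		work &= work - 1;
 *	}
 */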
/*
 * atomic_poll_acquire_int(P)	Returns non-zero on success, 0 if the lock
 *				has already been acquired.
 * atomic_poll_release_int(P)
 *
 * These support the NDIS driver and are also used for IPIQ interlocks
 * between cpus.  Both the acquisition and release must be
 * cache-synchronizing instructions.
 */

#if defined(KLD_MODULE)

extern int atomic_swap_int(volatile int *addr, int value);
extern long atomic_swap_long(volatile long *addr, long value);
extern void *atomic_swap_ptr(volatile void **addr, void *value);
extern int atomic_poll_acquire_int(volatile u_int *p);
extern void atomic_poll_release_int(volatile u_int *p);

#else

static __inline int
atomic_swap_int(volatile int *addr, int value)
{
	__asm __volatile("xchgl %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline long
atomic_swap_long(volatile long *addr, long value)
{
	__asm __volatile("xchgq %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline void *
atomic_swap_ptr(volatile void **addr, void *value)
{
	__asm __volatile("xchgq %0, %1" :
	    "=r" (value), "=m" (*addr) : "0" (value) : "memory");
	return (value);
}

static __inline int
atomic_poll_acquire_int(volatile u_int *p)
{
	u_int data;

	__asm __volatile(MPLOCKED "btsl $0,%0; setnc %%al; andl $255,%%eax"
	    : "+m" (*p), "=a" (data));
	return (data);
}

static __inline void
atomic_poll_release_int(volatile u_int *p)
{
	__asm __volatile(MPLOCKED "btrl $0,%0" : "+m" (*p));
}

#endif
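/*
 * Example (illustrative sketch only, not part of this header): a simple
 * polled interlock built on the poll primitives.  'my_interlock' is
 * hypothetical; cpu_pause() stands in for whatever backoff the caller
 * actually uses while spinning.
 *
 *	static volatile u_int my_interlock;
 *
 *	while (atomic_poll_acquire_int(&my_interlock) == 0)
 *		cpu_pause();		// 0 means someone else holds bit 0
 *	... critical section ...
 *	atomic_poll_release_int(&my_interlock);
 */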
/*
 * These functions operate on a 32 bit interrupt interlock which is defined
 * as follows:
 *
 *	bit 0-29	interrupt handler wait counter
 *	bit 30		interrupt handler disabled bit
 *	bit 31		interrupt handler currently running bit (1 = run)
 *
 * atomic_intr_cond_test(P)	Determine if the interlock is in an
 *				acquired state.  Returns 0 if it is not
 *				acquired, non-zero if it is. (not MPLOCKed)
 *
 * atomic_intr_cond_try(P)	Attempt to set bit 31 to acquire the
 *				interlock.  If we are unable to set bit 31
 *				we return 1, otherwise we return 0.
 *
 * atomic_intr_cond_enter(P, func, arg)
 *				Attempt to set bit 31 to acquire the
 *				interlock.  If we are unable to set bit 31,
 *				the wait counter is incremented and func(arg)
 *				is called in a loop until we are able to set
 *				bit 31.  Once we set bit 31, the wait counter
 *				is decremented.
 *
 * atomic_intr_cond_exit(P, func, arg)
 *				Clear bit 31.  If the wait counter is still
 *				non-zero call func(arg) once.
 *
 * atomic_intr_handler_disable(P)
 *				Set bit 30, indicating that the interrupt
 *				handler has been disabled.  Must be called
 *				after the hardware is disabled.
 *
 *				Returns bit 31 indicating whether a serialized
 *				accessor is active (typically the interrupt
 *				handler is running).  0 == not active,
 *				non-zero == active.
 *
 * atomic_intr_handler_enable(P)
 *				Clear bit 30, indicating that the interrupt
 *				handler has been enabled.  Must be called
 *				before the hardware is actually enabled.
 *
 * atomic_intr_handler_is_enabled(P)
 *				Returns bit 30, 0 indicates that the handler
 *				is enabled, non-zero indicates that it is
 *				disabled.  The request counter portion of
 *				the field is ignored. (not MPLOCKed)
 *
 * atomic_intr_cond_inc(P)	Increment the wait counter by 1.
 * atomic_intr_cond_dec(P)	Decrement the wait counter by 1.
 */

#if defined(KLD_MODULE)

void atomic_intr_init(__atomic_intr_t *p);
int atomic_intr_handler_disable(__atomic_intr_t *p);
void atomic_intr_handler_enable(__atomic_intr_t *p);
int atomic_intr_handler_is_enabled(__atomic_intr_t *p);
int atomic_intr_cond_test(__atomic_intr_t *p);
int atomic_intr_cond_try(__atomic_intr_t *p);
void atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg);
void atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg);
void atomic_intr_cond_inc(__atomic_intr_t *p);
void atomic_intr_cond_dec(__atomic_intr_t *p);

#else

static __inline void
atomic_intr_init(__atomic_intr_t *p)
{
	*p = 0;
}

static __inline int
atomic_intr_handler_disable(__atomic_intr_t *p)
{
	int data;

	__asm __volatile(MPLOCKED "orl $0x40000000,%1; movl %1,%%eax; " \
				  "andl $0x80000000,%%eax" \
			 : "=a"(data) , "+m"(*p));
	return (data);
}

static __inline void
atomic_intr_handler_enable(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "andl $0xBFFFFFFF,%0" : "+m" (*p));
}

static __inline int
atomic_intr_handler_is_enabled(__atomic_intr_t *p)
{
	int data;

	__asm __volatile("movl %1,%%eax; andl $0x40000000,%%eax" \
			 : "=a"(data) : "m"(*p));
	return (data);
}

static __inline void
atomic_intr_cond_inc(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "incl %0" : "+m" (*p));
}

static __inline void
atomic_intr_cond_dec(__atomic_intr_t *p)
{
	__asm __volatile(MPLOCKED "decl %0" : "+m" (*p));
}

static __inline void
atomic_intr_cond_enter(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "btsl $31,%0; jnc 3f; " \
			 MPLOCKED "incl %0; " \
			 "1: ;" \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "jmp 1b; " \
			 "2: ;" \
			 MPLOCKED "decl %0; " \
			 "3: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
		/* YYY the function call may clobber even more registers? */
}

/*
 * Attempt to enter the interrupt condition variable.  Returns zero on
 * success, 1 on failure.
 */
static __inline int
atomic_intr_cond_try(__atomic_intr_t *p)
{
	int ret;

	__asm __volatile("subl %%eax,%%eax; " \
			 MPLOCKED "btsl $31,%0; jnc 2f; " \
			 "movl $1,%%eax;" \
			 "2: ;"
			 : "+m" (*p), "=&a" (ret)
			 : : "cx", "dx");
	return (ret);
}


static __inline int
atomic_intr_cond_test(__atomic_intr_t *p)
{
	return ((int)(*p & 0x80000000));
}

static __inline void
atomic_intr_cond_exit(__atomic_intr_t *p, void (*func)(void *), void *arg)
{
	__asm __volatile(MPLOCKED "btrl $31,%0; " \
			 "testl $0x3FFFFFFF,%0; jz 1f; " \
			 "movq %2,%%rdi; call *%1; " \
			 "1: ;" \
			 : "+m" (*p) \
			 : "r"(func), "m"(arg) \
			 : "ax", "cx", "dx", "rsi", "rdi", "r8", "r9", "r10", "r11");
		/* YYY the function call may clobber even more registers? */
}

#endif
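/*
 * Example (illustrative sketch only, not part of this header): serializing
 * a code path against a running interrupt handler via the interlock.
 * 'my_intr_lock', 'my_wait_func' and 'my_wakeup_func' are hypothetical;
 * 'func' is typically something that processes pending work while we
 * spin waiting for bit 31.
 *
 *	static __atomic_intr_t my_intr_lock;
 *
 *	atomic_intr_init(&my_intr_lock);
 *	...
 *	atomic_intr_cond_enter(&my_intr_lock, my_wait_func, NULL);
 *	... code serialized against the interrupt handler ...
 *	atomic_intr_cond_exit(&my_intr_lock, my_wakeup_func, NULL);
 */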
/*
 * Atomic compare and set
 *
 * if (*_dst == _old) *_dst = _new (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.  The inline is designed to
 * allow the compiler to optimize the common case where the caller calls
 * these functions from inside a conditional.
 *
 * Note that atomic_cmpxchg_int() differs: it returns the previous
 * contents of *_dst rather than a success/failure indication.
 */
#if defined(KLD_MODULE)

extern int atomic_cmpxchg_int(volatile u_int *_dst, u_int _old, u_int _new);
extern int atomic_cmpset_short(volatile u_short *_dst,
			u_short _old, u_short _new);
extern int atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new);
extern int atomic_cmpset_long(volatile u_long *_dst, u_long _exp, u_long _src);
extern u_int atomic_fetchadd_int(volatile u_int *_p, u_int _v);
extern u_long atomic_fetchadd_long(volatile u_long *_p, u_long _v);

#else

static __inline int
atomic_cmpxchg_int(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res);
}

static __inline int
atomic_cmpset_short(volatile u_short *_dst, u_short _old, u_short _new)
{
	u_short res = _old;

	__asm __volatile(MPLOCKED "cmpxchgw %w2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline int
atomic_cmpset_int(volatile u_int *_dst, u_int _old, u_int _new)
{
	u_int res = _old;

	__asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}

static __inline int
atomic_cmpset_long(volatile u_long *_dst, u_long _old, u_long _new)
{
	u_long res = _old;

	__asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
			 : "+a" (res), "=m" (*_dst) \
			 : "r" (_new), "m" (*_dst) \
			 : "memory");
	return (res == _old);
}
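/*
 * Example (illustrative sketch only, not part of this header): the
 * classic compare-and-set update loop.  Take a snapshot, compute the
 * new value, and retry if another cpu changed the word in between.
 * 'shared_max' and 'update_max' are hypothetical.
 *
 *	static volatile u_int shared_max;
 *
 *	void
 *	update_max(u_int v)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = shared_max;
 *			if (v <= old)
 *				return;
 *		} while (atomic_cmpset_int(&shared_max, old, v) == 0);
 *	}
 */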
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *_p, u_int _v)
{
	__asm __volatile(MPLOCKED "xaddl %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *_p, u_long _v)
{
	__asm __volatile(MPLOCKED "xaddq %0,%1; " \
			 : "+r" (_v), "=m" (*_p) \
			 : "m" (*_p) \
			 : "memory");
	return (_v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

#endif /* KLD_MODULE */

#if defined(KLD_MODULE)

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)				\
extern u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
extern void	atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);

#else /* !KLD_MODULE */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
			 : "=a" (res),	/* 0 */		\
			   "=m" (*p)	/* 1 */		\
			 : "m" (*p)	/* 2 */		\
			 : "memory");			\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
			 : "=m" (*p),	/* 0 */		\
			   "+r" (v)	/* 1 */		\
			 : "m" (*p));	/* 2 */		\
}							\
struct __hack

#endif /* !KLD_MODULE */

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgq %0,%1",  "xchgq %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD
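/*
 * Example (illustrative sketch only, not part of this header): handing
 * out unique ids with atomic_fetchadd_int().  Because the previous
 * value is returned, each caller gets a distinct id without a lock.
 * 'next_id' is hypothetical.
 *
 *	static volatile u_int next_id;
 *
 *	u_int id = atomic_fetchadd_int(&next_id, 1);
 */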
/* Acquire and release variants are identical to the normal ones. */
#define atomic_set_acq_char		atomic_set_char
#define atomic_set_rel_char		atomic_set_char
#define atomic_clear_acq_char		atomic_clear_char
#define atomic_clear_rel_char		atomic_clear_char
#define atomic_add_acq_char		atomic_add_char
#define atomic_add_rel_char		atomic_add_char
#define atomic_subtract_acq_char	atomic_subtract_char
#define atomic_subtract_rel_char	atomic_subtract_char

#define atomic_set_acq_short		atomic_set_short
#define atomic_set_rel_short		atomic_set_short
#define atomic_clear_acq_short		atomic_clear_short
#define atomic_clear_rel_short		atomic_clear_short
#define atomic_add_acq_short		atomic_add_short
#define atomic_add_rel_short		atomic_add_short
#define atomic_subtract_acq_short	atomic_subtract_short
#define atomic_subtract_rel_short	atomic_subtract_short

#define atomic_set_acq_int		atomic_set_int
#define atomic_set_rel_int		atomic_set_int
#define atomic_clear_acq_int		atomic_clear_int
#define atomic_clear_rel_int		atomic_clear_int
#define atomic_add_acq_int		atomic_add_int
#define atomic_add_rel_int		atomic_add_int
#define atomic_subtract_acq_int		atomic_subtract_int
#define atomic_subtract_rel_int		atomic_subtract_int
#define atomic_cmpset_acq_int		atomic_cmpset_int
#define atomic_cmpset_rel_int		atomic_cmpset_int

#define atomic_set_acq_long		atomic_set_long
#define atomic_set_rel_long		atomic_set_long
#define atomic_clear_acq_long		atomic_clear_long
#define atomic_clear_rel_long		atomic_clear_long
#define atomic_add_acq_long		atomic_add_long
#define atomic_add_rel_long		atomic_add_long
#define atomic_subtract_acq_long	atomic_subtract_long
#define atomic_subtract_rel_long	atomic_subtract_long
#define atomic_cmpset_acq_long		atomic_cmpset_long
#define atomic_cmpset_rel_long		atomic_cmpset_long

/* cpumask_t is 64-bits on x86-64 */
#define atomic_set_cpumask		atomic_set_long
#define atomic_clear_cpumask		atomic_clear_long
#define atomic_cmpset_cpumask		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define atomic_set_8		atomic_set_char
#define atomic_set_acq_8	atomic_set_acq_char
#define atomic_set_rel_8	atomic_set_rel_char
#define atomic_clear_8		atomic_clear_char
#define atomic_clear_acq_8	atomic_clear_acq_char
#define atomic_clear_rel_8	atomic_clear_rel_char
#define atomic_add_8		atomic_add_char
#define atomic_add_acq_8	atomic_add_acq_char
#define atomic_add_rel_8	atomic_add_rel_char
#define atomic_subtract_8	atomic_subtract_char
#define atomic_subtract_acq_8	atomic_subtract_acq_char
#define atomic_subtract_rel_8	atomic_subtract_rel_char
#define atomic_load_acq_8	atomic_load_acq_char
#define atomic_store_rel_8	atomic_store_rel_char
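/*
 * Example (illustrative sketch only, not part of this header): a simple
 * producer/consumer hand-off using the acquire/release names.  On
 * x86-64 these resolve to the plain locked operations, which already
 * behave as full barriers.  'my_data' and 'my_ready' are hypothetical.
 *
 *	static volatile u_long my_data;
 *	static volatile u_int my_ready;
 *
 *	// producer: publish the data, then release-store the flag
 *	my_data = compute();
 *	atomic_store_rel_int(&my_ready, 1);
 *
 *	// consumer: acquire-load the flag before reading the data
 *	while (atomic_load_acq_int(&my_ready) == 0)
 *		;
 *	use(my_data);
 */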
/* Operations on 16-bit words. */
#define atomic_set_16		atomic_set_short
#define atomic_set_acq_16	atomic_set_acq_short
#define atomic_set_rel_16	atomic_set_rel_short
#define atomic_clear_16		atomic_clear_short
#define atomic_clear_acq_16	atomic_clear_acq_short
#define atomic_clear_rel_16	atomic_clear_rel_short
#define atomic_add_16		atomic_add_short
#define atomic_add_acq_16	atomic_add_acq_short
#define atomic_add_rel_16	atomic_add_rel_short
#define atomic_subtract_16	atomic_subtract_short
#define atomic_subtract_acq_16	atomic_subtract_acq_short
#define atomic_subtract_rel_16	atomic_subtract_rel_short
#define atomic_load_acq_16	atomic_load_acq_short
#define atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32		atomic_set_int
#define atomic_set_acq_32	atomic_set_acq_int
#define atomic_set_rel_32	atomic_set_rel_int
#define atomic_clear_32		atomic_clear_int
#define atomic_clear_acq_32	atomic_clear_acq_int
#define atomic_clear_rel_32	atomic_clear_rel_int
#define atomic_add_32		atomic_add_int
#define atomic_add_acq_32	atomic_add_acq_int
#define atomic_add_rel_32	atomic_add_rel_int
#define atomic_subtract_32	atomic_subtract_int
#define atomic_subtract_acq_32	atomic_subtract_acq_int
#define atomic_subtract_rel_32	atomic_subtract_rel_int
#define atomic_load_acq_32	atomic_load_acq_int
#define atomic_store_rel_32	atomic_store_rel_int
#define atomic_cmpset_32	atomic_cmpset_int
#define atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define atomic_readandclear_32	atomic_readandclear_int
#define atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on 64-bit quad words. */
#define atomic_load_acq_64	atomic_load_acq_long
#define atomic_store_rel_64	atomic_store_rel_long
#define atomic_swap_64		atomic_swap_long
#define atomic_fetchadd_64	atomic_fetchadd_long
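/*
 * Example (illustrative sketch only, not part of this header): the
 * fixed-width names are plain aliases, so code can be written against
 * explicit sizes.  'stats_bytes' is hypothetical.
 *
 *	static volatile u_long stats_bytes;
 *
 *	// identical to atomic_fetchadd_long(&stats_bytes, (u_long)len)
 *	atomic_fetchadd_64(&stats_bytes, (u_long)len);
 */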
/* Operations on pointers. */
#define atomic_set_ptr(p, v)						\
	atomic_set_long((volatile u_long *)(p), (u_long)(v))
#define atomic_set_acq_ptr(p, v)					\
	atomic_set_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_set_rel_ptr(p, v)					\
	atomic_set_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_clear_ptr(p, v)						\
	atomic_clear_long((volatile u_long *)(p), (u_long)(v))
#define atomic_clear_acq_ptr(p, v)					\
	atomic_clear_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_clear_rel_ptr(p, v)					\
	atomic_clear_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_add_ptr(p, v)						\
	atomic_add_long((volatile u_long *)(p), (u_long)(v))
#define atomic_add_acq_ptr(p, v)					\
	atomic_add_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_add_rel_ptr(p, v)					\
	atomic_add_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_subtract_ptr(p, v)					\
	atomic_subtract_long((volatile u_long *)(p), (u_long)(v))
#define atomic_subtract_acq_ptr(p, v)					\
	atomic_subtract_acq_long((volatile u_long *)(p), (u_long)(v))
#define atomic_subtract_rel_ptr(p, v)					\
	atomic_subtract_rel_long((volatile u_long *)(p), (u_long)(v))
#define atomic_load_acq_ptr(p)						\
	atomic_load_acq_long((volatile u_long *)(p))
#define atomic_store_rel_ptr(p, v)					\
	atomic_store_rel_long((volatile u_long *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new)				\
	atomic_cmpset_long((volatile u_long *)(dst), (u_long)(old),	\
	    (u_long)(new))
#define atomic_cmpset_acq_ptr(dst, old, new)				\
	atomic_cmpset_acq_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define atomic_cmpset_rel_ptr(dst, old, new)				\
	atomic_cmpset_rel_long((volatile u_long *)(dst), (u_long)(old), \
	    (u_long)(new))
#define atomic_readandclear_ptr(p)					\
	atomic_readandclear_long((volatile u_long *)(p))

#endif /* ! _CPU_ATOMIC_H_ */