/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef __GNUC__
#ifndef lint
#error "This file must be compiled with GCC or lint"
#endif /* lint */
#endif /* __GNUC__ */

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 * atomic_readandclear_int(P)	(return *(u_int*)P; *(u_int*)P = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return *(u_long*)P; *(u_long*)P = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
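/*
 * Illustrative usage sketch (not part of the interface itself); the
 * names `dirty_flags', `pending' and DF_SYNC below are hypothetical:
 *
 *	static volatile u_int dirty_flags;
 *	u_int pending;
 *
 *	atomic_set_int(&dirty_flags, DF_SYNC);		set a flag bit
 *	atomic_clear_int(&dirty_flags, DF_SYNC);	clear it again
 *	pending = atomic_readandclear_int(&dirty_flags); fetch and zero,
 *							 as one atomic step
 */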
#if defined(KLD_MODULE)

#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE */

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif

/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.
 */
#ifdef __GNUC__
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(__XSTRING(MPLOCKED) OP		\
			 : "+m" (*p)			\
			 : CONS (V));			\
}
#else /* !__GNUC__ */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
#endif /* __GNUC__ */

/*
 * Atomic compare and set, used by the mutex functions.
 *
 *	if (*dst == exp) *dst = src	(all 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#if defined(__GNUC__)
#if defined(I386_CPU)
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%0,%2 ;		"
	"	jne	1f ;		"
	"	movl	%1,%2 ;		"
	"1:				"
	"	sete	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}
#else /* defined(I386_CPU) */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile(
	"	" __XSTRING(MPLOCKED) "	"
	"	cmpxchgl %1,%2 ;	"
	"	setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}
#endif /* defined(I386_CPU) */
#else /* !defined(__GNUC__) */
static __inline int
atomic_cmpset_int(volatile u_int *dst __unused, u_int exp __unused,
    u_int src __unused)
{
	return (0);		/* XXX: dummy for lint; never executed. */
}
#endif /* defined(__GNUC__) */
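/*
 * Example (an illustrative sketch only; the name `giant_lk' is
 * hypothetical): atomic_cmpset_int is the building block for a simple
 * spin lock.  The acquire attempt retries until the word changes from
 * 0 to 1 atomically; the release is a plain releasing store.
 *
 *	static volatile u_int giant_lk = 0;
 *
 *	while (atomic_cmpset_int(&giant_lk, 0, 1) == 0)
 *		continue;			spin until 0 -> 1 succeeds
 *	... critical section ...
 *	atomic_store_rel_int(&giant_lk, 0);	release the lock
 */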
#if defined(__GNUC__)
#if defined(I386_CPU)
/*
 * We assume that a = b will do atomic loads and stores.
 *
 * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
 * memory ordering.  These should only be used on a 386.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
	__asm __volatile("" : : : "memory");		\
}
#else /* !defined(I386_CPU) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(__XSTRING(MPLOCKED) LOP	\
	: "=a" (res),			/* 0 (result) */\
	  "+m" (*p)			/* 1 */		\
	: : "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "+m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: : "memory");					\
}
#endif /* defined(I386_CPU) */
#else /* !defined(__GNUC__) */

/*
 * XXX: Dummy prototypes for compiling with lint.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)				\
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p __unused);		\
void atomic_store_rel_##TYPE(volatile u_##TYPE *p __unused,		\
    u_##TYPE v __unused)

#endif /* defined(__GNUC__) */
#endif /* KLD_MODULE */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	 "cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short, "cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	 "cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	 "cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD
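/*
 * Illustrative sketch of the acquire/release semantics (the names
 * `data', `ready', `compute' and `consume' are hypothetical): a writer
 * publishes data and then sets a flag with a releasing store; a reader
 * that polls the flag with an acquiring load is guaranteed to observe
 * the data once it sees the flag set.
 *
 *	writer:					reader:
 *	data = compute();			while (atomic_load_acq_int(&ready) == 0)
 *	atomic_store_rel_int(&ready, 1);		continue;
 *						consume(data);
 */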
#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_long		atomic_cmpset_int
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_int
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_int

#define	atomic_cmpset_acq_ptr		atomic_cmpset_ptr
#define	atomic_cmpset_rel_ptr		atomic_cmpset_ptr

#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
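/*
 * The fixed-width names above are plain aliases: on i386, u_char,
 * u_short, u_int and u_long are 8, 16, 32 and 32 bits wide
 * respectively, so no separate implementations are needed.  A rough
 * illustration (the variable `cnt' is hypothetical):
 *
 *	volatile u_int32_t cnt;
 *
 *	atomic_add_32(&cnt, 1);		expands to
 *	atomic_add_int(&cnt, 1);	which emits "lock ; addl $1,cnt"
 *					on an SMP kernel
 */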
#if !defined(WANT_FUNCTIONS)
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
	return ((void *)atomic_load_acq_int((volatile u_int *)p));
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
	atomic_store_rel_int((volatile u_int *)p, (u_int)v);
}

#define ATOMIC_PTR(NAME)				\
static __inline void					\
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_int((volatile u_int *)p, v);	\
}							\
							\
static __inline void					\
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_acq_int((volatile u_int *)p, v);\
}							\
							\
static __inline void					\
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_rel_int((volatile u_int *)p, v);\
}

ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR

#if defined(__GNUC__)
static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	__asm __volatile(
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}
#else /* !defined(__GNUC__) */
/*
 * XXX: Dummy for lint.
 */
static __inline u_int
atomic_readandclear_int(volatile u_int *addr __unused)
{
	return (0);
}
#endif /* defined(__GNUC__) */

#if defined(__GNUC__)
static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	__asm __volatile(
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}
#else /* !defined(__GNUC__) */
/*
 * XXX: Dummy for lint.
 */
static __inline u_long
atomic_readandclear_long(volatile u_long *addr __unused)
{
	return (0);
}
#endif /* defined(__GNUC__) */
#endif /* !defined(WANT_FUNCTIONS) */
#endif /* ! _MACHINE_ATOMIC_H_ */