/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/devel/atomics.txt for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

/* The variable that receives the old value of an atomically-accessed
 * variable must be non-qualified, because atomic builtins return values
 * through a pointer-type argument as in __atomic_load(&var, &old, MODEL).
 *
 * This macro has to handle types smaller than int manually, because of
 * implicit promotion.  int and larger types, as well as pointers, can be
 * converted to a non-qualified type just by applying a binary operator.
 */
#define typeof_strip_qual(expr)                                                    \
  typeof(                                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), bool) ||                          \
        __builtin_types_compatible_p(typeof(expr), const bool) ||                  \
        __builtin_types_compatible_p(typeof(expr), volatile bool) ||               \
        __builtin_types_compatible_p(typeof(expr), const volatile bool),           \
        (bool)1,                                                                   \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed char) ||                   \
        __builtin_types_compatible_p(typeof(expr), const signed char) ||           \
        __builtin_types_compatible_p(typeof(expr), volatile signed char) ||        \
        __builtin_types_compatible_p(typeof(expr), const volatile signed char),    \
        (signed char)1,                                                            \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned char) ||                 \
        __builtin_types_compatible_p(typeof(expr), const unsigned char) ||         \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned char) ||      \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned char),  \
        (unsigned char)1,                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed short) ||                  \
        __builtin_types_compatible_p(typeof(expr), const signed short) ||          \
        __builtin_types_compatible_p(typeof(expr), volatile signed short) ||       \
        __builtin_types_compatible_p(typeof(expr), const volatile signed short),   \
        (signed short)1,                                                           \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned short) ||                \
        __builtin_types_compatible_p(typeof(expr), const unsigned short) ||        \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned short) ||     \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \
        (unsigned short)1,                                                         \
      (expr)+0))))))
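
/*
 * Illustrative sketch (not part of the interface above): typeof_strip_qual()
 * lets a local temporary receive the value of a const/volatile-qualified
 * variable, which the __atomic_load(&var, &old, MODEL) style of builtin
 * requires.  The variable name below is hypothetical.
 *
 *     volatile unsigned short counter;
 *     typeof_strip_qual(counter) old;     // 'old' is a plain unsigned short
 *     __atomic_load(&counter, &old, __ATOMIC_RELAXED);
 */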

#ifdef __ATOMIC_RELAXED
/* For C11 atomic ops */

/* Manual memory barriers
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics.  If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */

#define smp_mb()          ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_mb_release()  ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire()  ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })

/* Most compilers currently treat consume and acquire the same, but really
 * no processors except Alpha need a barrier here.  Leave it in if
 * using Thread Sanitizer to avoid warnings, otherwise optimize it away.
 */
#if defined(__SANITIZE_THREAD__)
#define smp_read_barrier_depends()  ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends()  asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends()  barrier()
#endif

/*
 * A signal barrier forces all pending local memory ops to be observed before
 * a SIGSEGV is delivered to the *same* thread.  In practice this is exactly
 * the same as barrier(), but since we have the correct builtin, use it.
 */
#define signal_barrier()  __atomic_signal_fence(__ATOMIC_SEQ_CST)

/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and this lets us do a
 * bit of sanity checking that other 32-bit hosts might build.
 *
 * That said, we have a problem on 64-bit ILP32 hosts in that in order to
 * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
 * We'd prefer not to pull in everything else TCG related, so handle
 * those few cases by hand.
 *
 * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
 * Sparc we always force the use of sparcv9 in configure.  MIPS n32 (ILP32) &
 * n64 (LP64) ABIs are both detected using __mips64.
 */
#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
# define ATOMIC_REG_SIZE  8
#else
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif

/* Weak atomic operations prevent the compiler moving other
 * loads/stores past the atomic operation load/store.  However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that variables that are accessed from
 * different threads should at least be done with __ATOMIC_RELAXED
 * primitives or the result is undefined.  Generally this has little to
 * no effect on the generated code but not using the atomic primitives
 * will get flagged by sanitizers as a violation.
 */
#define qatomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#define qatomic_read(ptr)                                  \
    ({                                                     \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    qatomic_read__nocheck(ptr);                            \
    })

#define qatomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define qatomic_set(ptr, i)  do {                          \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    qatomic_set__nocheck(ptr, i);                          \
} while(0)
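
/*
 * Example (illustrative only; 'req_count' is a hypothetical variable):
 * relaxed accessors make cross-thread loads and stores explicit and keep
 * sanitizers quiet, but impose no ordering beyond the access itself.
 *
 *     unsigned req_count;
 *
 *     qatomic_set(&req_count, 0);                       // writer thread
 *     unsigned snapshot = qatomic_read(&req_count);     // reader thread
 */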

/* See above: most compilers currently treat consume and acquire the
 * same, but this slows down qatomic_rcu_read unnecessarily.
 */
#ifdef __SANITIZE_THREAD__
#define qatomic_rcu_read__nocheck(ptr, valptr)             \
    __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
#define qatomic_rcu_read__nocheck(ptr, valptr)             \
    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);          \
    smp_read_barrier_depends();
#endif

#define qatomic_rcu_read(ptr)                              \
    ({                                                     \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    typeof_strip_qual(*ptr) _val;                          \
    qatomic_rcu_read__nocheck(ptr, &_val);                 \
    _val;                                                  \
    })

#define qatomic_rcu_set(ptr, i) do {                       \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);            \
} while(0)

#define qatomic_load_acquire(ptr)                          \
    ({                                                     \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    typeof_strip_qual(*ptr) _val;                          \
    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);           \
    _val;                                                  \
    })

#define qatomic_store_release(ptr, i)  do {                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);            \
} while(0)


/* All the remaining operations are fully sequentially consistent */

#define qatomic_xchg__nocheck(ptr, i)    ({                \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);       \
})

#define qatomic_xchg(ptr, i)    ({                         \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    qatomic_xchg__nocheck(ptr, i);                         \
})

/* Returns the eventual value, failed or not */
#define qatomic_cmpxchg__nocheck(ptr, old, new)    ({              \
    typeof_strip_qual(*ptr) _old = (old);                          \
    (void)__atomic_compare_exchange_n(ptr, &_old, new, false,      \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
    _old;                                                          \
})

#define qatomic_cmpxchg(ptr, old, new)    ({                       \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);             \
    qatomic_cmpxchg__nocheck(ptr, old, new);                       \
})

/* Provide shorter names for GCC atomic builtins, return old value */
#define qatomic_fetch_inc(ptr)    __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_fetch_dec(ptr)    __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)

#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

#define qatomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
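
/*
 * Sketch of a compare-and-swap retry loop (the helper below is hypothetical,
 * not part of this header): qatomic_cmpxchg() returns the value found in
 * memory whether or not the exchange happened, so the loop compares that
 * value with the one it expected.
 *
 *     static inline void saturating_inc(int *p, int max)
 *     {
 *         int old = qatomic_read(p);
 *         while (old < max) {
 *             int seen = qatomic_cmpxchg(p, old, old + 1);
 *             if (seen == old) {
 *                 break;        // exchange succeeded
 *             }
 *             old = seen;       // lost a race; retry with the current value
 *         }
 *     }
 */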
/* And even shorter names that return void.  */
#define qatomic_inc(ptr) \
    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define qatomic_dec(ptr) \
    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define qatomic_add(ptr, n) \
    ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_sub(ptr, n) \
    ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_and(ptr, n) \
    ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_or(ptr, n) \
    ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_xor(ptr, n) \
    ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))

#else /* __ATOMIC_RELAXED */

#ifdef __alpha__
#define smp_read_barrier_depends()  asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to
 * write-combining memory or non-temporal load/stores from C code.
 */
#define smp_mb_release()  barrier()
#define smp_mb_acquire()  barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level.  Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define qatomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()         ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_mb_release()  ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#define smp_mb_acquire()  ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_mb_release()  ({ asm volatile("sync" ::: "memory"); (void)0; })
#define smp_mb_acquire()  ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()          ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */
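
/*
 * Illustrative message-passing sketch ('data' and 'ready' are hypothetical
 * variables): a release barrier before publishing, paired with an acquire
 * barrier after observing the flag, is the pattern the per-arch definitions
 * above are meant to support.
 *
 *     // producer                        // consumer
 *     data = 42;                         while (!qatomic_read(&ready)) { }
 *     smp_mb_release();                  smp_mb_acquire();
 *     qatomic_set(&ready, true);         use(data);
 */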

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_mb_acquire() and smp_mb_release().
 */
#ifndef smp_mb
#define smp_mb()          __sync_synchronize()
#endif

#ifndef smp_mb_acquire
#define smp_mb_acquire()  __sync_synchronize()
#endif

#ifndef smp_mb_release
#define smp_mb_release()  __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()  barrier()
#endif

#ifndef signal_barrier
#define signal_barrier()  barrier()
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation
 */
#define qatomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
#define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))

#define qatomic_read(ptr)       qatomic_read__nocheck(ptr)
#define qatomic_set(ptr, i)     qatomic_set__nocheck(ptr, i)

/**
 * qatomic_rcu_read - reads an RCU-protected pointer to a local variable
 * within an RCU read-side critical section. The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * qatomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match qatomic_rcu_set(), qatomic_xchg(), qatomic_cmpxchg().
 */
#define qatomic_rcu_read(ptr)    ({              \
    typeof(*ptr) _val = qatomic_read(ptr);       \
    smp_read_barrier_depends();                  \
    _val;                                        \
})

/**
 * qatomic_rcu_set - assigns (publicizes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match qatomic_rcu_read().
 */
#define qatomic_rcu_set(ptr, i)  do {            \
    smp_wmb();                                   \
    qatomic_set(ptr, i);                         \
} while (0)

#define qatomic_load_acquire(ptr)    ({          \
    typeof(*ptr) _val = qatomic_read(ptr);       \
    smp_mb_acquire();                            \
    _val;                                        \
})

#define qatomic_store_release(ptr, i)  do {      \
    smp_mb_release();                            \
    qatomic_set(ptr, i);                         \
} while (0)

#ifndef qatomic_xchg
#if defined(__clang__)
#define qatomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only.  */
#define qatomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
#define qatomic_xchg__nocheck  qatomic_xchg
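
/*
 * Illustrative RCU publication sketch (the struct, variables and use() below
 * are hypothetical): the writer initializes the new object completely before
 * publishing it with qatomic_rcu_set(); a reader fetches the pointer once
 * with qatomic_rcu_read() and only dereferences that local copy.
 *
 *     struct cfg { int limit; } *global_cfg;
 *
 *     // writer
 *     struct cfg *c = g_new0(struct cfg, 1);
 *     c->limit = 10;
 *     qatomic_rcu_set(&global_cfg, c);
 *
 *     // reader, inside an RCU read-side critical section
 *     struct cfg *seen = qatomic_rcu_read(&global_cfg);
 *     if (seen) {
 *         use(seen->limit);
 *     }
 */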

/* Provide shorter names for GCC atomic builtins.  */
#define qatomic_fetch_inc(ptr)    __sync_fetch_and_add(ptr, 1)
#define qatomic_fetch_dec(ptr)    __sync_fetch_and_add(ptr, -1)

#define qatomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define qatomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define qatomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define qatomic_fetch_or(ptr, n)  __sync_fetch_and_or(ptr, n)
#define qatomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)

#define qatomic_inc_fetch(ptr)    __sync_add_and_fetch(ptr, 1)
#define qatomic_dec_fetch(ptr)    __sync_add_and_fetch(ptr, -1)
#define qatomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define qatomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define qatomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define qatomic_or_fetch(ptr, n)  __sync_or_and_fetch(ptr, n)
#define qatomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)

#define qatomic_cmpxchg(ptr, old, new) \
    __sync_val_compare_and_swap(ptr, old, new)
#define qatomic_cmpxchg__nocheck(ptr, old, new)  qatomic_cmpxchg(ptr, old, new)

/* And even shorter names that return void.  */
#define qatomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define qatomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define qatomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define qatomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define qatomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define qatomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
#define qatomic_xor(ptr, n)     ((void) __sync_fetch_and_xor(ptr, n))

#endif /* __ATOMIC_RELAXED */

#ifndef smp_wmb
#define smp_wmb()   smp_mb_release()
#endif
#ifndef smp_rmb
#define smp_rmb()   smp_mb_acquire()
#endif

/* This is more efficient than a store plus a fence.  */
#if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define qatomic_mb_set(ptr, i)  ((void)qatomic_xchg(ptr, i))
#endif
#endif

/* qatomic_mb_read/set semantics map to Java volatile variables.  They are
 * less expensive on some platforms (notably POWER) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use.  See docs/devel/atomics.txt for more discussion.
 */

#ifndef qatomic_mb_read
#define qatomic_mb_read(ptr) \
    qatomic_load_acquire(ptr)
#endif

#ifndef qatomic_mb_set
#define qatomic_mb_set(ptr, i)  do {        \
    qatomic_store_release(ptr, i);          \
    smp_mb();                               \
} while(0)
#endif

#define qatomic_fetch_inc_nonzero(ptr) ({                               \
    typeof_strip_qual(*ptr) _oldn = qatomic_read(ptr);                  \
    while (_oldn && qatomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) {  \
        _oldn = qatomic_read(ptr);                                      \
    }                                                                   \
    _oldn;                                                              \
})
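
/*
 * Sketch of the intended use of qatomic_fetch_inc_nonzero() (the object type,
 * its 'ref' field and the helper name are hypothetical): take a new reference
 * only if the count has not already dropped to zero, i.e. the object is not
 * already being torn down.
 *
 *     static inline bool obj_try_ref(MyObject *obj)
 *     {
 *         return qatomic_fetch_inc_nonzero(&obj->ref) != 0;
 *     }
 */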
"once") i64/u64 variables */ 459 #ifdef CONFIG_ATOMIC64 460 static inline int64_t qatomic_read_i64(const int64_t *ptr) 461 { 462 /* use __nocheck because sizeof(void *) might be < sizeof(u64) */ 463 return qatomic_read__nocheck(ptr); 464 } 465 466 static inline uint64_t qatomic_read_u64(const uint64_t *ptr) 467 { 468 return qatomic_read__nocheck(ptr); 469 } 470 471 static inline void qatomic_set_i64(int64_t *ptr, int64_t val) 472 { 473 qatomic_set__nocheck(ptr, val); 474 } 475 476 static inline void qatomic_set_u64(uint64_t *ptr, uint64_t val) 477 { 478 qatomic_set__nocheck(ptr, val); 479 } 480 481 static inline void qatomic64_init(void) 482 { 483 } 484 #else /* !CONFIG_ATOMIC64 */ 485 int64_t qatomic_read_i64(const int64_t *ptr); 486 uint64_t qatomic_read_u64(const uint64_t *ptr); 487 void qatomic_set_i64(int64_t *ptr, int64_t val); 488 void qatomic_set_u64(uint64_t *ptr, uint64_t val); 489 void qatomic64_init(void); 490 #endif /* !CONFIG_ATOMIC64 */ 491 492 #endif /* QEMU_ATOMIC_H */ 493