/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * Load/store for 128-bit atomic operations, x86_64 version.
 *
 * Copyright (C) 2023 Linaro, Ltd.
 *
 * See docs/devel/atomics.rst for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef X86_64_ATOMIC128_LDST_H
#define X86_64_ATOMIC128_LDST_H

#ifdef CONFIG_INT128_TYPE
#include "host/cpuinfo.h"
#include "tcg/debug-assert.h"
#include <immintrin.h>

/*
 * Type-punning union between the SSE vector type used by the VMOVDQA
 * asm statements, the compiler's native 128-bit integer used by the
 * __sync_*_16 builtins, and QEMU's Int128 used by callers.  Going
 * through a union avoids strict-aliasing violations that casting
 * between these pointer types would incur.
 */
typedef union {
    __m128i v;
    __int128_t i;
    Int128 s;
} X86Int128Union;

/*
 * Through clang 16, with -mcx16, __atomic_load_n is incorrectly
 * expanded to a read-write operation: lock cmpxchg16b.
 */

/*
 * HAVE_ATOMIC128_RO: a 16-byte load that never writes is available only
 * when cpuinfo reports that aligned VMOVDQA is single-copy atomic on
 * this host (CPUINFO_ATOMIC_VMOVDQA).
 * HAVE_ATOMIC128_RW: always true here -- a read-write 16-byte atomic
 * access can always be synthesized from cmpxchg16b (see fallbacks below).
 */
#define HAVE_ATOMIC128_RO likely(cpuinfo & CPUINFO_ATOMIC_VMOVDQA)
#define HAVE_ATOMIC128_RW 1

/*
 * Atomically load 16 bytes from @ptr, which may be in read-only memory.
 * Caller must have checked HAVE_ATOMIC128_RO (asserted below): this path
 * exists only when VMOVDQA is known to be atomic, since the cmpxchg16b
 * fallback used elsewhere writes to the target and so cannot be used on
 * read-only pages.  @ptr must be 16-byte aligned ("m" constraint on an
 * Int128 object; VMOVDQA faults on unaligned access).
 */
static inline Int128 atomic16_read_ro(const Int128 *ptr)
{
    X86Int128Union r;

    tcg_debug_assert(HAVE_ATOMIC128_RO);
    asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr));

    return r.s;
}

/*
 * Atomically load 16 bytes from @ptr, which must be writable.
 * Prefers the plain VMOVDQA load when that is atomic; otherwise falls
 * back to cmpxchg16b with expected == new == 0, which returns the old
 * value and performs a locked read-modify-write (hence the writable
 * requirement).  __builtin_assume_aligned documents and promises the
 * 16-byte alignment both instructions need.
 */
static inline Int128 atomic16_read_rw(Int128 *ptr)
{
    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
    X86Int128Union r;

    if (HAVE_ATOMIC128_RO) {
        asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr_align));
    } else {
        r.i = __sync_val_compare_and_swap_16(ptr_align, 0, 0);
    }
    return r.s;
}

/*
 * Atomically store 16 bytes of @val to @ptr.
 * When VMOVDQA is atomic, a single aligned vector store suffices.
 * Otherwise, loop on cmpxchg16b: re-read the current value and attempt
 * to swap it for @val until the compare-and-swap succeeds (i.e. no
 * other writer raced in between the read and the swap).
 */
static inline void atomic16_set(Int128 *ptr, Int128 val)
{
    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
    X86Int128Union new = { .s = val };

    if (HAVE_ATOMIC128_RO) {
        asm("vmovdqa %1, %0" : "=m"(*ptr_align) : "x" (new.v));
    } else {
        __int128_t old;
        do {
            old = *ptr_align;
        } while (!__sync_bool_compare_and_swap_16(ptr_align, old, new.i));
    }
}
#else
/* Provide QEMU_ERROR stubs. */
#include "host/include/generic/host/atomic128-ldst.h"
#endif

#endif /* X86_64_ATOMIC128_LDST_H */