xref: /qemu/host/include/x86_64/host/atomic128-ldst.h (revision 78f314cf)
/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * Load/store for 128-bit atomic operations, x86_64 version.
 *
 * Copyright (C) 2023 Linaro, Ltd.
 *
 * See docs/devel/atomics.rst for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef X86_64_ATOMIC128_LDST_H
#define X86_64_ATOMIC128_LDST_H

#ifdef CONFIG_INT128_TYPE
#include "host/cpuinfo.h"
#include "tcg/debug-assert.h"

/*
 * Through clang 16, with -mcx16, __atomic_load_n is incorrectly
 * expanded to a read-write operation: lock cmpxchg16b.
 */

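/*
 * Both Intel and AMD document that aligned 16-byte VMOVDQA loads and
 * stores are atomic on AVX-capable CPUs; cpuinfo probes for this and
 * advertises it as CPUINFO_ATOMIC_VMOVDQA.  The read-write flavour is
 * always available via the CMPXCHG16B fallback (hence -mcx16 above),
 * at the cost of working only on writable memory.
 */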
#define HAVE_ATOMIC128_RO  likely(cpuinfo & CPUINFO_ATOMIC_VMOVDQA)
#define HAVE_ATOMIC128_RW  1

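/*
 * The read-only variant may be used on memory mapped without write
 * permission: a plain VMOVDQA load performs no store cycle, unlike
 * the CMPXCHG16B fallback below.
 */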
static inline Int128 atomic16_read_ro(const Int128 *ptr)
{
    Int128Alias r;

    tcg_debug_assert(HAVE_ATOMIC128_RO);
    asm("vmovdqa %1, %0" : "=x" (r.i) : "m" (*ptr));

    return r.s;
}

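/*
 * Without VMOVDQA, read via CMPXCHG16B: comparing against 0 and
 * swapping in 0 either fails and returns the current value, or
 * succeeds by storing back the 0 that was already there.  Either
 * way the full 16 bytes are read atomically, but the lock'ed
 * read-modify-write requires the page to be writable.
 */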
static inline Int128 atomic16_read_rw(Int128 *ptr)
{
    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
    Int128Alias r;

    if (HAVE_ATOMIC128_RO) {
        asm("vmovdqa %1, %0" : "=x" (r.i) : "m" (*ptr_align));
    } else {
        r.i = __sync_val_compare_and_swap_16(ptr_align, 0, 0);
    }
    return r.s;
}

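/*
 * Without VMOVDQA, store via a CMPXCHG16B loop: re-read the old
 * value and retry until the compare succeeds, i.e. until the new
 * value has been installed without a concurrent writer getting in
 * between the read and the swap.
 */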
static inline void atomic16_set(Int128 *ptr, Int128 val)
{
    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
    Int128Alias new = { .s = val };

    if (HAVE_ATOMIC128_RO) {
        asm("vmovdqa %1, %0" : "=m"(*ptr_align) : "x" (new.i));
    } else {
        __int128_t old;
        do {
            old = *ptr_align;
        } while (!__sync_bool_compare_and_swap_16(ptr_align, old, new.i));
    }
}
#else
/* Provide QEMU_ERROR stubs. */
#include "host/include/generic/host/atomic128-ldst.h"
#endif

#endif /* X86_64_ATOMIC128_LDST_H */