/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/kasan-checks.h>

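/*
 * __nops(n) expands to an assembler string that emits n NOP instructions
 * via .rept/.endr; nops(n) issues them inline.
 */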
#define __nops(n)	".rept " #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))

#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")

#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

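/*
 * PSB CSYNC (profiling synchronization barrier), TSB CSYNC (trace
 * synchronization barrier) and CSDB (consumption of speculative data
 * barrier) live in the HINT encoding space, so they behave as NOPs on
 * CPUs that do not implement the corresponding feature.
 */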
#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define tsb_csync()	asm volatile("hint #18" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")

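/*
 * When interrupts are masked via the GIC priority mask, a DSB may be
 * required for a write to ICC_PMR_EL1 to take effect before subsequent
 * instructions; the gic_pmr_sync static key tracks whether the GIC in use
 * needs this.
 */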
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync()						\
	do {							\
		extern struct static_key_false gic_pmr_sync;	\
								\
		if (static_branch_unlikely(&gic_pmr_sync))	\
			dsb(sy);				\
	} while (0)
#else
#define pmr_sync()	do {} while (0)
#endif

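/*
 * The mandatory barriers use a full-system DSB. The dma_* variants only
 * need to order accesses against observers in the outer shareable domain
 * (e.g. DMA-capable devices), so an outer-shareable DMB is sufficient.
 */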
#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

#define dma_mb()	dmb(osh)
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)

/*
 * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

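	/*
	 * CMP sets the carry flag when idx >= sz (unsigned), so the SBC
	 * with XZR yields all ones when idx < sz and zero otherwise.
	 */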
	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}
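
/*
 * A minimal usage sketch: callers normally go through array_index_nospec()
 * from <linux/nospec.h>, which applies this mask for them after the bounds
 * check:
 *
 *	if (idx < sz) {
 *		idx = array_index_nospec(idx, sz);
 *		val = array[idx];
 *	}
 */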

/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
 */
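/*
 * The dummy load below forms an address that is data-dependent on the
 * counter value: eor-ing the value with itself gives zero, which is added
 * to sp and loaded into xzr, so a later memory barrier also orders the
 * counter read.
 */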
#define arch_counter_enforce_ordering(val) do {			\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)

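/*
 * The SMP barriers only need to order accesses within the inner shareable
 * domain, so the cheaper DMB ISH variants are sufficient.
 */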
#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

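/*
 * __smp_store_release() and __smp_load_acquire() map directly onto the STLR
 * and LDAR instructions. The union lets the (qualifier-stripped) scalar
 * value be reinterpreted as a plain sized integer for the asm operand.
 */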
#define __smp_store_release(p, v)					\
do {									\
	typeof(p) __p = (p);						\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_write(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
	typeof(p) __p = (p);						\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_read(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	}								\
	(typeof(*p))__u.__val;						\
})

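/*
 * The generic smp_cond_load_*() polling loops are overridden so that the
 * CPU can wait in __cmpwait_relaxed() (see <asm/cmpxchg.h>), which uses an
 * exclusive load plus WFE to sleep until the variable may have changed,
 * rather than spinning on repeated loads.
 */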
#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

#include <asm-generic/barrier.h>

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */