xref: /linux/arch/powerpc/include/asm/spinlock.h (revision 9a6b55ac)
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used, as a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
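
/*
 * Rough illustration of the token value (layout details elided): on 64-bit
 * the paca fields are arranged so that this 32-bit load yields
 * 0x80000000 | paca_index, so a lock held by CPU 5 contains 0x80000005 and a
 * free lock contains 0.  On 32-bit there is no paca, so a plain 1 is stored
 * and the holder cannot be identified from the lock word.
 */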

#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!static_branch_unlikely(&shared_processor))
		return false;
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif
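
/*
 * Background, roughly: the hypervisor bumps the lppaca yield_count when it
 * dispatches the virtual processor and again when it preempts it, so an odd
 * count means the vCPU is not currently running on a physical CPU.  That
 * parity is what the "& 1" test above relies on.
 */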

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
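
/*
 * Simplified C-level sketch of the lwarx/stwcx. sequence above (illustrative
 * only -- it ignores the reservation, the EH hint and the acquire barrier):
 *
 *	old = lock->slock;		// lwarx
 *	if (old == 0)
 *		lock->slock = token;	// stwcx., retried if reservation lost
 *	return old;			// 0 means the lock is now ours
 */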

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif
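
/*
 * The out-of-line SPLPAR helpers live in arch/powerpc/lib/locks.c.  In rough
 * terms, splpar_spin_yield() extracts the holder's CPU number from the lock
 * word and, if that vCPU is currently preempted, confers the rest of this
 * vCPU's timeslice to it with the H_CONFER hcall; splpar_rw_yield() does the
 * same for the writer holding an rwlock.
 */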

static inline bool is_shared_processor(void)
{
#ifdef CONFIG_PPC_SPLPAR
	return static_branch_unlikely(&shared_processor);
#else
	return false;
#endif
}

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

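/*
 * Lock acquisition, in outline: try the atomic trylock first; on failure,
 * drop to low SMT priority and spin on plain loads of the lock word (yielding
 * to the hypervisor on shared-processor LPARs) until it looks free, then go
 * back to medium priority and retry the atomic operation.
 */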
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

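/*
 * Same spin-then-yield loop as arch_spin_lock(), except that while
 * busy-waiting the caller's saved interrupt state ("flags", typically with
 * interrupts enabled) is restored so pending interrupts can be serviced;
 * interrupts are disabled again (flags_dis) before the atomic retry.
 */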
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

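/*
 * Unlock is just a release barrier followed by a plain store of 0: only the
 * owner ever writes the unlock value, so no atomic sequence is needed.
 */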
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
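
/*
 * Lock word encoding used below: 0 means unlocked, a positive value is the
 * number of active readers, and a negative value (the write token) means
 * write-locked.  On 64-bit, lwarx zero-extends the 32-bit word, so the value
 * is explicitly sign-extended (__DO_SIGN_EXTEND) before the signed tests.
 */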

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
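
/*
 * Roughly, the sequence above amounts to (illustrative only):
 *
 *	old = (int)rw->lock;		// lwarx, sign-extended on 64-bit
 *	if (old + 1 > 0)		// fails if write-locked (negative)
 *		rw->lock = old + 1;	// stwcx., retried if reservation lost
 *	return old + 1;			// > 0 means we now hold a read lock
 */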

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

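/*
 * Releasing a read lock must be atomic, since other readers may be adding or
 * removing themselves concurrently, so the count is decremented with a
 * lwarx/stwcx. loop after the release barrier.  "xer" is clobbered because
 * addic updates the carry bit.
 */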
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)
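
/*
 * The relax hooks are what the generic locking code calls while spinning on a
 * contended lock; mapping them to the yield helpers means such waiters also
 * give their timeslice to the hypervisor on shared-processor LPARs instead of
 * just spinning.
 */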

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */