/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Atomic compare-and-swap using the cas.l instruction: if *p == old, store
 * new into *p.  The previous contents of *p come back in the register that
 * held new, so the operation succeeded iff the return value equals old.
 * cas.l takes its address in r0, hence the "z" constraint; it also clobbers
 * the T bit.
 */
static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
	__asm__ __volatile__("cas.l %1,%0,@r0"
		: "+r"(new)
		: "r"(old), "z"(p)
		: "t", "memory" );
	return new;
}
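
/*
 * Illustrative only: ignoring atomicity, __sl_cas() behaves like the
 * portable sketch below (cas_sketch is a made-up name, not a kernel
 * interface).  On hardware the compare and the conditional store are a
 * single atomic cas.l instruction.
 *
 *	static unsigned cas_sketch(volatile unsigned *p, unsigned old, unsigned new)
 *	{
 *		unsigned prev = *p;	// value currently in memory
 *		if (prev == old)
 *			*p = new;	// swap only when it matches the expected value
 *		return prev;		// caller tests prev == old for success
 *	}
 */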

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 * The lock word is 1 while the lock is free and 0 while it is held.
 */

#define arch_spin_is_locked(x)		((x)->lock <= 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	/* Spin until we win the race to flip the lock word from 1 to 0. */
	while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* Release: flip the lock word from 0 back to 1. */
	__sl_cas(&lock->lock, 0, 1);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	/* Returns the previous lock word: nonzero (1) means we got the lock. */
	return __sl_cas(&lock->lock, 1, 0);
}
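
/*
 * These arch_spin_* hooks back the generic <linux/spinlock.h> layer and are
 * not called directly by ordinary kernel code.  A minimal usage sketch at
 * that level (demo_lock, demo_count and demo_bump are made-up names for
 * illustration):
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *	static unsigned int demo_count;
 *
 *	static void demo_bump(void)
 *	{
 *		spin_lock(&demo_lock);		// reaches arch_spin_lock() on SMP
 *		demo_count++;
 *		spin_unlock(&demo_lock);	// reaches arch_spin_unlock()
 *	}
 */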

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
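
/*
 * A minimal sketch of the "mix" described above, using the generic
 * <linux/rwlock.h> API (demo_rwlock and the helpers are made-up names for
 * illustration): readers, which may run in interrupt context, take a plain
 * read lock, while the writer runs outside interrupt context and must keep
 * interrupts off for as long as it holds the lock.
 *
 *	static DEFINE_RWLOCK(demo_rwlock);
 *
 *	static void demo_reader(void)	// safe from process or IRQ context
 *	{
 *		read_lock(&demo_rwlock);
 *		// ... read the shared state ...
 *		read_unlock(&demo_rwlock);
 *	}
 *
 *	static void demo_writer(void)	// process context only
 *	{
 *		unsigned long flags;
 *
 *		write_lock_irqsave(&demo_rwlock, flags);
 *		// ... update the shared state ...
 *		write_unlock_irqrestore(&demo_rwlock, flags);
 *	}
 */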

/*
 * rw->lock counts the reader slots still available: it starts at
 * RW_LOCK_BIAS when unlocked, each reader takes one slot, and 0 means a
 * writer holds the lock.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned old;
	/* Wait for a free slot, then claim it by decrementing the count. */
	do old = rw->lock;
	while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned old;
	/* Give the reader slot back by incrementing the count. */
	do old = rw->lock;
	while (__sl_cas(&rw->lock, old, old+1) != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	/* Wait until every reader slot is free, then claim them all at once. */
	while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/* Hand back all of the reader slots. */
	__sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned old;
	/* Same as arch_read_lock(), but give up if no slot is available. */
	do old = rw->lock;
	while (old && __sl_cas(&rw->lock, old, old-1) != old);
	return !!old;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	/* Succeeds only if no reader or writer currently holds the lock. */
	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}

#endif /* __ASM_SH_SPINLOCK_CAS_H */