/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

#define SPINLOCK_BREAK_INSN	0x0000c006	/* break 6,6 */

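/*
 * Lightweight check of a spinlock value: "andcm,=" clears the bits of
 * __ARCH_SPIN_LOCK_UNLOCKED_VAL from lock_val and nullifies the next
 * word when the result is zero, i.e. when the word looks like a valid
 * locked (0) or unlocked value.  Any other bit pattern executes the
 * embedded break 6,6 so the trap handler can flag a trashed lock word.
 */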
static inline void arch_spin_val_check(int lock_val)
{
	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
		asm volatile(	"andcm,= %0,%1,%%r0\n"
				".word %2\n"
		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
			"i" (SPINLOCK_BREAK_INSN));
}

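/*
 * __ldcw() clears the lock word when the lock is taken, so zero means
 * the lock is held and __ARCH_SPIN_LOCK_UNLOCKED_VAL means it is free.
 */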
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	lock_val = READ_ONCE(*a);
	arch_spin_val_check(lock_val);
	return (lock_val == 0);
}

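/*
 * Try to take the lock with the atomic load-and-clear.  A non-zero old
 * value means the word held the unlocked value, so the lock is now
 * ours.  On failure, spin with plain loads until the word becomes
 * non-zero again before retrying, which avoids re-issuing the
 * comparatively expensive ldcw while the lock is still held.
 */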
static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	do {
		int lock_val_old;

		lock_val_old = __ldcw(a);
		arch_spin_val_check(lock_val_old);
		if (lock_val_old)
			return;	/* got lock */

		/* wait until we should try to get lock again */
		while (*a == 0)
			continue;
	} while (1);
}

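/*
 * Note: with a zero displacement the ",ma" completer shares its
 * encoding with the PA 2.0 ordered store ("stw,o"), hence the release
 * semantics of the store below.
 */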
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)"
		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
}

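/* Returns 1 if the lock was free and has now been taken, 0 otherwise. */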
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	lock_val = __ldcw(a);
	arch_spin_val_check(lock_val);
	return lock_val != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Locking is unfair: writers can be starved indefinitely by readers.
 *
 * The rwlock state itself is kept in @counter and access to it is
 * serialized with @lock_mutex.
 */

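/*
 * Counter convention: @counter starts at __ARCH_RW_LOCK_UNLOCKED__.
 * Each reader decrements it on lock and increments it on unlock; a
 * writer sets it to 0 while holding the lock and restores
 * __ARCH_RW_LOCK_UNLOCKED__ on unlock.
 */
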
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A counter of zero means a writer holds the lock exclusively;
	 * deny the reader.  Otherwise grant the lock to this (first or
	 * subsequent) reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If readers hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; grant the lock only when it is completely
	 * unlocked.  Hence the claim that these rwlocks are unfair to
	 * writers: a writer can be starved for an indefinite time by
	 * readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

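/*
 * The blocking lock operations simply retry the trylock variants.
 * Interrupts are disabled only inside each trylock attempt (while
 * @lock_mutex is held), not while spinning in cpu_relax().
 */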
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */