/*	$OpenBSD: lock_machdep.c,v 1.14 2019/04/23 13:35:12 visa Exp $	*/

/*
 * Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/witness.h>
#include <sys/_lock.h>

#include <machine/atomic.h>
#include <machine/intr.h>
#include <machine/psl.h>
#include <machine/cpu.h>

#include <ddb/db_output.h>

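/*
 * PA-RISC has no native compare-and-swap; the only atomic memory
 * operation is "load and clear word" (ldcw/ldcws), and it requires a
 * 16-byte aligned operand.  __cpu_cas() below therefore grabs a
 * 16-byte aligned interlock word inside mpl->mpl_lock with ldcws and,
 * while holding it, performs the compare-and-swap on *addr with
 * ordinary loads and stores.  It returns 0 on success and non-zero on
 * failure, matching how the callers in this file test the result.
 */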
static __inline int
__cpu_cas(struct __mp_lock *mpl, volatile unsigned long *addr,
    unsigned long old, unsigned long new)
{
	volatile int *lock = (int *)(((vaddr_t)mpl->mpl_lock + 0xf) & ~0xf);
	volatile register_t old_lock = 0;
	int ret = 1;

	/* Note: lock must be 16-byte aligned. */
	asm volatile (
		"ldcws      0(%2), %0"
		: "=&r" (old_lock), "+m" (lock)
		: "r" (lock)
	);

	if (old_lock == MPL_UNLOCKED) {
		if (*addr == old) {
			*addr = new;
			asm("sync" ::: "memory");
			ret = 0;
		}
		*lock = MPL_UNLOCKED;
	}

	return ret;
}

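/*
 * mpl_lock is an array of four words (16 bytes) rather than a single
 * word, which guarantees that __cpu_cas() can find a 16-byte aligned
 * word inside it for ldcws regardless of how the structure itself is
 * aligned.  All four words start out unlocked.
 */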
void
___mp_lock_init(struct __mp_lock *lock)
{
	lock->mpl_lock[0] = MPL_UNLOCKED;
	lock->mpl_lock[1] = MPL_UNLOCKED;
	lock->mpl_lock[2] = MPL_UNLOCKED;
	lock->mpl_lock[3] = MPL_UNLOCKED;
	lock->mpl_cpu = NULL;
	lock->mpl_count = 0;
}

#if defined(MP_LOCKDEBUG)
#ifndef DDB
#error "MP_LOCKDEBUG requires DDB"
#endif

/* CPU-dependent timing, this needs to be settable from ddb. */
extern int __mp_lock_spinout;
#endif

static __inline void
__mp_lock_spin(struct __mp_lock *mpl)
{
#ifndef MP_LOCKDEBUG
	while (mpl->mpl_count != 0)
		CPU_BUSY_CYCLE();
#else
	int nticks = __mp_lock_spinout;

	while (mpl->mpl_count != 0 && --nticks > 0)
		CPU_BUSY_CYCLE();

	if (nticks == 0) {
		db_printf("__mp_lock(%p): lock spun out", mpl);
		db_enter();
	}
#endif
}

void
__mp_lock(struct __mp_lock *mpl)
{
	int s;

#ifdef WITNESS
	if (!__mp_lock_held(mpl, curcpu()))
		WITNESS_CHECKORDER(&mpl->mpl_lock_obj,
		    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);
#endif

	/*
	 * Note that mpl_count gets incremented twice for the first
	 * lock.  This is on purpose.  The way we release the lock in
	 * __mp_unlock() is to decrement mpl_count and then check
	 * whether the lock should be released.  Since mpl_count is
	 * what other CPUs spin on, decrementing it all the way to 0
	 * in __mp_unlock() would mean we could no longer clear
	 * mpl_cpu, because we would no longer be holding the lock.
	 * In theory mpl_cpu doesn't need to be cleared, but it's
	 * safer to clear it, and besides, setting mpl_count to 2 on
	 * the first lock makes most of this code much simpler.
	 */
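	/*
	 * Illustrative count progression (derived from the code below
	 * and from __mp_unlock()):
	 *
	 *	__mp_lock():   CAS mpl_count 0 -> 1, set mpl_cpu,
	 *	               then mpl_count++      -> mpl_count == 2
	 *	__mp_lock():   recursive, mpl_count++ -> mpl_count == 3
	 *	__mp_unlock(): --mpl_count            -> mpl_count == 2
	 *	__mp_unlock(): --mpl_count == 1, clear mpl_cpu, sync,
	 *	               then mpl_count = 0     -> lock is free
	 */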

	while (1) {
		s = hppa_intr_disable();
		if (__cpu_cas(mpl, &mpl->mpl_count, 0, 1) == 0) {
			__asm volatile("sync" ::: "memory");
			mpl->mpl_cpu = curcpu();
		}
		if (mpl->mpl_cpu == curcpu()) {
			mpl->mpl_count++;
			hppa_intr_enable(s);
			break;
		}
		hppa_intr_enable(s);

		__mp_lock_spin(mpl);
	}

	WITNESS_LOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);
}

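/*
 * Release one level of recursion.  On the final release, clear
 * mpl_cpu first, issue a sync so the cleared owner is visible before
 * the lock appears free, and only then set mpl_count to 0, which is
 * the value waiters in __mp_lock_spin() are spinning for.
 */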
void
__mp_unlock(struct __mp_lock *mpl)
{
	int s;

#ifdef MP_LOCKDEBUG
	if (mpl->mpl_cpu != curcpu()) {
		db_printf("__mp_unlock(%p): lock not held - %p != %p\n",
		    mpl, mpl->mpl_cpu, curcpu());
		db_enter();
	}
#endif

	WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);

	s = hppa_intr_disable();
	if (--mpl->mpl_count == 1) {
		mpl->mpl_cpu = NULL;
		__asm volatile("sync" ::: "memory");
		mpl->mpl_count = 0;
	}
	hppa_intr_enable(s);
}

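/*
 * Drop every recursion level at once and return how many there were.
 * Because the first acquisition bumps mpl_count twice, the number of
 * logical holds is mpl_count - 1.
 */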
int
__mp_release_all(struct __mp_lock *mpl)
{
	int rv = mpl->mpl_count - 1;
	int s;
#ifdef WITNESS
	int i;
#endif

#ifdef MP_LOCKDEBUG
	if (mpl->mpl_cpu != curcpu()) {
		db_printf("__mp_release_all(%p): lock not held - %p != %p\n",
		    mpl, mpl->mpl_cpu, curcpu());
		db_enter();
	}
#endif

#ifdef WITNESS
	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);
#endif

	s = hppa_intr_disable();
	mpl->mpl_cpu = NULL;
	__asm volatile("sync" ::: "memory");
	mpl->mpl_count = 0;
	hppa_intr_enable(s);

	return (rv);
}

int
__mp_release_all_but_one(struct __mp_lock *mpl)
{
	int rv = mpl->mpl_count - 2;
#ifdef WITNESS
	int i;
#endif

#ifdef MP_LOCKDEBUG
	if (mpl->mpl_cpu != curcpu()) {
		db_printf("__mp_release_all_but_one(%p): lock not held - "
		    "%p != %p\n", mpl, mpl->mpl_cpu, curcpu());
		db_enter();
	}
#endif

#ifdef WITNESS
	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);
#endif

	mpl->mpl_count = 2;

	return (rv);
}

void
__mp_acquire_count(struct __mp_lock *mpl, int count)
{
	while (count--)
		__mp_lock(mpl);
}
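
/*
 * __mp_release_all() and __mp_acquire_count() are meant to be used as
 * a pair around code that has to drop a recursively held lock, for
 * example across a sleep.  Illustrative sketch only; "example_lock"
 * is a hypothetical lock object, not something defined in this file:
 *
 *	int count;
 *
 *	count = __mp_release_all(&example_lock);
 *	... run or sleep without the lock held ...
 *	__mp_acquire_count(&example_lock, count);
 */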

int
__mp_lock_held(struct __mp_lock *mpl, struct cpu_info *ci)
{
	return mpl->mpl_cpu == ci;
}
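
/*
 * Illustrative sketch of the overall interface; "example_lock" is a
 * hypothetical lock object used only for this example:
 *
 *	struct __mp_lock example_lock;
 *
 *	___mp_lock_init(&example_lock);
 *
 *	__mp_lock(&example_lock);	(may be taken recursively)
 *	... critical section ...
 *	__mp_unlock(&example_lock);
 *
 *	if (__mp_lock_held(&example_lock, curcpu()))
 *		printf("example_lock still held\n");
 */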