/*	$OpenBSD: kern_lock.c,v 1.71 2020/03/05 09:28:31 claudio Exp $	*/

/*
 * Copyright (c) 2017 Visa Hankala
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <sys/mutex.h>

#include <ddb/db_output.h>

#ifdef MP_LOCKDEBUG
#ifndef DDB
#error "MP_LOCKDEBUG requires DDB"
#endif

/* CPU-dependent timing; this needs to be settable from ddb. */
int __mp_lock_spinout = 200000000;
#endif /* MP_LOCKDEBUG */

#ifdef MULTIPROCESSOR

#include <sys/mplock.h>
struct __mp_lock kernel_lock;

/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */

void
_kernel_lock(void)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}

int
_kernel_lock_held(void)
{
	if (panicstr || db_active)
		return 1;
	return (__mp_lock_held(&kernel_lock, curcpu()));
}

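/*
 * Illustrative sketch only (not part of the kernel): the typical
 * pattern for code that must run under the kernel lock.  KERNEL_LOCK()
 * and KERNEL_UNLOCK() are the usual wrappers around _kernel_lock() and
 * _kernel_unlock(); the lock can be taken recursively by one CPU.
 */
#if 0
void
example_kernel_locked_work(void)
{
	KERNEL_LOCK();
	KASSERT(_kernel_lock_held());
	/* ... access state protected by the kernel lock ... */
	KERNEL_UNLOCK();
}
#endif
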
#ifdef __USE_MI_MPLOCK

/* Ticket lock implementation */
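/*
 * A ticket lock serves CPUs in FIFO order: each acquirer atomically
 * takes the next ticket from mpl_users (atomic_inc_int_nv() below) and
 * spins until the "now serving" counter, mpl_ticket, reaches that
 * ticket.  Unlocking advances mpl_ticket, handing the lock to the next
 * waiter in line.
 */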

#include <machine/cpu.h>

void
___mp_lock_init(struct __mp_lock *mpl, const struct lock_type *type)
{
	memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
	mpl->mpl_users = 0;
	mpl->mpl_ticket = 1;

#ifdef WITNESS
	mpl->mpl_lock_obj.lo_name = type->lt_name;
	mpl->mpl_lock_obj.lo_type = type;
	if (mpl == &kernel_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_SLEEPABLE | (LO_CLASS_KERNEL_LOCK << LO_CLASSSHIFT);
	else if (mpl == &sched_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_RECURSABLE | (LO_CLASS_SCHED_LOCK << LO_CLASSSHIFT);
	WITNESS_INIT(&mpl->mpl_lock_obj, type);
#endif
}

static __inline void
__mp_lock_spin(struct __mp_lock *mpl, u_int me)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	spc->spc_spinning++;
	while (mpl->mpl_ticket != me) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		if (--nticks <= 0) {
			db_printf("%s: %p lock spun out\n", __func__, mpl);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
	spc->spc_spinning--;
}

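/*
 * Take the lock, recursively if this CPU already holds it.  A ticket
 * is taken only on the first acquisition (mplc_depth 0 -> 1); nested
 * calls merely bump the per-CPU depth counter.  Interrupts are
 * disabled around the depth/ticket update so that an interrupt
 * handler on this CPU cannot race with the intermediate state.
 */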
void
__mp_lock(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef WITNESS
	if (!__mp_lock_held(mpl, curcpu()))
		WITNESS_CHECKORDER(&mpl->mpl_lock_obj,
		    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);
#endif

	s = intr_disable();
	if (cpu->mplc_depth++ == 0)
		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
	intr_restore(s);

	__mp_lock_spin(mpl, cpu->mplc_ticket);
	membar_enter_after_atomic();

	WITNESS_LOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);
}

void
__mp_unlock(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_unlock(%p): lock not held\n", mpl);
		db_enter();
	}
#endif

	WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);

	s = intr_disable();
	if (--cpu->mplc_depth == 0) {
		membar_exit();
		mpl->mpl_ticket++;
	}
	intr_restore(s);
}

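/*
 * Drop the lock completely, regardless of recursion depth, and report
 * how many acquisitions were released.  Typically paired with
 * __mp_acquire_count(): a CPU that is about to sleep releases its full
 * depth here and restores it once it runs again.
 */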
int
__mp_release_all(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;
	int rv;
#ifdef WITNESS
	int i;
#endif

	s = intr_disable();
	rv = cpu->mplc_depth;
#ifdef WITNESS
	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);
#endif
	cpu->mplc_depth = 0;
	membar_exit();
	mpl->mpl_ticket++;
	intr_restore(s);

	return (rv);
}

int
__mp_release_all_but_one(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	int rv = cpu->mplc_depth - 1;
#ifdef WITNESS
	int i;

	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);
#endif

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_release_all_but_one(%p): lock not held\n", mpl);
		db_enter();
	}
#endif

	cpu->mplc_depth = 1;

	return (rv);
}

void
__mp_acquire_count(struct __mp_lock *mpl, int count)
{
	while (count--)
		__mp_lock(mpl);
}

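/*
 * Illustrative sketch only (not part of the kernel): the
 * release-all/reacquire pattern used around code that may sleep while
 * holding the kernel lock recursively.
 */
#if 0
void
example_sleep_with_kernel_lock(void)
{
	int count;

	count = __mp_release_all(&kernel_lock);
	/* ... sleep; other CPUs may take the kernel lock here ... */
	__mp_acquire_count(&kernel_lock, count);
}
#endif
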
int
__mp_lock_held(struct __mp_lock *mpl, struct cpu_info *ci)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[CPU_INFO_UNIT(ci)];

	return (cpu->mplc_ticket == mpl->mpl_ticket && cpu->mplc_depth > 0);
}

#endif /* __USE_MI_MPLOCK */

#endif /* MULTIPROCESSOR */


#ifdef __USE_MI_MUTEX
void
__mtx_init(struct mutex *mtx, int wantipl)
{
	mtx->mtx_owner = NULL;
	mtx->mtx_wantipl = wantipl;
	mtx->mtx_oldipl = IPL_NONE;
}

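/*
 * Illustrative sketch only (not part of the kernel): typical mutex
 * usage.  The mutex is initialized with the highest IPL at which it
 * will be taken; mtx_enter() then raises the IPL before acquiring it,
 * and mtx_leave() restores the saved IPL on release.
 */
#if 0
struct mutex example_mtx = MUTEX_INITIALIZER(IPL_NET);

void
example_mutex_usage(void)
{
	mtx_enter(&example_mtx);
	/* ... access state protected by example_mtx ... */
	mtx_leave(&example_mtx);
}
#endif
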
#ifdef MULTIPROCESSOR
void
mtx_enter(struct mutex *mtx)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	WITNESS_CHECKORDER(MUTEX_LOCK_OBJECT(mtx),
	    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);

	spc->spc_spinning++;
	while (mtx_enter_try(mtx) == 0) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		if (--nticks == 0) {
			db_printf("%s: %p lock spun out\n", __func__, mtx);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
	spc->spc_spinning--;
}

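/*
 * Try to take the mutex without spinning.  The IPL is raised first so
 * that the holder already runs at mtx_wantipl once the compare-and-swap
 * on mtx_owner succeeds; if the CAS loses, the old IPL is restored and
 * 0 is returned.  membar_enter_after_atomic() provides the acquire
 * ordering that pairs with the membar_exit() in mtx_leave().
 */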
int
mtx_enter_try(struct mutex *mtx)
{
	struct cpu_info *owner, *ci = curcpu();
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return (1);

	if (mtx->mtx_wantipl != IPL_NONE)
		s = splraise(mtx->mtx_wantipl);

	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
#ifdef DIAGNOSTIC
	if (__predict_false(owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif
	if (owner == NULL) {
		membar_enter_after_atomic();
		if (mtx->mtx_wantipl != IPL_NONE)
			mtx->mtx_oldipl = s;
#ifdef DIAGNOSTIC
		ci->ci_mutex_level++;
#endif
		WITNESS_LOCK(MUTEX_LOCK_OBJECT(mtx), LOP_EXCLUSIVE);
		return (1);
	}

	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);

	return (0);
}
#else
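/*
 * Uniprocessor variant: with a single CPU there is no cross-CPU
 * contention, so taking a mutex reduces to raising the IPL and
 * recording ownership; no atomic operations or memory barriers are
 * needed.
 */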
void
mtx_enter(struct mutex *mtx)
{
	struct cpu_info *ci = curcpu();

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

	WITNESS_CHECKORDER(MUTEX_LOCK_OBJECT(mtx),
	    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif

	if (mtx->mtx_wantipl != IPL_NONE)
		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);

	mtx->mtx_owner = ci;

#ifdef DIAGNOSTIC
	ci->ci_mutex_level++;
#endif
	WITNESS_LOCK(MUTEX_LOCK_OBJECT(mtx), LOP_EXCLUSIVE);
}

int
mtx_enter_try(struct mutex *mtx)
{
	mtx_enter(mtx);
	return (1);
}
#endif

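/*
 * Release the mutex.  The saved IPL must be read before ownership is
 * cleared: once mtx_owner is NULL another CPU may take the mutex and
 * overwrite mtx_oldipl.  membar_exit() orders the critical section
 * before the releasing store.
 */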
void
mtx_leave(struct mutex *mtx)
{
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

	MUTEX_ASSERT_LOCKED(mtx);
	WITNESS_UNLOCK(MUTEX_LOCK_OBJECT(mtx), LOP_EXCLUSIVE);

#ifdef DIAGNOSTIC
	curcpu()->ci_mutex_level--;
#endif

	s = mtx->mtx_oldipl;
#ifdef MULTIPROCESSOR
	membar_exit();
#endif
	mtx->mtx_owner = NULL;
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);
}

#ifdef DDB
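/*
 * db_mutex: a minimal spinlock for use within DDB itself.  It disables
 * interrupts for as long as the lock is held (saving the previous
 * interrupt state in mtx_intr_state) and spins on a bare
 * compare-and-swap, so it depends on none of the kernel's regular
 * locking machinery.
 */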
void
db_mtx_enter(struct db_mutex *mtx)
{
	struct cpu_info *ci = curcpu(), *owner;
	unsigned long s;

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner == ci))
		panic("%s: mtx %p: locking against myself", __func__, mtx);
#endif

	s = intr_disable();

	for (;;) {
		owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
		if (owner == NULL)
			break;
		CPU_BUSY_CYCLE();
	}
	membar_enter_after_atomic();

	mtx->mtx_intr_state = s;

#ifdef DIAGNOSTIC
	ci->ci_mutex_level++;
#endif
}

void
db_mtx_leave(struct db_mutex *mtx)
{
#ifdef DIAGNOSTIC
	struct cpu_info *ci = curcpu();
#endif
	unsigned long s;

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner != ci))
		panic("%s: mtx %p: not owned by this CPU", __func__, mtx);
	ci->ci_mutex_level--;
#endif

	s = mtx->mtx_intr_state;
#ifdef MULTIPROCESSOR
	membar_exit();
#endif
	mtx->mtx_owner = NULL;
	intr_restore(s);
}
#endif /* DDB */
#endif /* __USE_MI_MUTEX */

#ifdef WITNESS
void
_mtx_init_flags(struct mutex *m, int ipl, const char *name, int flags,
    const struct lock_type *type)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	lo->lo_flags = MTX_LO_FLAGS(flags);
	if (name != NULL)
		lo->lo_name = name;
	else
		lo->lo_name = type->lt_name;
	WITNESS_INIT(lo, type);

	_mtx_init(m, ipl);
}
#endif /* WITNESS */