/* $OpenBSD: kern_lock.c,v 1.75 2024/07/03 01:36:50 jsg Exp $ */

/*
 * Copyright (c) 2017 Visa Hankala
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <sys/mutex.h>

#include <ddb/db_output.h>

#ifdef MP_LOCKDEBUG
#ifndef DDB
#error "MP_LOCKDEBUG requires DDB"
#endif

/* CPU-dependent timing, this needs to be settable from ddb. */
int __mp_lock_spinout = INT_MAX;
#endif /* MP_LOCKDEBUG */

#ifdef MULTIPROCESSOR

#include <sys/mplock.h>
struct __mp_lock kernel_lock;

/*
 * Functions for manipulating the kernel_lock. We put them here
 * so that they show up in profiles.
 */

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock. Intended for use in the scheduler
 * and the lower half of the kernel.
 */

void
_kernel_lock(void)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}

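/*
 * Report the kernel lock as held once the system has panicked or while
 * ddb is active, so that assertions do not fire in those contexts.
 */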
int
_kernel_lock_held(void)
{
	if (panicstr || db_active)
		return 1;
	return (__mp_lock_held(&kernel_lock, curcpu()));
}

#ifdef __USE_MI_MPLOCK

/* Ticket lock implementation */
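/*
 * A CPU takes a ticket by atomically incrementing mpl_users and spins
 * until mpl_ticket, the number currently being served, reaches its own
 * ticket.  Unlocking advances mpl_ticket, handing the lock to the next
 * waiter in FIFO order.  A per-CPU depth counter allows each CPU to
 * acquire the lock recursively.
 */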

#include <machine/cpu.h>

void
___mp_lock_init(struct __mp_lock *mpl, const struct lock_type *type)
{
	memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
	mpl->mpl_users = 0;
	mpl->mpl_ticket = 1;

#ifdef WITNESS
	mpl->mpl_lock_obj.lo_name = type->lt_name;
	mpl->mpl_lock_obj.lo_type = type;
	if (mpl == &kernel_lock)
		mpl->mpl_lock_obj.lo_flags = LO_WITNESS | LO_INITIALIZED |
		    LO_SLEEPABLE | (LO_CLASS_KERNEL_LOCK << LO_CLASSSHIFT);
	WITNESS_INIT(&mpl->mpl_lock_obj, type);
#endif
}

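/*
 * Busy-wait until our ticket is the one being served.  spc_spinning is
 * raised while we wait so the time can be accounted as spinning; with
 * MP_LOCKDEBUG, drop into ddb once __mp_lock_spinout iterations have
 * passed without progress.
 */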
static __inline void
__mp_lock_spin(struct __mp_lock *mpl, u_int me)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	spc->spc_spinning++;
	while (mpl->mpl_ticket != me) {
		CPU_BUSY_CYCLE();

#ifdef MP_LOCKDEBUG
		if (--nticks <= 0) {
			db_printf("%s: %p lock spun out\n", __func__, mpl);
			db_enter();
			nticks = __mp_lock_spinout;
		}
#endif
	}
	spc->spc_spinning--;
}

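/*
 * A ticket is taken only on the first acquisition; nested acquisitions
 * just bump the per-CPU depth.  Interrupts are blocked around the
 * per-CPU state update so an interrupt handler taking the kernel lock
 * cannot race it.  The barrier after the spin keeps the critical
 * section from being reordered before the lock is taken.
 */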
void
__mp_lock(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef WITNESS
	if (!__mp_lock_held(mpl, curcpu()))
		WITNESS_CHECKORDER(&mpl->mpl_lock_obj,
		    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);
#endif

	s = intr_disable();
	if (cpu->mplc_depth++ == 0)
		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
	intr_restore(s);

	__mp_lock_spin(mpl, cpu->mplc_ticket);
	membar_enter_after_atomic();

	WITNESS_LOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);
}

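/*
 * Drop one level of recursion.  Only when the per-CPU depth reaches
 * zero is the ticket advanced, after a release barrier, to pass the
 * lock to the next waiter.
 */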
void
__mp_unlock(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;

#ifdef MP_LOCKDEBUG
	if (!__mp_lock_held(mpl, curcpu())) {
		db_printf("__mp_unlock(%p): not held lock\n", mpl);
		db_enter();
	}
#endif

	WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);

	s = intr_disable();
	if (--cpu->mplc_depth == 0) {
		membar_exit();
		mpl->mpl_ticket++;
	}
	intr_restore(s);
}

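/*
 * Release the lock completely, regardless of the recursion depth, and
 * return that depth so the caller can restore it later with
 * __mp_acquire_count(), typically across a sleep.
 */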
int
__mp_release_all(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	unsigned long s;
	int rv;
#ifdef WITNESS
	int i;
#endif

	s = intr_disable();
	rv = cpu->mplc_depth;
#ifdef WITNESS
	for (i = 0; i < rv; i++)
		WITNESS_UNLOCK(&mpl->mpl_lock_obj, LOP_EXCLUSIVE);
#endif
	cpu->mplc_depth = 0;
	membar_exit();
	mpl->mpl_ticket++;
	intr_restore(s);

	return (rv);
}

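/*
 * Reacquire the lock `count' times, restoring the recursion depth
 * returned by __mp_release_all().
 */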
void
__mp_acquire_count(struct __mp_lock *mpl, int count)
{
	while (count--)
		__mp_lock(mpl);
}

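/*
 * The lock is held by a CPU when that CPU's saved ticket is the one
 * currently served and its recursion depth is non-zero.
 */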
int
__mp_lock_held(struct __mp_lock *mpl, struct cpu_info *ci)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[CPU_INFO_UNIT(ci)];

	return (cpu->mplc_ticket == mpl->mpl_ticket && cpu->mplc_depth > 0);
}

#endif /* __USE_MI_MPLOCK */

#endif /* MULTIPROCESSOR */


#ifdef __USE_MI_MUTEX
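/*
 * Machine-independent mutexes: spinning locks that raise the IPL to
 * mtx_wantipl while held and record the owning CPU.
 */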
void
__mtx_init(struct mutex *mtx, int wantipl)
{
	mtx->mtx_owner = NULL;
	mtx->mtx_wantipl = wantipl;
	mtx->mtx_oldipl = IPL_NONE;
}

#ifdef MULTIPROCESSOR
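/*
 * Spin until mtx_enter_try() succeeds.  Between failed attempts, wait
 * for mtx_owner to clear before retrying, so the atomic operation is
 * not hammered while another CPU holds the mutex.
 */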
void
mtx_enter(struct mutex *mtx)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
#ifdef MP_LOCKDEBUG
	int nticks = __mp_lock_spinout;
#endif

	WITNESS_CHECKORDER(MUTEX_LOCK_OBJECT(mtx),
	    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);

	spc->spc_spinning++;
	while (mtx_enter_try(mtx) == 0) {
		do {
			CPU_BUSY_CYCLE();
#ifdef MP_LOCKDEBUG
			if (--nticks == 0) {
				db_printf("%s: %p lock spun out\n",
				    __func__, mtx);
				db_enter();
				nticks = __mp_lock_spinout;
			}
#endif
		} while (mtx->mtx_owner != NULL);
	}
	spc->spc_spinning--;
}

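/*
 * Attempt to take the mutex without spinning: raise the IPL, then try
 * to install ourselves as owner with an atomic compare-and-swap.
 * Returns 1 on success (recording the previous IPL), 0 on failure
 * (restoring the IPL).  Always succeeds after a panic or in ddb.
 */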
int
mtx_enter_try(struct mutex *mtx)
{
	struct cpu_info *owner, *ci = curcpu();
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return (1);

	if (mtx->mtx_wantipl != IPL_NONE)
		s = splraise(mtx->mtx_wantipl);

	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
#ifdef DIAGNOSTIC
	if (__predict_false(owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif
	if (owner == NULL) {
		membar_enter_after_atomic();
		if (mtx->mtx_wantipl != IPL_NONE)
			mtx->mtx_oldipl = s;
#ifdef DIAGNOSTIC
		ci->ci_mutex_level++;
#endif
		WITNESS_LOCK(MUTEX_LOCK_OBJECT(mtx), LOP_EXCLUSIVE);
		return (1);
	}

	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);

	return (0);
}
#else
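/*
 * Uniprocessor variants: no other CPU can compete for the mutex, so no
 * atomic operations are needed; just raise the IPL and record the
 * owner.
 */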
void
mtx_enter(struct mutex *mtx)
{
	struct cpu_info *ci = curcpu();

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

	WITNESS_CHECKORDER(MUTEX_LOCK_OBJECT(mtx),
	    LOP_EXCLUSIVE | LOP_NEWORDER, NULL);

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner == ci))
		panic("mtx %p: locking against myself", mtx);
#endif

	if (mtx->mtx_wantipl != IPL_NONE)
		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);

	mtx->mtx_owner = ci;

#ifdef DIAGNOSTIC
	ci->ci_mutex_level++;
#endif
	WITNESS_LOCK(MUTEX_LOCK_OBJECT(mtx), LOP_EXCLUSIVE);
}

int
mtx_enter_try(struct mutex *mtx)
{
	mtx_enter(mtx);
	return (1);
}
#endif

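/*
 * Release the mutex: clear the owner, preceded by a release barrier on
 * MP kernels, and restore the IPL saved when the mutex was entered.
 */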
void
mtx_leave(struct mutex *mtx)
{
	int s;

	/* Avoid deadlocks after panic or in DDB */
	if (panicstr || db_active)
		return;

	MUTEX_ASSERT_LOCKED(mtx);
	WITNESS_UNLOCK(MUTEX_LOCK_OBJECT(mtx), LOP_EXCLUSIVE);

#ifdef DIAGNOSTIC
	curcpu()->ci_mutex_level--;
#endif

	s = mtx->mtx_oldipl;
#ifdef MULTIPROCESSOR
	membar_exit();
#endif
	mtx->mtx_owner = NULL;
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);
}

#ifdef DDB
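/*
 * Mutexes private to ddb.  They spin with interrupts disabled and
 * restore the saved interrupt state when released, so they can be used
 * from the debugger itself.
 */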
void
db_mtx_enter(struct db_mutex *mtx)
{
	struct cpu_info *ci = curcpu(), *owner;
	unsigned long s;

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner == ci))
		panic("%s: mtx %p: locking against myself", __func__, mtx);
#endif

	s = intr_disable();

	for (;;) {
		owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
		if (owner == NULL)
			break;
		CPU_BUSY_CYCLE();
	}
	membar_enter_after_atomic();

	mtx->mtx_intr_state = s;

#ifdef DIAGNOSTIC
	ci->ci_mutex_level++;
#endif
}

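/*
 * Release a ddb mutex and restore the interrupt state saved by
 * db_mtx_enter().
 */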
void
db_mtx_leave(struct db_mutex *mtx)
{
#ifdef DIAGNOSTIC
	struct cpu_info *ci = curcpu();
#endif
	unsigned long s;

#ifdef DIAGNOSTIC
	if (__predict_false(mtx->mtx_owner != ci))
		panic("%s: mtx %p: not owned by this CPU", __func__, mtx);
	ci->ci_mutex_level--;
#endif

	s = mtx->mtx_intr_state;
#ifdef MULTIPROCESSOR
	membar_exit();
#endif
	mtx->mtx_owner = NULL;
	intr_restore(s);
}
#endif /* DDB */
#endif /* __USE_MI_MUTEX */

#ifdef WITNESS
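/*
 * WITNESS-aware mutex initialization: record the lock's name and flags
 * in its lock_object before performing the regular _mtx_init().
 */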
void
_mtx_init_flags(struct mutex *m, int ipl, const char *name, int flags,
    const struct lock_type *type)
{
	struct lock_object *lo = MUTEX_LOCK_OBJECT(m);

	lo->lo_flags = MTX_LO_FLAGS(flags);
	if (name != NULL)
		lo->lo_name = name;
	else
		lo->lo_name = type->lt_name;
	WITNESS_INIT(lo, type);

	_mtx_init(m, ipl);
}
#endif /* WITNESS */