/*	$NetBSD: linux_ww_mutex.c,v 1.2 2015/05/21 21:55:55 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.2 2015/05/21 21:55:55 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>
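
/*
 * Wait/wound mutexes: an implementation of the Linux kernel's ww_mutex
 * API in terms of native NetBSD synchronization primitives.
 *
 * Each lock attempt made under an acquire context (struct
 * ww_acquire_ctx) carries a ticket drawn from the lock class.  When
 * two contexts contend, the older one (the one with the smaller
 * ticket) wins: a younger contender either waits its turn or is told
 * to back off with -EDEADLK, at which point it must release every
 * lock it holds under its context and start over.  This is what lets
 * callers lock sets of objects in arbitrary order without risking
 * deadlock.
 */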
#define	WW_WANTLOCK(WW) \
	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW), \
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_LOCKED(WW) \
	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL, \
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_UNLOCKED(WW) \
	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW), \
	    (uintptr_t)__builtin_return_address(0), 0)
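
/*
 * Waiting acquire contexts are kept in a per-mutex rb-tree, ordered
 * by ticket number, so that ww_mutex_unlock can grant the lock to the
 * waiter with the lowest (oldest, highest-priority) ticket first.
 */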
static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return 1;
	return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return 1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};
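
/*
 * ww_acquire_init(ctx, class)
 *
 *	Initialize an acquire context owned by the current LWP,
 *	drawing a fresh ticket from the lock class.  The context must
 *	be marked with ww_acquire_done once all locks are held, and
 *	destroyed with ww_acquire_fini after they are all released.
 */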
void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic_inc_64_nv(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}
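
/*
 * ww_acquire_done(ctx)
 *
 *	Mark the context as done acquiring: it is now an error to take
 *	any further locks under it.
 */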
void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}
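
/*
 * ww_acquire_fini(ctx)
 *
 *	Destroy the context.  All locks acquired under it must already
 *	have been released; the context cannot be used again.
 */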
void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}
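
/*
 * LOCKDEBUG support: ww_dump is the callback LOCKDEBUG uses to print
 * the state of a wait/wound mutex when dumping lock information.
 */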
#ifdef LOCKDEBUG
static void
ww_dump(volatile void *cookie)
{
	volatile struct ww_mutex *mutex = cookie;

	printf_nolog("%-13s: ", "state");
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		printf_nolog("unlocked\n");
		break;
	case WW_OWNED:
		printf_nolog("owned by lwp\n");
		printf_nolog("%-13s: %p\n", "owner", mutex->wwm_u.owner);
		printf_nolog("%-13s: %s\n", "waiters",
		    cv_has_waiters(__UNVOLATILE(&mutex->wwm_cv))
			? "yes" : "no");
		break;
	case WW_CTX:
		printf_nolog("owned via ctx\n");
		printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		printf_nolog("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		printf_nolog("%-13s: %s\n", "waiters",
		    cv_has_waiters(__UNVOLATILE(&mutex->wwm_cv))
			? "yes" : "no");
		break;
	case WW_WANTOWN:
		printf_nolog("owned via ctx\n");
		printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		printf_nolog("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		printf_nolog("%-13s: %s\n", "waiters", "yes (noctx)");
		break;
	default:
		printf_nolog("unknown\n");
		break;
	}
}

static lockops_t ww_lockops = {
	.lo_name = "Wait/wound mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = ww_dump,
};
#endif
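
/*
 * ww_mutex_init(mutex, class)
 *
 *	Initialize a wait/wound mutex in the given class.  Must be
 *	destroyed with ww_mutex_destroy before the memory is reused.
 */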
void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
#ifdef LOCKDEBUG
	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
	    (uintptr_t)__builtin_return_address(0));
#endif
}
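
/*
 * ww_mutex_destroy(mutex)
 *
 *	Finalize a wait/wound mutex.  It must be unlocked and have no
 *	waiters.
 */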
void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	KASSERT(mutex->wwm_state == WW_UNLOCKED);

#ifdef LOCKDEBUG
	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
#endif
	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

/*
 * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
 * mean `Do I hold this lock?' (answering which really requires an
 * acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	bool locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}
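
/*
 * ww_mutex_state_wait(mutex, state)
 *
 *	Sleep on the mutex's condition variable until wwm_state is no
 *	longer the given state.  Caller must hold wwm_lock.
 */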
static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}
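
/*
 * ww_mutex_state_wait_sig(mutex, state)
 *
 *	Like ww_mutex_state_wait, but the sleep is interruptible by
 *	signals.  Returns 0 on success or a negative (Linux-style)
 *	error number if interrupted.
 */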
static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			break;
	} while (mutex->wwm_state == state);

	return ret;
}
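
/*
 * ww_mutex_lock_wait(mutex, ctx)
 *
 *	Record ctx in the mutex's waiter tree and sleep until the lock
 *	has been handed off to ctx.  Caller must hold wwm_lock, and the
 *	mutex must currently be owned by some other context.
 */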
static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}
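
/*
 * ww_mutex_lock_wait_sig(mutex, ctx)
 *
 *	Like ww_mutex_lock_wait, but the sleep is interruptible by
 *	signals.  Returns 0 on success or a negative error number if
 *	interrupted.
 */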
static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			goto out;
	} while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	return ret;
}
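
/*
 * ww_mutex_lock_noctx(mutex)
 *
 *	Acquire the mutex without an acquire context, waiting
 *	uninterruptibly until it is free.  If a context currently owns
 *	it, announce the request by moving the mutex to WW_WANTOWN.
 */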
static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		WW_LOCKED(mutex);
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	mutex_exit(&mutex->wwm_lock);
}
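
/*
 * ww_mutex_lock_noctx_sig(mutex)
 *
 *	Like ww_mutex_lock_noctx, but the wait is interruptible by
 *	signals.  Returns 0 on success or a negative error number if
 *	interrupted.
 */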
static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		WW_LOCKED(mutex);
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}
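
/*
 * ww_mutex_lock(mutex, ctx)
 *
 *	Acquire the mutex.  With a null ctx, simply wait for it like an
 *	ordinary sleepable lock.  With a ctx, return 0 on success,
 *	-EALREADY if ctx already holds this mutex, or -EDEADLK if the
 *	holder's ticket is older (higher-priority) than ours, in which
 *	case the caller must release everything held under ctx and
 *	start over, waiting for the contended lock with
 *	ww_mutex_lock_slow.
 *
 *	A sketch of the expected caller backoff loop for two locks.
 *	The names a, b, and class here are hypothetical, for
 *	illustration only:
 *
 *		struct ww_acquire_ctx ctx;
 *		struct ww_mutex *first = &a->lock, *second = &b->lock;
 *		struct ww_mutex *tmp;
 *
 *		ww_acquire_init(&ctx, &class);
 *		if (ww_mutex_lock(first, &ctx) == -EDEADLK)
 *			ww_mutex_lock_slow(first, &ctx);
 *		while (ww_mutex_lock(second, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(first);
 *			ww_mutex_lock_slow(second, &ctx);
 *			tmp = first; first = second; second = tmp;
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		... use the locked objects ...
 *
 *		ww_mutex_unlock(first);
 *		ww_mutex_unlock(second);
 *		ww_acquire_fini(&ctx);
 */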
int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		ww_mutex_lock_noctx(mutex);
		return 0;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		WW_LOCKED(mutex);
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	ctx->wwx_acquired++;
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	mutex_exit(&mutex->wwm_lock);
	return 0;
}
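
/*
 * ww_mutex_lock_interruptible(mutex, ctx)
 *
 *	Like ww_mutex_lock, but any wait is interruptible by signals:
 *	in addition to 0, -EALREADY, and -EDEADLK, this may return a
 *	negative error number if the wait is interrupted.
 */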
int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		return ww_mutex_lock_noctx_sig(mutex);
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		WW_LOCKED(mutex);
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}
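
/*
 * ww_mutex_lock_slow(mutex, ctx)
 *
 *	Slow path after a -EDEADLK backoff: wait for the mutex
 *	unconditionally.  The caller must not hold any locks under ctx
 *	(wwx_acquired must be zero) and must not already hold this
 *	mutex, so the wait cannot deadlock.
 */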
void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/* Caller must not try to lock against self here.  */
	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		WW_LOCKED(mutex);
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}
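
/*
 * ww_mutex_lock_slow_interruptible(mutex, ctx)
 *
 *	Like ww_mutex_lock_slow, but the wait is interruptible by
 *	signals.  Returns 0 on success or a negative error number if
 *	interrupted.
 */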
int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		WW_LOCKED(mutex);
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}
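
/*
 * ww_mutex_trylock(mutex)
 *
 *	Try to acquire the mutex without a context and without
 *	blocking.  Returns 1 on success, 0 if it is held by anyone,
 *	matching the Linux return convention.
 */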
int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		WW_WANTLOCK(mutex);
		WW_LOCKED(mutex);
		ret = 1;
	} else {
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
			(mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}
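
/*
 * ww_mutex_unlock_release(mutex)
 *
 *	Internal helper to release a context-held mutex: decrement the
 *	owning context's acquired count and clear the owner field.
 *	Caller must hold wwm_lock and set the new wwm_state afterward.
 */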
static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}
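
/*
 * ww_mutex_unlock(mutex)
 *
 *	Release the mutex.  If any waiters with contexts remain, hand
 *	the lock directly to the one with the lowest ticket (the
 *	minimum of the waiter tree); otherwise mark it unlocked and
 *	let everyone race for it.  Wakes all waiters so they can
 *	re-check the state.
 */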
void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	WW_UNLOCKED(mutex);
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}