// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
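
/*
 * Usage sketch (not part of the implementation): a mutex is either defined
 * statically with DEFINE_MUTEX() or initialized at runtime with mutex_init(),
 * which expands to __mutex_init() above. The 'example_*' names below are
 * hypothetical.
 */
static DEFINE_MUTEX(example_static_lock);

struct example_device {
	struct mutex io_lock;
};

static __maybe_unused void example_device_setup(struct example_device *dev)
{
	mutex_init(&dev->io_lock);	/* runtime init before first use */
}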

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
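
/*
 * Illustrative sketch of the owner-word encoding described above: the low
 * three bits carry the flags, the remaining bits hold the task_struct
 * pointer. The 'example_*' name is hypothetical.
 */
static __maybe_unused void example_decode_owner(struct mutex *lock)
{
	unsigned long owner = atomic_long_read(&lock->owner);
	struct task_struct *task = (struct task_struct *)(owner & ~MUTEX_FLAGS);
	unsigned long flags = owner & MUTEX_FLAGS;

	if (flags & MUTEX_FLAG_WAITERS)
		pr_debug("mutex %p: owner %p must issue a wakeup on unlock\n",
			 lock, task);
}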

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * When we set the HANDOFF bit, we must make sure it doesn't
		 * live past the point where we acquire it. This would be
		 * possible if we (accidentally) set the bit on an unlocked
		 * mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void __sched
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock;
 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}
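
/*
 * Illustrative sketch (not a kernel code path): how the owner word evolves
 * across a handoff from a hypothetical owner A to first waiter B. The fake
 * aligned "pointer" values only demonstrate the flag encoding above.
 */
static __maybe_unused void example_handoff_transitions(void)
{
	unsigned long A = 0x1000, B = 0x2000;	/* stand-ins for task pointers */
	unsigned long owner;

	owner = A | MUTEX_FLAG_WAITERS;			/* B queued behind A */
	owner |= MUTEX_FLAG_HANDOFF;			/* B requests a handoff */
	owner = B | MUTEX_FLAG_WAITERS | MUTEX_FLAG_PICKUP; /* __mutex_handoff() */
	owner = B | MUTEX_FLAG_WAITERS;			/* B's trylock picks it up */
	(void)owner;
}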

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
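
/*
 * Typical usage sketch for the API above: acquire, touch shared state,
 * release from the same task. All 'example_*' names are hypothetical.
 */
static DEFINE_MUTEX(example_lock);
static int example_counter;

static __maybe_unused void example_critical_section(void)
{
	mutex_lock(&example_lock);
	example_counter++;		/* serialized by example_lock */
	mutex_unlock(&example_lock);
}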

/*
 * Wait-Die:
 *   A newer transaction is killed when it makes a request for a lock
 *   being held by an older transaction.
 *
 * Wound-Wait:
 *   A newer transaction is wounded when an older transaction makes a
 *   request for a lock being held by the newer transaction.
 */
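
/*
 * Worked example with hypothetical stamps (a smaller stamp means an older
 * transaction): let T1 have stamp 1 and T2 have stamp 2. Under Wait-Die,
 * if T2 requests a lock held by T1, T2 (the newer one) is killed and must
 * retry; if T1 requests a lock held by T2, T1 simply waits. Under
 * Wound-Wait, if T2 requests a lock held by T1, T2 waits; if T1 requests
 * a lock held by T2, T2 is wounded and backs off so T1 can make progress.
 */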

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and, depending on the algorithm, either needs to
 * wait for @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}
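
/*
 * A minimal sketch of the ordering above, assuming stamps handed out by
 * ww_acquire_init() increase monotonically: with a->stamp == 5 and
 * b->stamp == 3, @a is the younger context. The signed subtraction keeps
 * this working across stamp wraparound. The 'example_*' name is
 * hypothetical.
 */
static __maybe_unused bool example_is_younger(struct ww_acquire_ctx *a,
					      struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0; /* same test as above */
}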

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
	    __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder's. Even though multiple waiters may wound the lock holder,
 * it is sufficient that only one of them does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx; if there are later contexts waiting
 * behind us on the wait-list, check whether they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring the lock with the fastpath, where we do not hold wait_lock,
 * set the ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. A contended waiter will either see ww->ctx == NULL
	 * and keep spinning, or acquire wait_lock, add itself to the
	 * waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx		[W] MUTEX_FLAG_WAITERS
	 *     MB			    MB
	 * [R] MUTEX_FLAG_WAITERS	[R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by acquiring
	 * wait_lock is there a guarantee that they are valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. If that
		 * fails, owner might point to freed memory. If it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * Due to lock holder preemption, skip spinning if the owner is not
	 * running on a CPU or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * Clear the acquire context first, then release the
	 * underlying mutex:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
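
/*
 * A hedged usage sketch of the w/w API implemented in this file, following
 * the backoff pattern from Documentation/locking/ww-mutex-design.rst. The
 * 'example_*' name and the two-lock shape are assumptions for illustration.
 */
static __maybe_unused int example_lock_two(struct ww_mutex *m1,
					   struct ww_mutex *m2,
					   struct ww_class *class)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, class);

	/* first lock: with no locks held yet, -EDEADLK cannot happen */
	(void)ww_mutex_lock(m1, &ctx);
	while (ww_mutex_lock(m2, &ctx) == -EDEADLK) {
		/* we are the younger context: drop everything and wait */
		ww_mutex_unlock(m1);
		ww_mutex_lock_slow(m2, &ctx);	/* sleeps until acquired */
		swap(m1, m2);		/* the contended lock is now held */
	}
	ww_acquire_done(&ctx);

	/* ... operate on both objects ... */

	ww_mutex_unlock(m1);
	ww_mutex_unlock(m2);
	ww_acquire_fini(&ctx);

	return 0;
}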

static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 * context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the
 * lock over younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die, kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting; for
 * Wound-Wait, ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound it so that we can make progress.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * A ww_mutex needs to always recheck its position since its
		 * waiter list is not FIFO ordered.
		 */
		if (ww_ctx || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait: we stole the lock (!first_waiter); check the
		 * waiters, as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, false);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned int tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF is set: in that case we must not clear the
	 * owner field, but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
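
/*
 * Usage sketch for the two variants above: the return value must be
 * checked, since the lock is not held when -EINTR is returned. The
 * 'example_*' names are hypothetical.
 */
static DEFINE_MUTEX(example_cfg_lock);

static __maybe_unused int example_update_config(void)
{
	if (mutex_lock_interruptible(&example_cfg_lock))
		return -ERESTARTSYS;	/* signal arrived; lock not taken */

	/* ... modify configuration under the lock ... */

	mutex_unlock(&example_cfg_lock);
	return 0;
}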

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
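
/*
 * Usage sketch: note the spin_trylock()-style return convention documented
 * above (1 on success, 0 on contention). The 'example_*' names are
 * hypothetical.
 */
static DEFINE_MUTEX(example_stats_lock);

static __maybe_unused void example_opportunistic_update(void)
{
	if (!mutex_trylock(&example_stats_lock))
		return;			/* contended: skip rather than sleep */

	/* ... update statistics ... */

	mutex_unlock(&example_stats_lock);
}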

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding the mutex if we decrement to 0
 * @cnt: the atomic counter which we are to decrement
 * @lock: the mutex to return holding if we decrement to 0
 *
 * Return 1 and hold @lock if we decremented the counter to 0; return 0
 * otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
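
/*
 * Usage sketch for the helper above: drop a reference and tear the object
 * down under @lock only when the count reaches zero. The 'example_*' name
 * is hypothetical.
 */
static __maybe_unused void example_put_object(atomic_t *refcount,
					      struct mutex *lock)
{
	if (!atomic_dec_and_mutex_lock(refcount, lock))
		return;			/* other references remain */

	/* ... the count is 0 and we hold @lock: free resources ... */

	mutex_unlock(lock);
}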