xref: /dragonfly/sys/kern/kern_mutex.c (revision 82730a9c)
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * functions here.
 */
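
/*
 * Typical usage, as an illustrative sketch only.  Callers normally go
 * through the inline mtx_*() wrappers in sys/mutex2.h; the mtx_init()
 * call and the struct mtx/mtx_t names are assumed from sys/mutex[2].h,
 * while the _mtx_*() entry points are the ones defined in this file:
 *
 *	struct mtx my_mtx;
 *
 *	mtx_init(&my_mtx);
 *	_mtx_lock_ex_quick(&my_mtx, "mymtx");	(may block, unlike a token)
 *	... critical section, which may itself block ...
 *	_mtx_unlock(&my_mtx);
 */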

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>

static __int64_t mtx_contention_count;
static __int64_t mtx_collision_count;
static __int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
	    &mtx_contention_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
	    &mtx_collision_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
	    &mtx_wakeup_count, 0, "");

static void mtx_chain_link(mtx_t mtx);
static void mtx_delete_link(mtx_t mtx, mtx_link_t link);

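/*
 * The mtx_lock word encodes the lock state: the low bits covered by
 * MTX_MASK hold the shared or recursive-exclusive count, while the flag
 * bits (MTX_EXCLUSIVE, MTX_EXWANTED, MTX_SHWANTED, MTX_EXLINK, defined in
 * sys/mutex.h) mark exclusive ownership, waiting exclusive and shared
 * requestors, and ownership of the mtx_link list, respectively.  This
 * summary is inferred from how the bits are used below.
 */
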
/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
__mtx_lock_ex(mtx_t mtx, mtx_link_t link, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				error = 0;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			/*
			 * We could clear MTX_EXLINK in our copy of the lock
			 * word and let the cmpset loop until MTX_EXLINK is
			 * available, but to avoid unnecessary cpu cache
			 * traffic we poll for it explicitly instead.
			 *
			 * Setting MTX_EXLINK in nlock causes us to loop until
			 * we can acquire MTX_EXLINK.
			 *
			 * Also set MTX_EXWANTED coincident with EXLINK, if
			 * not already set.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock = lock | MTX_EXWANTED | MTX_EXLINK;
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				/*
				 * Check for early abort
				 */
				if (link->state == MTX_LINK_ABORTED) {
					atomic_clear_int(&mtx->mtx_lock,
							 MTX_EXLINK);
					--td->td_critcount;
					error = ENOLCK;
					if (mtx->mtx_link == NULL) {
						atomic_clear_int(&mtx->mtx_lock,
								 MTX_EXWANTED);
					}
					break;
				}

				/*
				 * Success.  Link in our structure then
				 * release EXLINK and sleep.
				 */
				link->owner = td;
				link->state = MTX_LINK_LINKED;
				if (mtx->mtx_link) {
					link->next = mtx->mtx_link;
					link->prev = link->next->prev;
					link->next->prev = link;
					link->prev->next = link;
				} else {
					link->next = link;
					link->prev = link;
					mtx->mtx_link = link;
				}
				tsleep_interlock(link, 0);
				atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
				--td->td_critcount;

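				/*
				 * Record the blocking lock's ident (prefixed
				 * with 'X' for exclusive) in the per-cpu
				 * lock-collision statistics before sleeping.
				 */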
				mycpu->gd_cnt.v_lock_name[0] = 'X';
				strncpy(mycpu->gd_cnt.v_lock_name + 1,
					ident,
					sizeof(mycpu->gd_cnt.v_lock_name) - 2);
				++mycpu->gd_cnt.v_lock_colls;

				error = tsleep(link, flags | PINTERLOCKED,
					       ident, to);
				++mtx_contention_count;

				/*
				 * Normal unlink, we should own the exclusive
				 * lock now.
				 */
				if (link->state == MTX_LINK_LINKED)
					mtx_delete_link(mtx, link);
				if (link->state == MTX_LINK_ACQUIRED) {
					KKASSERT(mtx->mtx_owner == link->owner);
					error = 0;
					break;
				}

				/*
				 * Aborted lock (mtx_abort_ex called).
				 */
				if (link->state == MTX_LINK_ABORTED) {
					error = ENOLCK;
					break;
				}

				/*
				 * tsleep error, else retry.
				 */
				if (error)
					break;
			} else {
				--td->td_critcount;
			}
		}
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_ex_link(mtx_t mtx, mtx_link_t link,
		  const char *ident, int flags, int to)
{
	return(__mtx_lock_ex(mtx, link, ident, flags, to));
}

int
_mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, flags, to));
}

int
_mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, 0, 0));
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
static __inline int
__mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			nlock = lock | MTX_SHWANTED;
			tsleep_interlock(mtx, 0);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {

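				/*
				 * As in the exclusive path, record the ident
				 * (prefixed with 'S' for shared) in the
				 * per-cpu lock-collision statistics.
				 */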
				mycpu->gd_cnt.v_lock_name[0] = 'S';
				strncpy(mycpu->gd_cnt.v_lock_name + 1,
					ident,
					sizeof(mycpu->gd_cnt.v_lock_name) - 2);
				++mycpu->gd_cnt.v_lock_colls;

				error = tsleep(mtx, flags | PINTERLOCKED,
					       ident, to);
				if (error)
					break;
				++mtx_contention_count;
				/* retry */
			} else {
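				/*
				 * The cmpset race was lost; back out the
				 * tsleep interlock we set above and retry.
				 */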
				crit_enter();
				tsleep_remove(curthread);
				crit_exit();
			}
		}
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	return (__mtx_lock_sh(mtx, ident, flags, to));
}

int
_mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
	return (__mtx_lock_sh(mtx, ident, 0, 0));
}

/*
 * Get an exclusive spinlock the hard way.
 */
void
_mtx_spinlock(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
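			/*
			 * Crude backoff: pause and then spin for 'bb' empty
			 * iterations, growing bb (up to 1000) each time the
			 * lock is found contested, before re-reading the
			 * lock word.
			 */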
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Attempt to acquire a spinlock.  If we fail we must undo the
 * gd->gd_spinlocks/gd->gd_curthread->td_critcount predisposition.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_spinlock_try(mtx_t mtx)
{
	globaldata_t gd = mycpu;
	u_int	lock;
	u_int	nlock;
	int	res = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = gd->gd_curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == gd->gd_curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
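			/*
			 * Contested: undo the gd_spinlocks/td_critcount
			 * predisposition and return EAGAIN.
			 */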
			--gd->gd_spinlocks;
			cpu_ccfence();
			--gd->gd_curthread->td_critcount;
			res = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return res;
}

#if 0

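/*
 * Shared-spinlock acquisition, currently compiled out.
 */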
void
_mtx_spinlock_sh(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

#endif

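/*
 * Attempt to exclusive-lock a mutex without blocking.  Recursion is
 * allowed.
 *
 * Returns 0 on success, EAGAIN if the lock is held shared or held
 * exclusively by another thread.
 */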
int
_mtx_lock_ex_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

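/*
 * Attempt to share-lock a mutex without blocking.
 *
 * Returns 0 on success, EAGAIN if the lock is held exclusively.
 */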
int
_mtx_lock_sh_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}
		KKASSERT(mtx->mtx_owner == curthread);
		nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (lock & MTX_SHWANTED) {
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;

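		/*
		 * A lock value of 1 (ignoring MTX_EXWANTED) means a single
		 * shared count and no other flag bits, so the lock can be
		 * upgraded in place.
		 */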
		if ((lock & ~MTX_EXWANTED) == 1) {
			nlock = lock | MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if (lock & MTX_EXCLUSIVE) {
			KKASSERT(mtx->mtx_owner == curthread);
			break;
		} else {
			error = EDEADLK;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * Any release which makes the lock available while others are waiting for
 * an exclusive lock causes ownership to be chained to the next exclusive
 * waiter instead of fully releasing the lock.
 */
void
_mtx_unlock(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		nlock = lock & ~(MTX_SHWANTED | MTX_EXLINK);

		if (nlock == 1) {
			/*
			 * Last release, shared lock, no exclusive waiters.
			 */
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else if (nlock == (MTX_EXCLUSIVE | 1)) {
			/*
			 * Last release, exclusive lock, no exclusive waiters.
			 * Wake up any shared waiters.
			 */
			mtx->mtx_owner = NULL;
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				if (lock & MTX_SHWANTED) {
					wakeup(mtx);
					++mtx_wakeup_count;
				}
				break;
			}
		} else if (nlock == (MTX_EXWANTED | 1)) {
			/*
			 * Last release, shared lock, with exclusive
			 * waiters.
			 *
			 * Wait for EXLINK to clear, then acquire it.
			 * We could use the cmpset for this but polling
			 * is better on the cpu caches.
			 *
			 * Acquire an exclusive lock leaving the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK | MTX_EXCLUSIVE;
			nlock |= (lock & MTX_SHWANTED);
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--td->td_critcount;
				break;
			}
			--td->td_critcount;
		} else if (nlock == (MTX_EXCLUSIVE | MTX_EXWANTED | 1)) {
			/*
			 * Last release, exclusive lock, with exclusive
			 * waiters.
			 *
			 * Leave the exclusive lock intact and the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK;
			nlock |= (lock & MTX_SHWANTED);
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--td->td_critcount;
				break;
			}
			--td->td_critcount;
		} else {
			/*
			 * Not the last release (shared or exclusive)
			 */
			nlock = lock - 1;
			KKASSERT((nlock & MTX_MASK) != MTX_MASK);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Chain the exclusive lock to the next waiting exclusive requestor.  Called
 * with the lock held exclusively with a single ref count, and also with
 * MTX_EXLINK held.
 */
static void
mtx_chain_link(mtx_t mtx)
{
	mtx_link_t link;
	u_int	lock;
	u_int	nlock;
	u_int	clock;	/* bits we own and want to clear */

	/*
	 * Chain the exclusive lock to the next link.  The caller cleared
	 * SHWANTED so if there is no link we have to wake up any shared
	 * waiters.
	 */
	clock = MTX_EXLINK;
	if ((link = mtx->mtx_link) != NULL) {
		KKASSERT(link->state == MTX_LINK_LINKED);
		if (link->next == link) {
			mtx->mtx_link = NULL;
			clock |= MTX_EXWANTED;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ACQUIRED;
		mtx->mtx_owner = link->owner;
	} else {
		/*
		 * Chain was empty, release the exclusive lock's last count
		 * as well as the bits shown.
		 */
		clock |= MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1;
	}

	/*
	 * We have to use cmpset here to deal with MTX_SHWANTED.  If
	 * we just clear the bits we can miss a wakeup or, worse,
	 * leave mtx_lock unlocked with MTX_SHWANTED still set.
	 */
	for (;;) {
		lock = mtx->mtx_lock;
		nlock = lock & ~clock;

		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (link) {
				/*
				 * Wakeup new exclusive holder.  Leave
				 * SHWANTED intact.
				 */
				wakeup(link);
			} else if (lock & MTX_SHWANTED) {
				/*
				 * Signal any shared waiters (and we also
				 * clear SHWANTED).
				 */
				mtx->mtx_owner = NULL;
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK.
	 *
	 * Do not use cmpxchg to wait for EXLINK to clear as this might
	 * result in too much cpu cache traffic.
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Delete the link and release EXLINK.
	 */
	if (link->state == MTX_LINK_LINKED) {
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_IDLE;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}

/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the
 * mtx_link is initialized, including both before and after the call
 * to mtx_lock_ex_link().
 */
void
mtx_abort_ex_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Do the abort
	 */
	switch(link->state) {
	case MTX_LINK_IDLE:
		/*
		 * Link not started yet
		 */
		link->state = MTX_LINK_ABORTED;
		break;
	case MTX_LINK_LINKED:
		/*
		 * de-link, mark aborted, and wakeup the thread.
		 */
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ABORTED;
		wakeup(link);
		break;
	case MTX_LINK_ACQUIRED:
		/*
		 * Too late, the lock was acquired.  Let it complete.
		 */
		break;
	default:
		/*
		 * link already aborted, do nothing.
		 */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}