xref: /dragonfly/sys/kern/kern_mutex.c (revision 235099c3)
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * functions here.
 */
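
/*
 * Illustrative sketch (not compiled): holding a mutex across a blocking
 * condition, which LWKT tokens cannot do.  The sketch calls the underscore
 * functions defined below directly so it stays self-contained; real callers
 * are assumed to go through the inline mtx_*() wrappers in sys/mutex2.h,
 * and the mutex is assumed to have been initialized (unlocked) beforehand.
 * The function and wmesg names are made up for the example.
 */
#if 0
static void
example_hold_across_blocking(mtx_t mtx)
{
	_mtx_lock_ex_quick(mtx, "exmpl");	/* blocks until acquired */

	/*
	 * The mutex remains held even if we block here (tsleep, I/O, ...),
	 * unlike a token, which is only held while the thread is running.
	 */

	_mtx_unlock(mtx);
}
#endif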

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/mutex.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>

static __int64_t mtx_contention_count;
static __int64_t mtx_collision_count;
static __int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
	    &mtx_contention_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
	    &mtx_collision_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
	    &mtx_wakeup_count, 0, "");

static void mtx_chain_link(mtx_t mtx);
static void mtx_delete_link(mtx_t mtx, mtx_link_t link);

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
__mtx_lock_ex(mtx_t mtx, mtx_link_t link, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				error = 0;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			/*
			 * Clearing MTX_EXLINK in lock causes us to loop until
			 * MTX_EXLINK is available.  However, to avoid
			 * unnecessary cpu cache traffic we poll instead.
			 *
			 * Setting MTX_EXLINK in nlock causes us to loop until
			 * we can acquire MTX_EXLINK.
			 *
			 * Also set MTX_EXWANTED coincident with EXLINK, if
			 * not already set.
			 */
			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			/*lock &= ~MTX_EXLINK;*/
			nlock = lock | MTX_EXWANTED | MTX_EXLINK;
			++mycpu->gd_spinlocks_wr;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				/*
				 * Check for early abort
				 */
				if (link->state == MTX_LINK_ABORTED) {
					atomic_clear_int(&mtx->mtx_lock,
							 MTX_EXLINK);
					--mycpu->gd_spinlocks_wr;
					error = ENOLCK;
					if (mtx->mtx_link == NULL) {
						atomic_clear_int(&mtx->mtx_lock,
								 MTX_EXWANTED);
					}
					break;
				}

				/*
				 * Success.  Link in our structure then
				 * release EXLINK and sleep.
				 */
				link->owner = curthread;
				link->state = MTX_LINK_LINKED;
				if (mtx->mtx_link) {
					link->next = mtx->mtx_link;
					link->prev = link->next->prev;
					link->next->prev = link;
					link->prev->next = link;
				} else {
					link->next = link;
					link->prev = link;
					mtx->mtx_link = link;
				}
				tsleep_interlock(link, 0);
				atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
				--mycpu->gd_spinlocks_wr;

				error = tsleep(link, flags, ident, to);
				++mtx_contention_count;

				/*
				 * Normal unlink, we should own the exclusive
				 * lock now.
				 */
				if (link->state == MTX_LINK_LINKED)
					mtx_delete_link(mtx, link);
				if (link->state == MTX_LINK_ACQUIRED) {
					KKASSERT(mtx->mtx_owner == link->owner);
					error = 0;
					break;
				}

				/*
				 * Aborted lock (mtx_abort_ex called).
				 */
				if (link->state == MTX_LINK_ABORTED) {
					error = ENOLCK;
					break;
				}

				/*
				 * tsleep error, else retry.
				 */
				if (error)
					break;
			} else {
				--mycpu->gd_spinlocks_wr;
			}
		}
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_ex_link(mtx_t mtx, mtx_link_t link,
		  const char *ident, int flags, int to)
{
	return(__mtx_lock_ex(mtx, link, ident, flags, to));
}

int
_mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, flags, to));
}

int
_mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, 0, 0));
}
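
/*
 * Illustrative sketch (not compiled): exclusive acquisition with PCATCH.
 * Per the comment above __mtx_lock_ex(), an error is only returned when
 * PCATCH is specified, in which case the tsleep() error comes back and the
 * lock is not held.  Function and wmesg names are made up for the example.
 */
#if 0
static int
example_lock_ex_interruptible(mtx_t mtx)
{
	int error;

	error = _mtx_lock_ex(mtx, "exmtx", PCATCH, 0);
	if (error)
		return (error);		/* interrupted; lock not acquired */

	/* ... exclusive critical section ... */

	_mtx_unlock(mtx);
	return (0);
}
#endif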

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
static __inline int
__mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			nlock = lock | MTX_SHWANTED;
			tsleep_interlock(mtx, 0);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = tsleep(mtx, flags, ident, to);
				if (error)
					break;
				++mtx_contention_count;
				/* retry */
			} else {
				tsleep_remove(curthread);
			}
		}
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	return (__mtx_lock_sh(mtx, ident, flags, to));
}

int
_mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
	return (__mtx_lock_sh(mtx, ident, 0, 0));
}
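
/*
 * Illustrative sketch (not compiled): shared acquisition.  Any number of
 * shared holders may coexist; a blocked shared waiter relies on the
 * mass-wakeup described above when the exclusive holder releases or
 * downgrades.  Names are made up for the example.
 */
#if 0
static void
example_lock_sh(mtx_t mtx)
{
	_mtx_lock_sh_quick(mtx, "shmtx");

	/* ... read-only access to the data protected by mtx ... */

	_mtx_unlock(mtx);
}
#endif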

void
_mtx_spinlock_ex(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

void
_mtx_spinlock_sh(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

int
_mtx_lock_ex_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_sh_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}
		KKASSERT(mtx->mtx_owner == curthread);
		nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (lock & MTX_SHWANTED) {
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}
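
/*
 * Illustrative sketch (not compiled): modify under the exclusive lock, then
 * downgrade to shared so other readers can proceed while we keep reading.
 * Names are made up for the example.
 */
#if 0
static void
example_downgrade(mtx_t mtx)
{
	_mtx_lock_ex_quick(mtx, "dgmtx");

	/* ... modify the protected data ... */

	_mtx_downgrade(mtx);	/* exclusive count becomes a shared count */

	/* ... continue with read-only access ... */

	_mtx_unlock(mtx);
}
#endif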

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;

		if ((lock & ~MTX_EXWANTED) == 1) {
			nlock = lock | MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if (lock & MTX_EXCLUSIVE) {
			KKASSERT(mtx->mtx_owner == curthread);
			break;
		} else {
			error = EDEADLK;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}
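
/*
 * Illustrative sketch (not compiled): opportunistic upgrade.  On EDEADLK the
 * shared lock is still held, so a typical caller drops it and re-acquires
 * exclusively (re-checking any state that may have changed in between).
 * Names are made up for the example.
 */
#if 0
static void
example_upgrade(mtx_t mtx)
{
	_mtx_lock_sh_quick(mtx, "upmtx");

	if (_mtx_upgrade_try(mtx) != 0) {
		/* Could not upgrade in place; fall back to a full re-lock */
		_mtx_unlock(mtx);
		_mtx_lock_ex_quick(mtx, "upmtx");
	}

	/* ... exclusive critical section ... */

	_mtx_unlock(mtx);
}
#endif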

/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * Any release which makes the lock available when others want an exclusive
 * lock causes us to chain the owner to the next exclusive lock instead of
 * releasing the lock.
 */
void
_mtx_unlock(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		nlock = lock & ~(MTX_SHWANTED | MTX_EXLINK);

		if (nlock == 1) {
			/*
			 * Last release, shared lock, no exclusive waiters.
			 */
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else if (nlock == (MTX_EXCLUSIVE | 1)) {
			/*
			 * Last release, exclusive lock, no exclusive waiters.
			 * Wake up any shared waiters.
			 */
			mtx->mtx_owner = NULL;
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				if (lock & MTX_SHWANTED) {
					wakeup(mtx);
					++mtx_wakeup_count;
				}
				break;
			}
		} else if (nlock == (MTX_EXWANTED | 1)) {
			/*
			 * Last release, shared lock, with exclusive
			 * waiters.
			 *
			 * Wait for EXLINK to clear, then acquire it.
			 * We could use the cmpset for this but polling
			 * is better on the cpu caches.
			 *
			 * Acquire an exclusive lock leaving the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK | MTX_EXCLUSIVE;
			nlock |= (lock & MTX_SHWANTED);
			++mycpu->gd_spinlocks_wr;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--mycpu->gd_spinlocks_wr;
				break;
			}
			--mycpu->gd_spinlocks_wr;
		} else if (nlock == (MTX_EXCLUSIVE | MTX_EXWANTED | 1)) {
			/*
			 * Last release, exclusive lock, with exclusive
			 * waiters.
			 *
			 * Leave the exclusive lock intact and the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK;
			nlock |= (lock & MTX_SHWANTED);
			++mycpu->gd_spinlocks_wr;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--mycpu->gd_spinlocks_wr;
				break;
			}
			--mycpu->gd_spinlocks_wr;
		} else {
			/*
			 * Not the last release (shared or exclusive)
			 */
			nlock = lock - 1;
			KKASSERT((nlock & MTX_MASK) != MTX_MASK);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Chain the exclusive lock to the next waiter, if any.  Called with the
 * lock held exclusively with a single ref count, and also with MTX_EXLINK
 * held.
 */
static void
mtx_chain_link(mtx_t mtx)
{
	mtx_link_t link;
	u_int	lock;
	u_int	nlock;
	u_int	clock;	/* bits we own and want to clear */

	/*
	 * Chain the exclusive lock to the next link.  The caller cleared
	 * SHWANTED so if there is no link we have to wake up any shared
	 * waiters.
	 */
	clock = MTX_EXLINK;
	if ((link = mtx->mtx_link) != NULL) {
		KKASSERT(link->state == MTX_LINK_LINKED);
		if (link->next == link) {
			mtx->mtx_link = NULL;
			clock |= MTX_EXWANTED;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ACQUIRED;
		mtx->mtx_owner = link->owner;
	} else {
		/*
		 * Chain was empty, release the exclusive lock's last count
		 * as well as the bits shown.
		 */
		clock |= MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1;
	}

	/*
	 * We have to use cmpset here to deal with MTX_SHWANTED.  If
	 * we just clear the bits we can miss a wakeup or, worse,
	 * leave mtx_lock unlocked with MTX_SHWANTED still set.
	 */
	for (;;) {
		lock = mtx->mtx_lock;
		nlock = lock & ~clock;

		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (link) {
				/*
				 * Wakeup new exclusive holder.  Leave
				 * SHWANTED intact.
				 */
				wakeup(link);
			} else if (lock & MTX_SHWANTED) {
				/*
				 * Signal any shared waiters (and we also
				 * clear SHWANTED).
				 */
				mtx->mtx_owner = NULL;
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t mtx, mtx_link_t link)
{
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK.
	 *
	 * Do not use cmpxchg to wait for EXLINK to clear as this might
	 * result in too much cpu cache traffic.
	 */
	++mycpu->gd_spinlocks_wr;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Delete the link and release EXLINK.
	 */
	if (link->state == MTX_LINK_LINKED) {
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_IDLE;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--mycpu->gd_spinlocks_wr;
}

/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the
 * mtx_link is initialized, including both before and after the call
 * to mtx_lock_ex_link().
 */
void
mtx_abort_ex_link(mtx_t mtx, mtx_link_t link)
{
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK
	 */
	++mycpu->gd_spinlocks_wr;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Do the abort
	 */
	switch(link->state) {
	case MTX_LINK_IDLE:
		/*
		 * Link not started yet
		 */
		link->state = MTX_LINK_ABORTED;
		break;
	case MTX_LINK_LINKED:
		/*
		 * de-link, mark aborted, and wakeup the thread.
		 */
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ABORTED;
		wakeup(link);
		break;
	case MTX_LINK_ACQUIRED:
		/*
		 * Too late, the lock was acquired.  Let it complete.
		 */
		break;
	default:
		/*
		 * link already aborted, do nothing.
		 */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--mycpu->gd_spinlocks_wr;
}
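
/*
 * Illustrative sketch (not compiled): an abortable exclusive acquisition
 * using an explicit mtx_link.  Another thread holding a pointer to the same
 * link can call mtx_abort_ex_link(mtx, link) to force the blocked
 * _mtx_lock_ex_link() to return ENOLCK.  Names are made up for the example.
 */
#if 0
static int
example_abortable_lock_ex(mtx_t mtx, mtx_link_t link)
{
	int error;

	mtx_link_init(link);
	error = _mtx_lock_ex_link(mtx, link, "abmtx", 0, 0);
	if (error) {
		/* ENOLCK here means mtx_abort_ex_link() aborted the wait */
		return (error);
	}

	/* ... exclusive critical section ... */

	_mtx_unlock(mtx);
	return (0);
}
#endif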
760