/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD$
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET and MTX_DUPOK
 * can also be passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_NOPROFILE	0x00000020	/* Don't profile this lock */
#define	MTX_NEW		0x00000040	/* Don't check for double-init */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
#define	MTX_DUPOK	LOP_DUPOK	/* Don't log a duplicate acquire */
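/*
 * Example (an illustrative sketch, not part of this header's KPI): a
 * typical MTX_DEF lifecycle, combining the flags above at mtx_init()
 * time.  The "foo" name and the sc_mtx variable are hypothetical.
 *
 *	struct mtx sc_mtx;
 *
 *	mtx_init(&sc_mtx, "foo", NULL, MTX_DEF | MTX_RECURSE);
 *	mtx_lock(&sc_mtx);
 *	...
 *	mtx_unlock(&sc_mtx);
 *	mtx_destroy(&sc_mtx);
 */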
/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type.  None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_UNOWNED	0x00000000	/* Cookie for free mutex */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_DESTROYED	0x00000004	/* lock destroyed */
#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE and to hide the lock cookie crunching from the
 *	 consumers.  These functions should not be called directly by any
 *	 code using the API.  Their macros cover their functionality.
 *	 Functions with a `_' suffix are the entrypoint for the common
 *	 KPI covering both compat shims and the fast path case.  These can be
 *	 used by consumers willing to pass options, file and line
 *	 information, in an option-independent way.
 *
 * [See below for descriptions]
 *
 */
void	_mtx_init(volatile uintptr_t *c, const char *name, const char *type,
	    int opts);
void	_mtx_destroy(volatile uintptr_t *c);
void	mtx_sysinit(void *arg);
int	_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF);
int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	mutex_init(void);
#if LOCK_DEBUG > 0
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v);
#endif
void	mtx_wait_unlocked(struct mtx *m);

#ifdef SMP
#if LOCK_DEBUG > 0
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
#endif
#endif
void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
int	__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	mtx_spin_wait_unlocked(struct mtx *m);

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	__mtx_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif
void	thread_lock_flags_(struct thread *, int, const char *, int);
#if LOCK_DEBUG > 0
void	_thread_lock(struct thread *td, int opts, const char *file, int line);
#else
void	_thread_lock(struct thread *);
#endif

#if defined(LOCK_PROFILING) || (defined(KLD_MODULE) && !defined(KLD_TIED))
#define	thread_lock(tdp)						\
	thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
#elif LOCK_DEBUG > 0
#define	thread_lock(tdp)						\
	_thread_lock((tdp), 0, __FILE__, __LINE__)
#else
#define	thread_lock(tdp)						\
	_thread_lock((tdp))
#endif

#if LOCK_DEBUG > 0
#define	thread_lock_flags(tdp, opt)					\
	thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
#else
#define	thread_lock_flags(tdp, opt)					\
	_thread_lock(tdp)
#endif

#define	thread_unlock(tdp)						\
	mtx_unlock_spin((tdp)->td_lock)
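/*
 * Example (an illustrative sketch): thread_lock() acquires the spin
 * lock that currently protects the thread's scheduling state, through
 * the td_lock indirection used above; the usual pairing is simply:
 *
 *	thread_lock(td);
 *	... modify td's scheduling state ...
 *	thread_unlock(td);
 */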
/*
 * Top-level macros to provide lock cookie once the actual mtx is passed.
 * They will also prevent passing a malformed object to the mtx KPI by
 * failing compilation as the mtx_lock reserved member will not be found.
 */
#define	mtx_init(m, n, t, o)						\
	_mtx_init(&(m)->mtx_lock, n, t, o)
#define	mtx_destroy(m)							\
	_mtx_destroy(&(m)->mtx_lock)
#define	mtx_trylock_flags_(m, o, f, l)					\
	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
#if LOCK_DEBUG > 0
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v, o, f, l)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v)
#endif
#ifdef SMP
#if LOCK_DEBUG > 0
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v)
#endif
#endif
#define	_mtx_lock_flags(m, o, f, l)					\
	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_flags(m, o, f, l)					\
	__mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_lock_spin_flags(m, o, f, l)				\
	__mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_trylock_spin_flags(m, o, f, l)				\
	__mtx_trylock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_spin_flags(m, o, f, l)				\
	__mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l)
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	_mtx_assert(m, w, f, l)						\
	__mtx_assert(&(m)->mtx_lock, w, f, l)
#endif

#define	mtx_recurse	lock_object.lo_data

/* Very simple operations on mtx_lock. */

/* Try to obtain mtx_lock once. */
#define	_mtx_obtain_lock(mp, tid)					\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))

#define	_mtx_obtain_lock_fetch(mp, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid))

/* Try to release mtx_lock if it is unrecursed and uncontested. */
#define	_mtx_release_lock(mp, tid)					\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)

/* Release mtx_lock quickly, assuming we own it. */
#define	_mtx_release_lock_quick(mp)					\
	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)

#define	_mtx_release_lock_fetch(mp, vp)					\
	atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, (vp), MTX_UNOWNED)

/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Lock a normal mutex. */
#define	__mtx_lock(mp, tid, opts, file, line) __extension__ ({		\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_sleep((mp), _v, (opts), (file), (line));	\
	(void)0; /* ensure void type for expression */			\
})
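/*
 * Illustrative sketch of the fast path above (not part of the KPI):
 * the lock word holds MTX_UNOWNED when the mutex is free and the
 * owning thread pointer (plus flag bits) when held, so an uncontested
 * acquire is a single fcmpset from MTX_UNOWNED to the thread pointer,
 * and anything else falls through to the slow path:
 *
 *	uintptr_t v = MTX_UNOWNED;
 *
 *	if (!atomic_fcmpset_acq_ptr(&m->mtx_lock, &v,
 *	    (uintptr_t)curthread))
 *		_mtx_lock_sleep(m, v, 0, __FILE__, __LINE__);
 */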
/*
 * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
 * turns out that function calls can be significantly expensive on
 * some architectures).  Since spin locks are not _too_ common,
 * inlining this code is not too big a deal.
 */
#ifdef SMP
#define	__mtx_lock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	spinlock_enter();						\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) ||	\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_spin((mp), _v, (opts), (file), (line));	\
	(void)0; /* ensure void type for expression */			\
})
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, \
		    mp, 0, 0, file, line);				\
		_ret = 1;						\
	}								\
	_ret;								\
})
#else /* SMP */
#define	__mtx_lock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock == _tid)					\
		(mp)->mtx_recurse++;					\
	else {								\
		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
		(mp)->mtx_lock = _tid;					\
	}								\
	(void)0; /* ensure void type for expression */			\
})
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock != MTX_UNOWNED) {				\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		(mp)->mtx_lock = _tid;					\
		_ret = 1;						\
	}								\
	_ret;								\
})
#endif /* SMP */

/* Unlock a normal mutex. */
#define	__mtx_unlock(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _v = (uintptr_t)(tid);				\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
	    !_mtx_release_lock_fetch((mp), &_v)))			\
		_mtx_unlock_sleep((mp), _v, (opts), (file), (line));	\
	(void)0; /* ensure void type for expression */			\
})

/*
 * Unlock a spin mutex.  For spinlocks, we can handle everything
 * inline, as it's pretty simple and a function call would be too
 * expensive (at least on some architectures).  Since spin locks are
 * not _too_ common, inlining this code is not too big a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifdef SMP
#define	__mtx_unlock_spin(mp) __extension__ ({				\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(spin__release, mp);	\
		_mtx_release_lock_quick((mp));				\
	}								\
	spinlock_exit();						\
})
#else /* SMP */
#define	__mtx_unlock_spin(mp) __extension__ ({				\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(spin__release, mp);	\
		(mp)->mtx_lock = MTX_UNOWNED;				\
	}								\
	spinlock_exit();						\
})
#endif /* SMP */
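/*
 * Example (an illustrative sketch): a spin mutex critical section.
 * The macros above issue spinlock_enter()/spinlock_exit(), so
 * interrupts stay disabled for the duration of the hold; the mutex
 * variable and name below are hypothetical.
 *
 *	mtx_init(&intr_mtx, "foo intr", NULL, MTX_SPIN);
 *	mtx_lock_spin(&intr_mtx);
 *	... touch state shared with interrupt handlers ...
 *	mtx_unlock_spin(&intr_mtx);
 */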
/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot.  Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion: a caller properly using this part of
 *     the interface is expected to know that the lock in question is
 *     _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 *     relevant option flags `opts.'
 *
 * mtx_trylock_spin(m) attempts to acquire MTX_SPIN mutex `m' but doesn't
 *     spin if it cannot.  Rather, it returns 0 on failure and non-zero on
 *     success.  It always returns failure for recursed lock attempts.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define	mtx_lock(m)		mtx_lock_flags((m), 0)
#define	mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define	mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define	mtx_trylock_spin(m)	mtx_trylock_spin_flags((m), 0)
#define	mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define	mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)

struct mtx_pool;

struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
void mtx_pool_destroy(struct mtx_pool **poolp);
struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
#define	mtx_pool_lock(pool, ptr)					\
	mtx_lock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_lock_spin(pool, ptr)					\
	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock(pool, ptr)					\
	mtx_unlock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock_spin(pool, ptr)					\
	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))

/*
 * mtxpool_sleep is a general purpose pool of sleep mutexes.
 */
extern struct mtx_pool *mtxpool_sleep;
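/*
 * Example (an illustrative sketch): pool mutexes provide ad-hoc
 * locking for objects that do not embed a mutex of their own.  The
 * pool member used for a given pointer is selected by mtx_pool_find(),
 * so the same `obj' address always maps to the same mutex:
 *
 *	mtx_pool_lock(mtxpool_sleep, obj);
 *	... short critical section keyed on obj ...
 *	mtx_pool_unlock(mtxpool_sleep, obj);
 */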
#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
#define	mtx_lock_flags_(m, opts, file, line)				\
	_mtx_lock_flags((m), (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	_mtx_unlock_flags((m), (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	_mtx_lock_spin_flags((m), (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	_mtx_trylock_spin_flags((m), (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	_mtx_unlock_spin_flags((m), (opts), (file), (line))
#else	/* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define	mtx_lock_flags_(m, opts, file, line)				\
	__mtx_lock((m), curthread, (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	__mtx_unlock((m), curthread, (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	__mtx_lock_spin((m), curthread, (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	__mtx_trylock_spin((m), curthread, (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	__mtx_unlock_spin((m))
#endif	/* LOCK_DEBUG > 0 || MUTEX_NOINLINE */

#ifdef INVARIANTS
#define	mtx_assert_(m, what, file, line)				\
	_mtx_assert((m), (what), (file), (line))

#define	GIANT_REQUIRED	mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__)

#else	/* INVARIANTS */
#define	mtx_assert_(m, what, file, line)	(void)0
#define	GIANT_REQUIRED
#endif	/* INVARIANTS */

#define	mtx_lock_flags(m, opts)						\
	mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_flags(m, opts)					\
	mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_spin_flags(m, opts)					\
	mtx_trylock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_assert(m, what)						\
	mtx_assert_((m), (what), __FILE__, __LINE__)

#define	mtx_sleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)

#define	MTX_READ_VALUE(m)	((m)->mtx_lock)

#define	mtx_initialized(m)	lock_initialized(&(m)->lock_object)

#define	lv_mtx_owner(v)	((struct thread *)((v) & ~MTX_FLAGMASK))

#define	mtx_owner(m)	lv_mtx_owner(MTX_READ_VALUE(m))

#define	mtx_owned(m)	(mtx_owner(m) == curthread)

#define	mtx_recursed(m)	((m)->mtx_recurse != 0)

#define	mtx_name(m)	((m)->lock_object.lo_name)

/*
 * Global locks.
 */
extern struct mtx Giant;
extern struct mtx blocked_lock;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace a return with a Giant drop followed by the return.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
 */
#ifndef DROP_GIANT
#define	DROP_GIANT()							\
do {									\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (__predict_false(mtx_owned(&Giant))) {			\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		for (_giantcnt = 0; mtx_owned(&Giant) &&		\
		    !SCHEDULER_STOPPED(); _giantcnt++)			\
			mtx_unlock(&Giant);				\
	}

#define	PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	if (__predict_false(_giantcnt > 0)) {				\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#endif

struct mtx_args {
	void		*ma_mtx;
	const char	*ma_desc;
	int		 ma_opts;
};

#define	MTX_SYSINIT(name, mtx, desc, opts)				\
	static struct mtx_args name##_args = {				\
		(mtx),							\
		(desc),							\
		(opts)							\
	};								\
	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_sysinit, &name##_args);					\
	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock))
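/*
 * Examples (illustrative sketches; the foo_mtx name is hypothetical):
 *
 * MTX_SYSINIT() arranges for a static mutex to be initialized at boot
 * via SYSINIT(), so no explicit mtx_init() call is needed:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global", MTX_DEF);
 *
 * DROP_GIANT()/PICKUP_GIANT() bracket a region that must run without
 * Giant; the two macros open and close a shared brace scope, so they
 * must always appear as a pair in the same block:
 *
 *	DROP_GIANT();
 *	... code that must not hold Giant ...
 *	PICKUP_GIANT();
 */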
/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined for the INVARIANT_SUPPORT
 * infrastructure as _mtx_assert() itself uses them, and
 * INVARIANT_SUPPORT implies that _mtx_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	MA_OWNED	LA_XLOCKED
#define	MA_NOTOWNED	LA_UNLOCKED
#define	MA_RECURSED	LA_RECURSED
#define	MA_NOTRECURSED	LA_NOTRECURSED
#endif

/*
 * Common lock type names.
 */
#define	MTX_NETWORK_LOCK	"network driver"

#endif /* _KERNEL */
#endif /* _SYS_MUTEX_H_ */