/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD$
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET and MTX_DUPOK
 * can also be passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_NOPROFILE	0x00000020	/* Don't profile this lock */
#define	MTX_NEW		0x00000040	/* Don't check for double-init */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
#define	MTX_DUPOK	LOP_DUPOK	/* Don't log a duplicate acquire */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type.  None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_UNOWNED	0x00000000	/* Cookie for free mutex */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_DESTROYED	0x00000004	/* lock destroyed */
#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)
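/*
 * Illustrative sketch (not part of the KPI): for an owned MTX_DEF lock,
 * mtx_lock holds the owning thread pointer with the flag bits above folded
 * into its low bits, so the owner is recovered by masking the flags off
 * (this is what lv_mtx_owner() below does):
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *owner = (struct thread *)(v & ~MTX_FLAGMASK);
 *	int recursed = (v & MTX_RECURSED) != 0;
 */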
/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE and to hide the lock cookie handling from the
 *	 consumers.  These functions should not be called directly by any
 *	 code using the API; their macros cover their functionality.
 *	 Functions with a `_' suffix are the entry point for the common
 *	 KPI covering both compat shims and the fast path.  Consumers can
 *	 use them to pass options and file/line information in an
 *	 option-independent way.
 *
 * [See below for descriptions]
 *
 */
void	_mtx_init(volatile uintptr_t *c, const char *name, const char *type,
	    int opts);
void	_mtx_destroy(volatile uintptr_t *c);
void	mtx_sysinit(void *arg);
int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	mutex_init(void);
#if LOCK_DEBUG > 0
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
void	__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
	    int line);
#else
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v);
void	__mtx_unlock_sleep(volatile uintptr_t *c);
#endif

#ifdef SMP
#if LOCK_DEBUG > 0
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
#endif
#endif
void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_lock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
int	__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	__mtx_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif
void	thread_lock_flags_(struct thread *, int, const char *, int);
#if LOCK_DEBUG > 0
void	_thread_lock(struct thread *td, int opts, const char *file, int line);
#else
void	_thread_lock(struct thread *);
#endif

#if defined(LOCK_PROFILING) || defined(KLD_MODULE)
#define	thread_lock(tdp)						\
	thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
#elif LOCK_DEBUG > 0
#define	thread_lock(tdp)						\
	_thread_lock((tdp), 0, __FILE__, __LINE__)
#else
#define	thread_lock(tdp)						\
	_thread_lock((tdp))
#endif

#define	thread_lock_flags(tdp, opt)					\
	thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
#define	thread_unlock(tdp)						\
	mtx_unlock_spin((tdp)->td_lock)

/*
 * Top-level macros that provide the lock cookie once the actual mtx is
 * passed.  They also prevent passing a malformed object to the mtx KPI:
 * compilation fails if the reserved mtx_lock member cannot be found.
 */
#define	mtx_init(m, n, t, o)						\
	_mtx_init(&(m)->mtx_lock, n, t, o)
#define	mtx_destroy(m)							\
	_mtx_destroy(&(m)->mtx_lock)
#define	mtx_trylock_flags_(m, o, f, l)					\
	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
#if LOCK_DEBUG > 0
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v, o, f, l)
#define	_mtx_unlock_sleep(m, o, f, l)					\
	__mtx_unlock_sleep(&(m)->mtx_lock, o, f, l)
#else
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v)
#define	_mtx_unlock_sleep(m, o, f, l)					\
	__mtx_unlock_sleep(&(m)->mtx_lock)
#endif
#ifdef SMP
#if LOCK_DEBUG > 0
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v)
#endif
#endif
#define	_mtx_lock_flags(m, o, f, l)					\
	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_flags(m, o, f, l)					\
	__mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_lock_spin_flags(m, o, f, l)				\
	__mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_trylock_spin_flags(m, o, f, l)				\
	__mtx_trylock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_spin_flags(m, o, f, l)				\
	__mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l)
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	_mtx_assert(m, w, f, l)						\
	__mtx_assert(&(m)->mtx_lock, w, f, l)
#endif

#define	mtx_recurse	lock_object.lo_data
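/*
 * Example (illustrative; the lock name is hypothetical): the usual consumer
 * pattern built from the cookie macros above and the mtx_lock()/mtx_unlock()
 * wrappers defined further below:
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	... modify the state protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 */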
/* Very simple operations on mtx_lock. */

/* Try to obtain mtx_lock once. */
#define	_mtx_obtain_lock(mp, tid)					\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))

#define	_mtx_obtain_lock_fetch(mp, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid))

/* Try to release mtx_lock if it is unrecursed and uncontested. */
#define	_mtx_release_lock(mp, tid)					\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)

/* Release mtx_lock quickly, assuming we own it. */
#define	_mtx_release_lock_quick(mp)					\
	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)

/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Lock a normal mutex. */
#define	__mtx_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_sleep((mp), _v, (opts), (file), (line));	\
} while (0)

/*
 * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
 * turns out that function calls can be significantly expensive on
 * some architectures).  Since spin locks are not _too_ common,
 * inlining this code is not too big a deal.
 */
#ifdef SMP
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	spinlock_enter();						\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) ||	\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_spin((mp), _v, (opts), (file), (line));	\
} while (0)
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,	\
		    mp, 0, 0, file, line);				\
		_ret = 1;						\
	}								\
	_ret;								\
})
#else /* SMP */
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock == _tid)					\
		(mp)->mtx_recurse++;					\
	else {								\
		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
		(mp)->mtx_lock = _tid;					\
	}								\
} while (0)
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock != MTX_UNOWNED) {				\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		(mp)->mtx_lock = _tid;					\
		_ret = 1;						\
	}								\
	_ret;								\
})
#endif /* SMP */

/* Unlock a normal mutex. */
#define	__mtx_unlock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
	    !_mtx_release_lock((mp), _tid)))				\
		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
} while (0)

/*
 * Unlock a spin mutex.  For spinlocks, we can handle everything
 * inline, as it's pretty simple and a function call would be too
 * expensive (at least on some architectures).  Since spin locks are
 * not _too_ common, inlining this code is not too big a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifdef SMP
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp);	\
		_mtx_release_lock_quick((mp));				\
	}								\
	spinlock_exit();						\
} while (0)
#else /* SMP */
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp);	\
		(mp)->mtx_lock = MTX_UNOWNED;				\
	}								\
	spinlock_exit();						\
} while (0)
#endif /* SMP */
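/*
 * Example (illustrative; the lock name is hypothetical): because the spin
 * fast path above calls spinlock_enter(), interrupts stay disabled on the
 * local CPU for as long as the lock is held, so critical sections must be
 * short and must never sleep:
 *
 *	mtx_lock_spin(&foo_spin_mtx);
 *	... brief, non-sleeping work ...
 *	mtx_unlock_spin(&foo_spin_mtx);
 */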
/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 * and pass option flags `opts' to the "hard" function, if required.
 * With these routines, it is possible to pass flags such as MTX_QUIET
 * to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 * it cannot.  Rather, it returns 0 on failure and non-zero on success.
 * It does NOT handle recursion: we assume that a caller properly using
 * this part of the interface knows that the lock in question is _not_
 * recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but
 * accepts relevant option flags `opts.'
 *
 * mtx_trylock_spin(m) attempts to acquire MTX_SPIN mutex `m' but doesn't
 * spin if it cannot.  Rather, it returns 0 on failure and non-zero on
 * success.  It always returns failure for recursed lock attempts.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define	mtx_lock(m)		mtx_lock_flags((m), 0)
#define	mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define	mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define	mtx_trylock_spin(m)	mtx_trylock_spin_flags((m), 0)
#define	mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define	mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)
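/*
 * Example (illustrative; foo_mtx is hypothetical): mtx_trylock() never
 * sleeps, so the caller must be prepared to handle failure:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... the lock was free and is now held ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... contested; defer the work or fall back ...
 *	}
 */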
struct mtx_pool;

struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size,
	    int opts);
void mtx_pool_destroy(struct mtx_pool **poolp);
struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
#define	mtx_pool_lock(pool, ptr)					\
	mtx_lock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_lock_spin(pool, ptr)					\
	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock(pool, ptr)					\
	mtx_unlock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock_spin(pool, ptr)					\
	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))

/*
 * mtxpool_sleep is a general purpose pool of sleep mutexes.
 */
extern struct mtx_pool *mtxpool_sleep;
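/*
 * Example (illustrative): pool mutexes let a pointer-sized key select a
 * shared lock, avoiding an embedded mutex in every small object.  The same
 * pointer must be used for lock and unlock so both map to the same pool
 * entry:
 *
 *	mtx_pool_lock(mtxpool_sleep, obj);
 *	... operate on *obj ...
 *	mtx_pool_unlock(mtxpool_sleep, obj);
 */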
#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
#define	mtx_lock_flags_(m, opts, file, line)				\
	_mtx_lock_flags((m), (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	_mtx_unlock_flags((m), (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	_mtx_lock_spin_flags((m), (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	_mtx_trylock_spin_flags((m), (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	_mtx_unlock_spin_flags((m), (opts), (file), (line))
#else	/* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define	mtx_lock_flags_(m, opts, file, line)				\
	__mtx_lock((m), curthread, (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	__mtx_unlock((m), curthread, (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	__mtx_lock_spin((m), curthread, (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	__mtx_trylock_spin((m), curthread, (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	__mtx_unlock_spin((m))
#endif	/* LOCK_DEBUG > 0 || MUTEX_NOINLINE */

#ifdef INVARIANTS
#define	mtx_assert_(m, what, file, line)				\
	_mtx_assert((m), (what), (file), (line))

#define	GIANT_REQUIRED	mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__)

#else	/* INVARIANTS */
#define	mtx_assert_(m, what, file, line)	(void)0
#define	GIANT_REQUIRED
#endif	/* INVARIANTS */

#define	mtx_lock_flags(m, opts)						\
	mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_flags(m, opts)					\
	mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_spin_flags(m, opts)					\
	mtx_trylock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_assert(m, what)						\
	mtx_assert_((m), (what), __FILE__, __LINE__)

#define	mtx_sleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)

#define	MTX_READ_VALUE(m)	((m)->mtx_lock)

#define	mtx_initialized(m)	lock_initialized(&(m)->lock_object)

#define	lv_mtx_owner(v)	((struct thread *)((v) & ~MTX_FLAGMASK))

#define	mtx_owner(m)	lv_mtx_owner(MTX_READ_VALUE(m))

#define	mtx_owned(m)	(mtx_owner(m) == curthread)

#define	mtx_recursed(m)	((m)->mtx_recurse != 0)

#define	mtx_name(m)	((m)->lock_object.lo_name)

/*
 * Global locks.
 */
extern struct mtx Giant;
extern struct mtx blocked_lock;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace a plain return with one that drops Giant first.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
 */
#ifndef DROP_GIANT
#define	DROP_GIANT()							\
do {									\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		for (_giantcnt = 0; mtx_owned(&Giant) &&		\
		    !SCHEDULER_STOPPED(); _giantcnt++)			\
			mtx_unlock(&Giant);				\
	}

#define	PICKUP_GIANT()							\
	PARTIAL_PICKUP_GIANT();						\
} while (0)

#define	PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	if (_giantcnt > 0) {						\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}
#endif

struct mtx_args {
	void		*ma_mtx;
	const char	*ma_desc;
	int		 ma_opts;
};

#define	MTX_SYSINIT(name, mtx, desc, opts)				\
	static struct mtx_args name##_args = {				\
		(mtx),							\
		(desc),							\
		(opts)							\
	};								\
	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_sysinit, &name##_args);					\
	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock))
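/*
 * Example (illustrative; names are hypothetical): MTX_SYSINIT() arranges
 * for the mutex to be initialized automatically during boot, without an
 * explicit mtx_init() call in the subsystem's own code:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo mutex", MTX_DEF);
 */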
/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants also need to be defined for INVARIANT_SUPPORT
 * infrastructure support, since _mtx_assert() itself uses them and
 * therefore must still build in that case.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	MA_OWNED	LA_XLOCKED
#define	MA_NOTOWNED	LA_UNLOCKED
#define	MA_RECURSED	LA_RECURSED
#define	MA_NOTRECURSED	LA_NOTRECURSED
#endif

/*
 * Common lock type names.
 */
#define	MTX_NETWORK_LOCK	"network driver"

#endif	/* _KERNEL */
#endif	/* _SYS_MUTEX_H_ */