/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_SYS_MUTEX2_H_
#define	_SYS_MUTEX2_H_

#ifndef	_SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef	_SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef	_SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t *mtx, const char *ident)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

static __inline void
mtx_init_flags(mtx_t *mtx, const char *ident, uint32_t flags)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = flags;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

/*
 * Initialize a mtx link structure for deeper control over the mutex
 * operation.
 */
static __inline void
mtx_link_init(mtx_link_t *link)
{
	link->state = MTX_LINK_IDLE;
	link->callback = NULL;
	link->arg = NULL;
}

/*
 * A link structure initialized this way causes mutex operations not to
 * block; the caller must supply a callback.  The caller may still abort
 * the mutex operation via the link.
 */
static __inline void
mtx_link_init_async(mtx_link_t *link,
		    void (*callback)(mtx_link_t *link, void *arg, int error),
		    void *arg)
{
	link->state = MTX_LINK_IDLE;
	link->callback = callback;
	link->arg = arg;
}

/*
 * Deinitialize a mutex.
 */
static __inline void
mtx_uninit(mtx_t *mtx)
{
	/* empty */
}
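
/*
 * Usage sketch (illustrative only): a typical blocking consumer embeds
 * the mutex in its own structure, initializes it once, and brackets the
 * critical section with mtx_lock()/mtx_unlock().  The names my_softc,
 * my_attach and my_op are hypothetical.
 *
 *	struct my_softc {
 *		mtx_t	sc_mtx;
 *	};
 *
 *	static void
 *	my_attach(struct my_softc *sc)
 *	{
 *		mtx_init(&sc->sc_mtx, "mysoftc");
 *	}
 *
 *	static void
 *	my_op(struct my_softc *sc)
 *	{
 *		mtx_lock(&sc->sc_mtx);
 *		// ... exclusive critical section ...
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 */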

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility for the link structure, which is required
 * when calling mtx_abort_ex_link() or when requesting an asynchronous lock.
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, flags, to));
	mtx->mtx_owner = curthread;
	link->state = MTX_LINK_ACQUIRED;

	return(0);
}

/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, 0, 0).
 */
static __inline void
mtx_lock(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_ex_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_link(mtx, link, flags, to));
	link->state = MTX_LINK_ACQUIRED;
	return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, flags, to));
	return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx));
	return(0);
}

/*
 * Add a shared lock reference to a lock that is already locked shared;
 * does not block on a pending exclusive request.
 */
static __inline void
mtx_lock_sh_again(mtx_t *mtx)
{
	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 &&
		 (mtx->mtx_lock & MTX_MASK) > 0);
	atomic_add_int(&mtx->mtx_lock, 1);
}
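
/*
 * Usage sketch (illustrative only): an interruptible shared-lock
 * acquisition.  PCATCH allows a signal to interrupt the sleep, in which
 * case the tsleep() error code is propagated; the timeout argument is
 * assumed to be in ticks, with 0 meaning no timeout.  The name
 * my_read_op is hypothetical.
 *
 *	static int
 *	my_read_op(mtx_t *mtx)
 *	{
 *		int error;
 *
 *		error = mtx_lock_sh(mtx, PCATCH, 0);
 *		if (error)
 *			return(error);
 *		// ... read-only critical section ...
 *		mtx_unlock_sh(mtx);
 *		return(0);
 *	}
 */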

/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	crit_enter_quick(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially, get it the hard way.
	 *
	 * Note that mtx_owner will be set twice if we fail to get it
	 * trivially, but there's no point conditionalizing it as a
	 * conditional will be slower.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock(mtx);
	mtx->mtx_owner = gd->gd_curthread;
}

static __inline int
mtx_spinlock_try(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	crit_enter_quick(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially, call _mtx_spinlock_try().  That
	 * function will clean up the hard critical section if it fails.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_spinlock_try(mtx));
	mtx->mtx_owner = gd->gd_curthread;
	return (0);
}

/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}

/*
 * Downgrade an exclusive lock to a shared lock.  If the lock is held
 * exclusively it must be owned by the caller.  If the lock is already
 * a shared lock this operation is a NOP.  A panic will occur if the
 * lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) &&
		 mtx->mtx_owner == gd->gd_curthread);
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = curthread;
		return(0);
	}
	return (_mtx_upgrade_try(mtx));
}
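
/*
 * Usage sketch (illustrative only): the lookup-then-modify pattern built
 * on mtx_upgrade_try()/mtx_downgrade().  The sketch assumes the shared
 * hold is retained when the upgrade fails, in which case the caller
 * relocks exclusively and revalidates.  The name my_update is
 * hypothetical.
 *
 *	static void
 *	my_update(mtx_t *mtx)
 *	{
 *		mtx_lock_sh_quick(mtx);
 *		// ... read-only lookup ...
 *		if (mtx_upgrade_try(mtx) != 0) {
 *			// Upgrade raced; relock exclusively and revalidate.
 *			mtx_unlock_sh(mtx);
 *			mtx_lock(mtx);
 *		}
 *		// ... modify under the exclusive lock ...
 *		mtx_downgrade(mtx);
 *		// ... continue reading under the shared lock ...
 *		mtx_unlock_sh(mtx);
 *	}
 */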

/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *	 both blocking and spin methods.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *	 or mtx_spinlock() to lock, it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;
	u_int lock = mtx->mtx_lock;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
		 mtx->mtx_owner == gd->gd_curthread);
	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_ex(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;
	u_int lock = mtx->mtx_lock;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
		 mtx->mtx_owner == gd->gd_curthread);
	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	mtx_unlock(mtx);

	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_quick(gd->gd_curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t *mtx)
{
	return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.  Returns FALSE (0) if the mutex is unlocked or
 * if it is locked shared by one or more entities.
 *
 * A caller wishing to check whether a lock is owned exclusively by it
 * should use mtx_owned().
 */
static __inline int
mtx_islocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t *mtx)
{
	return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline
int
mtx_lockrefs(mtx_t *mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}
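
/*
 * Usage sketch (illustrative only): the spin forms must be paired because
 * they also manage the per-cpu spinlock count and the hard critical
 * section, and mtx_owned() is the natural assertion for code that
 * requires the caller to hold the lock exclusively.  The names
 * my_short_op and my_locked_op are hypothetical.
 *
 *	static void
 *	my_short_op(mtx_t *mtx)
 *	{
 *		mtx_spinlock(mtx);
 *		// ... short, non-blocking critical section ...
 *		mtx_spinunlock(mtx);
 *	}
 *
 *	static void
 *	my_locked_op(mtx_t *mtx)
 *	{
 *		KKASSERT(mtx_owned(mtx));
 *		// ... requires the exclusive lock to be held ...
 *	}
 */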

/*
 * Lock must be held and will be released on return.  Returns the state,
 * which can be passed to mtx_lock_temp_restore() to return the
 * lock to its previous state.
 */
static __inline
mtx_state_t
mtx_lock_temp_release(mtx_t *mtx)
{
	mtx_state_t state;

	state = (mtx->mtx_lock & MTX_EXCLUSIVE);
	mtx_unlock(mtx);

	return state;
}

/*
 * Restore the previous state of a lock released with
 * mtx_lock_temp_release() or mtx_lock_upgrade().
 */
static __inline
void
mtx_lock_temp_restore(mtx_t *mtx, mtx_state_t state)
{
	if (state & MTX_EXCLUSIVE)
		mtx_lock_ex_quick(mtx);
	else
		mtx_lock_sh_quick(mtx);
}

#endif