/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_SYS_MUTEX2_H_
#define	_SYS_MUTEX2_H_

#ifndef	_SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef	_SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef	_SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t *mtx, const char *ident)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

static __inline void
mtx_init_flags(mtx_t *mtx, const char *ident, uint32_t flags)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = flags;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

/*
 * Initialize a mtx link structure for deeper control over the mutex
 * operation.
 */
static __inline void
mtx_link_init(mtx_link_t *link)
{
	link->state = MTX_LINK_IDLE;
	link->callback = NULL;
	link->arg = NULL;
}

/*
 * A link structure initialized this way causes mutex operations to not
 * block; the caller must specify a callback.  The caller may still abort
 * the mutex via the link.
 */
static __inline void
mtx_link_init_async(mtx_link_t *link,
		    void (*callback)(mtx_link_t *link, void *arg, int error),
		    void *arg)
{
	link->state = MTX_LINK_IDLE;
	link->callback = callback;
	link->arg = arg;
}

/*
 * Deinitialize a mutex.
 */
static __inline void
mtx_uninit(mtx_t *mtx)
{
	/* empty */
}
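/*
 * Usage sketch (hypothetical caller, not part of this header): a consumer
 * typically embeds an mtx in its own structure and initializes it once
 * before first use.  The "mydev" structure, callback, and ident string
 * below are assumptions for illustration only.
 *
 *	struct mydev {
 *		mtx_t	lock;
 *	};
 *
 *	static void
 *	mydev_init(struct mydev *dev)
 *	{
 *		mtx_init(&dev->lock, "mydev");
 *	}
 *
 *	static void
 *	mydev_lock_cb(mtx_link_t *link, void *arg, int error)
 *	{
 *		... runs when an async lock attempt completes or aborts ...
 *	}
 *
 *	mtx_link_t link;
 *	mtx_link_init_async(&link, mydev_lock_cb, dev);
 */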
/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility for the link structure, which is required
 * when calling mtx_abort_ex_link() or when requesting an asynchronous lock.
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, flags, to));
	mtx->mtx_owner = curthread;
	link->state = MTX_LINK_ACQUIRED;

	return(0);
}

/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, 0, 0).
 */
static __inline void
mtx_lock(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_ex_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_link(mtx, link, flags, to));
	link->state = MTX_LINK_ACQUIRED;
	return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, flags, to));
	return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx));
	return(0);
}

/*
 * Adds a shared lock reference to a lock already locked shared; does not
 * block on a pending exclusive request.
 */
static __inline void
mtx_lock_sh_again(mtx_t *mtx)
{
	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 &&
		 (mtx->mtx_lock & MTX_MASK) > 0);
	atomic_add_int(&mtx->mtx_lock, 1);
}
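/*
 * Usage sketch (hypothetical): an interruptible exclusive lock with a
 * one-second timeout, and an abortable lock via a caller-visible link.
 * "dev" continues the example above; mtx_abort_ex_link() is declared
 * with the rest of this API.
 *
 *	int error;
 *	mtx_link_t link;
 *
 *	error = mtx_lock_ex(&dev->lock, PCATCH, hz);
 *	if (error == 0) {
 *		...
 *		mtx_unlock(&dev->lock);
 *	}
 *
 *	mtx_link_init(&link);
 *	error = mtx_lock_ex_link(&dev->lock, &link, 0, 0);
 *
 * While the wait is in progress, another thread holding a pointer to the
 * link may call mtx_abort_ex_link(&dev->lock, &link) to abort it.
 */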
/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section.
	 */
	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially, get it the hard way.
	 *
	 * Note that mtx_owner will be set twice if we fail to get it
	 * trivially, but there's no point conditionalizing it as a
	 * conditional will be slower.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock(mtx);
	mtx->mtx_owner = gd->gd_curthread;
}

static __inline int
mtx_spinlock_try(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section.
	 */
	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially, call _mtx_spinlock_try().  This
	 * function will clean up the hard critical section if it fails.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_spinlock_try(mtx));
	mtx->mtx_owner = gd->gd_curthread;
	return (0);
}

/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t *mtx)
{
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = curthread;
		return(0);
	}
	return (_mtx_upgrade_try(mtx));
}
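/*
 * Usage sketch (hypothetical): the read-mostly pattern, taking a shared
 * lock and upgrading only when a modification turns out to be needed.
 * needs_update()/do_update() are assumptions for illustration.  On an
 * EDEADLK failure the caller is assumed to still hold its shared
 * reference, so the sketch releases it and re-acquires exclusively;
 * real code would re-test its precondition after the re-acquire.
 *
 *	mtx_lock_sh_quick(&dev->lock);
 *	if (needs_update(dev)) {
 *		if (mtx_upgrade_try(&dev->lock) != 0) {
 *			mtx_unlock_sh(&dev->lock);
 *			mtx_lock_ex_quick(&dev->lock);
 *		}
 *		do_update(dev);
 *		mtx_downgrade(&dev->lock);
 *	}
 *	mtx_unlock_sh(&dev->lock);
 */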
/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *	 both blocking and spin methods.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *	 or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t *mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_ex(mtx_t *mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	mtx_unlock(mtx);

	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t *mtx)
{
	return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.  Returns FALSE (0) if the mutex is unlocked or
 * if it is locked shared by one or more entities.
 *
 * A caller wishing to check whether a lock is owned exclusively by it
 * should use mtx_owned().
 */
static __inline int
mtx_islocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t *mtx)
{
	return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline
int
mtx_lockrefs(mtx_t *mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}

/*
 * Lock must be held and will be released on return.  Returns state
 * which can be passed to mtx_lock_temp_restore() to return the
 * lock to its previous state.
 */
static __inline
mtx_state_t
mtx_lock_temp_release(mtx_t *mtx)
{
	mtx_state_t state;

	state = (mtx->mtx_lock & MTX_EXCLUSIVE);
	mtx_unlock(mtx);

	return state;
}

/*
 * Restore the previous state of a lock released with
 * mtx_lock_temp_release() or mtx_lock_upgrade().
 */
static __inline
void
mtx_lock_temp_restore(mtx_t *mtx, mtx_state_t state)
{
	if (state & MTX_EXCLUSIVE)
		mtx_lock_ex_quick(mtx);
	else
		mtx_lock_sh_quick(mtx);
}
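/*
 * Usage sketch (hypothetical): temporarily release a lock around a
 * blocking operation, then restore its previous shared or exclusive
 * state.  blocking_operation() is an assumption for illustration.
 *
 *	mtx_state_t state;
 *
 *	KKASSERT(mtx_islocked(&dev->lock));
 *	state = mtx_lock_temp_release(&dev->lock);
 *	blocking_operation(dev);	... lock is not held here ...
 *	mtx_lock_temp_restore(&dev->lock, state);
 */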
#endif