/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_MUTEX2_H_
#define _SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t *mtx, const char *ident)
{
	mtx->mtx_lock = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

/*
 * Initialize a mtx link structure for deeper control over the mutex
 * operation.
 */
static __inline void
mtx_link_init(mtx_link_t *link)
{
	link->state = MTX_LINK_IDLE;
	link->callback = NULL;
	link->arg = NULL;
}

/*
 * A link structure initialized this way causes mutex operations to not
 * block; the caller must specify a callback.  The caller may still abort
 * the mutex operation via the link.
 */
static __inline void
mtx_link_init_async(mtx_link_t *link,
		    void (*callback)(mtx_link_t *link, void *arg, int error),
		    void *arg)
{
	link->state = MTX_LINK_IDLE;
	link->callback = callback;
	link->arg = arg;
}

/*
 * Deinitialize a mutex.
 */
static __inline void
mtx_uninit(mtx_t *mtx)
{
	/* empty */
}
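/*
 * Usage sketch (illustrative, not part of the original header):
 * embedding a mutex in a structure and setting it up.  The structure
 * and function names here are hypothetical.
 */
struct example_obj {
	mtx_t		eo_lock;
	mtx_link_t	eo_link;
};

static __inline void
example_obj_init(struct example_obj *eo)
{
	mtx_init(&eo->eo_lock, "exobj");	/* unlocked, no refs */
	mtx_link_init(&eo->eo_link);		/* state MTX_LINK_IDLE */
}

static __inline void
example_obj_uninit(struct example_obj *eo)
{
	/* The lock must not be held or have pending waiters here. */
	mtx_uninit(&eo->eo_lock);
}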
/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility for the link structure, which is required
 * when calling mtx_abort_ex_link() or when requesting an asynchronous lock.
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, flags, to));
	mtx->mtx_owner = curthread;
	link->state = MTX_LINK_ACQUIRED;

	return(0);
}

/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, 0, 0).
 */
static __inline void
mtx_lock(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_ex_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_link(mtx, link, flags, to));
	link->state = MTX_LINK_ACQUIRED;
	return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, flags, to));
	return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx));
	return(0);
}

/*
 * Adds a shared lock reference to a lock already locked shared,
 * does not block on pending exclusive requests.
 */
static __inline void
mtx_lock_sh_again(mtx_t *mtx)
{
	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 &&
		 (mtx->mtx_lock & MTX_MASK) > 0);
	atomic_add_int(&mtx->mtx_lock, 1);
}
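/*
 * Usage sketch (illustrative, not part of the original header): an
 * asynchronous exclusive lock request using the link interface above.
 * The function names are hypothetical; the queued-request behavior is
 * assumed from the mtx_link_init_async() contract.
 */
static __inline void
example_got_lock(mtx_link_t *link, void *arg, int error)
{
	if (error == 0) {
		/* now hold the lock exclusively; do work, then unlock */
	} else {
		/* the request was aborted; the lock is not held */
	}
}

static __inline void
example_request_async(mtx_t *mtx, mtx_link_t *link)
{
	mtx_link_init_async(link, example_got_lock, NULL);
	if (mtx_lock_ex_link(mtx, link, 0, 0) == 0) {
		/* acquired immediately, link->state is MTX_LINK_ACQUIRED */
	} else {
		/*
		 * Assumed: the request remains queued and acquisition,
		 * or an abort issued with mtx_abort_ex_link(), is
		 * reported through the callback.
		 */
	}
}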
/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;

	/*
	 * If we cannot get it trivially, get it the hard way.
	 *
	 * Note that mtx_owner will be set twice if we fail to get it
	 * trivially, but there's no point conditionalizing it as a
	 * conditional will be slower.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock(mtx);
	mtx->mtx_owner = gd->gd_curthread;
}

static __inline int
mtx_spinlock_try(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;

	/*
	 * If we cannot get it trivially, call _mtx_spinlock_try().  This
	 * function will clean up the hard critical section if it fails.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_spinlock_try(mtx));
	mtx->mtx_owner = gd->gd_curthread;
	return (0);
}

/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.  Recursion is allowed.
 */
static __inline int
mtx_lock_ex_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t *mtx)
{
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1))
		return(0);
	return (_mtx_upgrade_try(mtx));
}
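/*
 * Usage sketch (illustrative, not part of the original header):
 * opportunistically upgrading a shared lock before modifying shared
 * state.  The function name is hypothetical and the fallback noted
 * in the comments is an assumption.
 */
static __inline int
example_try_upgrade(mtx_t *mtx)
{
	int error;

	/* Caller holds the lock shared; try to make it exclusive. */
	error = mtx_upgrade_try(mtx);	/* 0 or EDEADLK */
	if (error) {
		/*
		 * Assumed: on failure the shared lock is still held.
		 * A caller could instead drop it and re-acquire the
		 * lock exclusively with mtx_lock_ex_quick(), accepting
		 * that the protected state may change in between.
		 */
	}
	return (error);
}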
/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *	 both blocking and spin methods.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *	 or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t *mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_ex(mtx_t *mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	mtx_unlock(mtx);

	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t *mtx)
{
	return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.  Returns FALSE (0) if the mutex is unlocked or
 * if it is locked shared by one or more entities.
 *
 * A caller wishing to check whether a lock is owned exclusively by it
 * should use mtx_owned().
 */
static __inline int
mtx_islocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t *mtx)
{
	return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline
int
mtx_lockrefs(mtx_t *mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}
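/*
 * Usage sketch (illustrative, not part of the original header): using
 * the short forms together with mtx_owned() to assert ownership in a
 * locked-helper pattern.  The function names are hypothetical.
 */
static __inline void
example_modify_locked(mtx_t *mtx, int *counterp)
{
	KKASSERT(mtx_owned(mtx));	/* caller holds it exclusively */
	++*counterp;
}

static __inline void
example_modify(mtx_t *mtx, int *counterp)
{
	mtx_lock(mtx);			/* short-form exclusive lock */
	example_modify_locked(mtx, counterp);
	mtx_unlock(mtx);		/* short-form lock pairs with mtx_unlock() */
}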
/*
 * Lock must be held and will be released on return.  Returns a state
 * which can be passed to mtx_lock_temp_restore() to return the
 * lock to its previous state.
 */
static __inline
mtx_state_t
mtx_lock_temp_release(mtx_t *mtx)
{
	mtx_state_t state;

	state = (mtx->mtx_lock & MTX_EXCLUSIVE);
	mtx_unlock(mtx);

	return state;
}

/*
 * Restore the previous state of a lock released with
 * mtx_lock_temp_release() or mtx_lock_upgrade().
 */
static __inline
void
mtx_lock_temp_restore(mtx_t *mtx, mtx_state_t state)
{
	if (state & MTX_EXCLUSIVE)
		mtx_lock_ex_quick(mtx);
	else
		mtx_lock_sh_quick(mtx);
}

#endif
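/*
 * Usage sketch (illustrative, not part of the original header):
 * temporarily dropping a held lock across a blocking operation and
 * then reacquiring it in its previous shared or exclusive mode.
 * some_blocking_op() is hypothetical.
 *
 *	mtx_state_t state;
 *
 *	state = mtx_lock_temp_release(mtx);
 *	error = some_blocking_op();
 *	mtx_lock_temp_restore(mtx, state);
 */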