/*-
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation assures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 *
 * Implementation notes:
 *
 * All lock state is protected by a pool mutex (sx->sx_lock, obtained from
 * mtxpool_lockbuilder in sx_init()).  The state itself is encoded in
 * sx->sx_cnt:
 *
 *	sx_cnt > 0	held shared by sx_cnt threads
 *	sx_cnt == 0	unlocked
 *	sx_cnt == -1	held exclusively by sx_xholder
 *
 * Blocked threads sleep on one of two condition variables (sx_shrd_cv for
 * would-be shared holders, sx_excl_cv for would-be exclusive holders);
 * sx_shrd_wcnt and sx_excl_wcnt count the sleepers on each so the unlock
 * paths know whom to wake.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/lock_profile.h>

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_sx(struct lock_object *lock);
#endif

struct lock_class lock_class_sx = {
	"sx",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	db_show_sx
#endif
};

#ifndef INVARIANTS
/* Without INVARIANTS, _sx_assert() calls compile away to nothing. */
#define	_sx_assert(sx, what, file, line)
#endif

/*
 * SYSINIT callback: initialize an sx lock described by a struct sx_args
 * (presumably registered via an SX_SYSINIT()-style macro -- the registration
 * side is not visible in this file).
 */
void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init(sargs->sa_sx, sargs->sa_desc);
}

/*
 * Initialize an sx lock to the unlocked state and register it with
 * witness/lock profiling.  The backing mutex is shared from the
 * lockbuilder mutex pool rather than embedded in the sx lock itself.
 */
void
sx_init(struct sx *sx, const char *description)
{

	sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
	sx->sx_cnt = 0;
	cv_init(&sx->sx_shrd_cv, description);
	sx->sx_shrd_wcnt = 0;
	cv_init(&sx->sx_excl_cv, description);
	sx->sx_excl_wcnt = 0;
	sx->sx_xholder = NULL;
	lock_profile_object_init(&sx->sx_object, &lock_class_sx, description);
	lock_init(&sx->sx_object, &lock_class_sx, description, NULL,
	    LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
}

/*
 * Tear down an sx lock.  The lock must be unlocked and have no waiters.
 * The pool mutex is merely unreferenced, not destroyed -- it is owned by
 * the mutex pool.
 */
void
sx_destroy(struct sx *sx)
{

	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
	    0), ("%s (%s): holders or waiters\n", __func__,
	    sx->sx_object.lo_name));

	sx->sx_lock = NULL;
	cv_destroy(&sx->sx_shrd_cv);
	cv_destroy(&sx->sx_excl_cv);

	lock_profile_object_destroy(&sx->sx_object);
	lock_destroy(&sx->sx_object);
}

/*
 * Acquire a shared lock, sleeping as long as an exclusive holder exists.
 * Recursing from shared onto an exclusive hold would self-deadlock, hence
 * the KASSERT.
 */
void
_sx_slock(struct sx *sx, const char *file, int line)
{
	uint64_t waittime = 0;
	int contested = 0;

	mtx_lock(sx->sx_lock);
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
	    sx->sx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER, file, line);

	/*
	 * Loop in case we lose the race for lock acquisition: cv_wait()
	 * drops and reacquires sx_lock, so another thread may have grabbed
	 * an xlock before we run again.
	 */
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		lock_profile_obtain_lock_failed(&sx->sx_object, &contested, &waittime);
		cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

	/* Only the first shared holder accounts an acquisition to profiling. */
	if (sx->sx_cnt == 1)
		lock_profile_obtain_lock_success(&sx->sx_object, contested, waittime, file, line);

	LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, 0, file, line);
	curthread->td_locks++;

	mtx_unlock(sx->sx_lock);
}

/*
 * Try to acquire a shared lock without sleeping.  Succeeds whenever no
 * exclusive holder exists.  Returns 1 on success, 0 on failure.
 */
int
_sx_try_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt >= 0) {
		sx->sx_cnt++;
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
		curthread->td_locks++;
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

/*
 * Acquire an exclusive lock, sleeping as long as any holder (shared or
 * exclusive) exists.
 */
void
_sx_xlock(struct sx *sx, const char *file, int line)
{
	int contested = 0;
	uint64_t waittime = 0;

	mtx_lock(sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock).  Normally, recursion is handled
	 * by WITNESS, but as it is not semantically correct to hold the
	 * xlock while in here, we consider it API abuse and put it under
	 * INVARIANTS.
	 */
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): xlock already held @ %s:%d", __func__,
	    sx->sx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);

	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		lock_profile_obtain_lock_failed(&sx->sx_object, &contested, &waittime);
		cv_wait(&sx->sx_excl_cv, sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock: 0 -> -1. */
	sx->sx_cnt--;
	sx->sx_xholder = curthread;

	lock_profile_obtain_lock_success(&sx->sx_object, contested, waittime, file, line);
	LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;

	mtx_unlock(sx->sx_lock);
}

/*
 * Try to acquire an exclusive lock without sleeping.  Succeeds only when
 * the lock is completely idle.  Returns 1 on success, 0 on failure.
 */
int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt == 0) {
		sx->sx_cnt--;
		sx->sx_xholder = curthread;
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
		    line);
		curthread->td_locks++;
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

/*
 * Release a shared lock.
 */
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

	/* Profiling release mirrors the "first holder" accounting in slock. */
	if (sx->sx_cnt == 0) {
		lock_profile_release_lock(&sx->sx_object);
	}

	/*
	 * If we just released the last shared lock, wake any waiters up, giving
	 * exclusive lockers precedence.  In order to make sure that exclusive
	 * lockers won't be blocked forever, don't wake shared lock waiters if
	 * there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

/*
 * Release an exclusive lock and wake waiters.  Note the wakeup precedence
 * is the opposite of _sx_sunlock(): shared waiters are preferred here,
 * which is what interleaves slocks and xlocks as promised in the file
 * header comment.
 */
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{
	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	/* Release: -1 -> 0. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);

	lock_profile_release_lock(&sx->sx_object);
	mtx_unlock(sx->sx_lock);
}

/*
 * Try to upgrade a shared lock to an exclusive lock.  Only possible when
 * the caller is the sole shared holder (sx_cnt == 1); otherwise fail
 * rather than sleep.  Returns 1 on success, 0 on failure.
 * td_locks is intentionally untouched: the caller still holds exactly one
 * lock either way.
 */
int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	if (sx->sx_cnt == 1) {
		sx->sx_cnt = -1;
		sx->sx_xholder = curthread;

		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
		WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

/*
 * Downgrade an exclusive lock to a shared lock (always succeeds), waking
 * any shared waiters so they can join us as co-holders.  Exclusive waiters
 * are deliberately not woken: the lock is still held (now shared).
 */
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);

	sx->sx_cnt = 1;
	sx->sx_xholder = NULL;
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{

	/* Assertions are meaningless once the system is already panicking. */
	if (panicstr != NULL)
		return;
	switch (what) {
	case SX_LOCKED:
	case SX_SLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		mtx_lock(sx->sx_lock);
		if (sx->sx_cnt <= 0 &&
		    (what == SX_SLOCKED || sx->sx_xholder != curthread))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->sx_object.lo_name, (what == SX_SLOCKED) ?
			    "share " : "", file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	case SX_XLOCKED:
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
		break;
	case SX_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		/*
		 * We are able to check only exclusive lock here,
		 * we cannot assert that *this* thread owns slock.
		 */
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */

#ifdef DDB
/*
 * DDB "show lock" helper: print holder/waiter state for an sx lock.
 * Internal linkage comes from the static forward declaration at the top
 * of the file.
 */
void
db_show_sx(struct lock_object *lock)
{
	struct thread *td;
	struct sx *sx;

	sx = (struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_cnt < 0) {
		td = sx->sx_xholder;
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	} else if (sx->sx_cnt > 0)
		db_printf("SLOCK: %d locks\n", sx->sx_cnt);
	else
		db_printf("UNLOCKED\n");
	db_printf(" waiters: %d shared, %d exclusive\n", sx->sx_shrd_wcnt,
	    sx->sx_excl_wcnt);
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 *
 * NOTE(review): td->td_wchan is dereferenced unconditionally, so this
 * assumes the caller has already established that td is asleep with a
 * valid wait channel (presumably the DDB deadlock-chain walker) --
 * confirm against the caller before reusing elsewhere.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;
	struct cv *cv;

	/*
	 * First, see if it looks like td is blocked on a condition
	 * variable.
	 */
	cv = td->td_wchan;
	if (cv->cv_description != td->td_wmesg)
		return (0);

	/*
	 * Ok, see if it looks like td is blocked on the exclusive
	 * condition variable.  Work backwards from the embedded cv to the
	 * candidate containing struct sx and sanity-check its lock class.
	 */
	sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_excl_cv));
	if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
	    sx->sx_excl_wcnt > 0)
		goto ok;

	/*
	 * Second, see if it looks like td is blocked on the shared
	 * condition variable.
	 */
	sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_shrd_cv));
	if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
	    sx->sx_shrd_wcnt > 0)
		goto ok;

	/* Doesn't seem to be an sx lock. */
	return (0);

ok:
	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	if (sx->sx_cnt >= 0) {
		db_printf("SLOCK (count %d)\n", sx->sx_cnt);
		*ownerp = NULL;
	} else {
		db_printf("XLOCK\n");
		*ownerp = sx->sx_xholder;
	}
	return (1);
}
#endif