/*	$OpenBSD: kern_lock.c,v 1.38 2011/08/28 02:35:34 guenther Exp $	*/

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/sched.h>

#include <machine/cpu.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

/*
 * Acquire a resource.  We sleep on the address of the lk_sharecount
 * member normally; if waiting for it to drain we sleep on the address
 * of the lk_waitcount member instead.
 */
#define	ACQUIRE(lkp, error, extflags, drain, wanted)			\
do {									\
	for (error = 0; wanted; ) {					\
		if ((drain))						\
			(lkp)->lk_flags |= LK_WAITDRAIN;		\
		else							\
			(lkp)->lk_waitcount++;				\
		error = tsleep((drain) ?				\
		    &(lkp)->lk_waitcount : &(lkp)->lk_sharecount,	\
		    (lkp)->lk_prio, (lkp)->lk_wmesg, (lkp)->lk_timo);	\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		if (error)						\
			break;						\
	}								\
} while (0)

/*
 * Record and test ownership of an exclusive lock.  The cpu_id argument
 * is unused in this implementation: ownership is tracked by pid alone.
 */
#define	SETHOLDER(lkp, pid, cpu_id)					\
	(lkp)->lk_lockholder = (pid)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	((lkp)->lk_lockholder == (pid))

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
{

	bzero(lkp, sizeof(struct lock));
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
}
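
/*
 * Illustrative sketch only, not part of this file: how a typical
 * caller might initialize one of these locks and take it shared.
 * The structure and field names ("mt", "mt_lock") are hypothetical;
 * PLOCK is the usual sleep priority for this kind of lock.
 *
 *	struct mything {
 *		struct lock mt_lock;
 *		...
 *	};
 *
 *	lockinit(&mt->mt_lock, PLOCK, "mything", 0, 0);
 *
 *	if (lockmgr(&mt->mt_lock, LK_SHARED, NULL) == 0) {
 *		... read-only access to mt ...
 *		lockmgr(&mt->mt_lock, LK_RELEASE, NULL);
 *	}
 */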

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int lock_type = 0;

	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	return (lock_type);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(__volatile struct lock *lkp, u_int flags, void *notused)
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;

	error = 0;
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC
	if (p == NULL)
		panic("lockmgr: process context required");
#endif
	/* Process context required. */
	pid = p->p_pid;
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned.  The only valid operation thereafter
	 * is a single release of that exclusive lock.  This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag.  Any
	 * further requests of any sort will result in a panic.  The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */
		lkp->lk_flags &= ~LK_DRAINING;
		lkp->lk_flags |= LK_DRAINED;
	}

	/*
	 * Check if the caller is asking us to be schizophrenic.
	 */
	if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
	    (LK_CANRECURSE|LK_RECURSEFAIL))
		panic("lockmgr: make up your mind");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL));
			if (error)
				break;
			lkp->lk_sharecount++;
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;

		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
		if (lkp->lk_waitcount)
			wakeup(&lkp->lk_sharecount);
		break;
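
	/*
	 * A summary of the code below, not new behavior: exclusive
	 * acquisition happens in two steps.  First wait for and claim
	 * LK_WANT_EXCL, which stops new sharers from slipping in; then
	 * wait for the existing shared holders to drain away before
	 * setting LK_HAVE_EXCL and recording ourselves as the holder.
	 */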
	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				panic("lockmgr: pid %d, not exclusive lock "
				    "holder %d unlocking",
				    pid, lkp->lk_lockholder);
			}
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		if (lkp->lk_waitcount)
			wakeup(&lkp->lk_sharecount);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		    lkp->lk_sharecount != 0 ||
		    lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		lkp->lk_exclusivecount = 1;
		break;

	default:
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup(&lkp->lk_waitcount);
	}
	return (error);
}
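
/*
 * Illustrative sketch only, not part of this file: LK_DRAIN is
 * typically used just before freeing a structure that embeds a lock,
 * so no other process is left sleeping on it.  Per the comment in
 * lockmgr() above, the only valid operation after a drain is a single
 * release of the resulting exclusive lock.  The names "mt", "mt_lock",
 * and the malloc type are hypothetical.
 *
 *	lockmgr(&mt->mt_lock, LK_DRAIN, NULL);
 *	lockmgr(&mt->mt_lock, LK_RELEASE, NULL);
 *	free(mt, M_TEMP);
 */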

#ifdef DIAGNOSTIC
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
#endif /* DIAGNOSTIC */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

struct __mp_lock kernel_lock;

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */

void
_kernel_lock(void)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}

#ifdef MP_LOCKDEBUG
/* CPU-dependent timing, needs this to be settable from ddb. */
int __mp_lock_spinout = 200000000;
#endif

#endif /* MULTIPROCESSOR */
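
/*
 * Illustrative sketch only, not part of this file: callers normally
 * reach _kernel_lock()/_kernel_unlock() through the KERNEL_LOCK() and
 * KERNEL_UNLOCK() macros rather than calling them directly, e.g.:
 *
 *	KERNEL_LOCK();
 *	... code that still requires the big lock ...
 *	KERNEL_UNLOCK();
 *
 * On kernels built without MULTIPROCESSOR those macros expand to
 * nothing, so the functions here are never referenced.
 */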