xref: /original-bsd/sys/kern/kern_lock.c (revision 27393bdf)
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.8 (Berkeley) 04/27/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be expanded inline in the ACQUIRE macro below, but we
 * cannot have a #if inside a multi-line define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			simple_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			simple_lock(&lkp->lk_interlock);		\
		}

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

/*
 * Panic messages for inline expanded simple locks.
 * Put text here to avoid hundreds of copies.
 */
const char *simple_lock_held = "simple_lock: lock held";
const char *simple_lock_not_held = "simple_lock: lock not held";

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}

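/*
 * Illustrative sketch, not part of the original source: roughly what
 * the PAUSE/ACQUIRE pair above expands to on a multiprocessor, for the
 * shared-lock wait in lockmgr() below.  The wrapper function and its
 * name are hypothetical, added only for exposition.
 */
#ifdef notdef
static int
acquire_shared_wait(lkp, extflags)
	struct lock *lkp;
	int extflags;
{
	int error, i;

	/*
	 * PAUSE: spin briefly without the interlock, in case the
	 * exclusive holder or upgrader finishes quickly.
	 */
	if (lock_wait_time > 0) {
		simple_unlock(&lkp->lk_interlock);
		for (i = lock_wait_time; i > 0; i--)
			if (!(lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)))
				break;
		simple_lock(&lkp->lk_interlock);
	}
	/*
	 * Sleep loop: block until the condition clears, a signal or
	 * timeout ends the sleep, or LK_SLEEPFAIL forces a failure.
	 */
	for (error = 0; lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE); ) {
		lkp->lk_waitcount++;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep((void *)lkp, lkp->lk_prio,
		    lkp->lk_wmesg, lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		lkp->lk_waitcount--;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
	}
	return (error);
}
#endif /* notdef */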
/*
 * Initialize a lock; required before use.
 */
void
lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
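
/*
 * Illustrative sketch, not part of the original source: initializing a
 * lock before first use.  The lock name, the "inolck" wait message, and
 * the use of the PINOD sleep priority are assumptions patterned on
 * inode locking, not taken from this file.
 */
#ifdef notdef
static struct lock example_lock;

static void
example_init()
{

	/* Sleep at inode priority; no timeout, no external flags. */
	lock_init(&example_lock, PINOD, "inolck", 0, 0);
}
#endif /* notdef */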

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
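
/*
 * Illustrative sketch, not part of the original source: lockstatus()
 * supports diagnostic checks such as the one below.  "example_lock" is
 * the hypothetical lock declared above.
 */
#ifdef notdef
static void
example_assert_exclusive()
{

	if (lockstatus(&example_lock) != LK_EXCLUSIVE)
		panic("example_assert_exclusive: not exclusively locked");
}
#endif /* notdef */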

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, pid)
	__volatile struct lock *lkp;
	u_int flags;
	struct simple_lock *interlkp;
	pid_t pid;
{
	int error;
	__volatile int extflags;

	error = 0;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	if ((lkp->lk_flags & LK_DRAINED) &&
	    (((flags & LK_TYPE_MASK) != LK_RELEASE) ||
	    lkp->lk_lockholder != pid))
		panic("lockmgr: using decommissioned lock");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock
		 * will always have been released.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 0)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			break;
		}
		/*
		 * Someone else has requested an upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo))
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINED | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}

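/*
 * Illustrative sketch, not part of the original source: a caller taking
 * a shared lock, upgrading it once it decides to modify the protected
 * data, and finally releasing it.  "example_lock" is the hypothetical
 * lock declared above; curproc is assumed to be the usual 4.4BSD
 * pointer to the currently running process.
 */
#ifdef notdef
static int
example_modify()
{
	pid_t pid = curproc->p_pid;
	int error;

	/* Read phase: shared access suffices. */
	if (error = lockmgr(&example_lock, LK_SHARED,
	    (struct simple_lock *)0, pid))
		return (error);
	/*
	 * Write phase: trade the shared lock for an exclusive one.
	 * On error the lock has already been released (see the
	 * LK_UPGRADE comment above).
	 */
	if (error = lockmgr(&example_lock, LK_UPGRADE,
	    (struct simple_lock *)0, pid))
		return (error);
	/* ... modify the protected data ... */
	return (lockmgr(&example_lock, LK_RELEASE,
	    (struct simple_lock *)0, pid));
}
#endif /* notdef */
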
/*
 * Print out information about the state of a lock.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED", lkp->lk_wmesg);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL by pid %d", lkp->lk_wmesg,
		    lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
365