xref: /original-bsd/sys/kern/kern_lock.c (revision 3e62f48e)
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_lock.c	8.5 (Berkeley) 04/11/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			atomic_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			atomic_lock(&lkp->lk_interlock);		\
		}							\
		if (!(wanted))						\
			break;
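
/*
 * Editorial note: the for loop above bounds the busy-wait to at most
 * lock_wait_time polls of the wanted condition; if the condition is still
 * true after spinning, ACQUIRE() below falls back to sleeping via tsleep().
 */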

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the atomic lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_flags |= LK_WAITING;				\
		atomic_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		atomic_lock(&(lkp)->lk_interlock);			\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
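
/*
 * Editorial note: both PAUSE() and ACQUIRE() are entered with lk_interlock
 * held and leave it held again when they finish; the interlock is released
 * only around the spin loop and around each tsleep() so that other
 * processes (and processors) can make progress while the caller waits.
 */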

/*
 * Initialize a lock; required before use.
 */
void
lock_init(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	bzero(lkp, sizeof(struct lock));
	atomic_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
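
/*
 * Editorial example (not part of the original source): a minimal sketch of
 * declaring and initializing a lock with the interface above.  The lock name
 * "mylock", the wait message "mylck", and the PINOD sleep priority are
 * illustrative assumptions, not values taken from this file.
 *
 *	struct lock mylock;
 *
 *	lock_init(&mylock, PINOD, "mylck", 0, 0);
 */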

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	atomic_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	atomic_unlock(&lkp->lk_interlock);
	return (lock_type);
}
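
/*
 * Editorial example (not part of the original source): lockstatus() returns
 * LK_EXCLUSIVE, LK_SHARED, or 0 for an unlocked lock, so a caller can assert
 * state, e.g. (using the illustrative "mylock" declared above):
 *
 *	if (lockstatus(&mylock) != 0)
 *		panic("mylock still held");
 */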

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, p, flags)
	volatile struct lock *lkp;
	struct proc *p;
	u_int flags;
{
	int error;
	pid_t pid;
	volatile int extflags;

	pid = p->p_pid;
	atomic_lock(&lkp->lk_interlock);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				atomic_unlock(&lkp->lk_interlock);
				return (EBUSY);
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_sharecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us in requesting an
		 * upgrade, then we want to fail rather than have an
		 * intervening exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock
		 * will always have been released.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				atomic_unlock(&lkp->lk_interlock);
				return (error);
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && (lkp->lk_flags & LK_WAITING)) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			atomic_unlock(&lkp->lk_interlock);
			return (0);
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			atomic_unlock(&lkp->lk_interlock);
			return (EBUSY);
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			atomic_unlock(&lkp->lk_interlock);
			return (error);
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0)
			lkp->lk_sharecount--;
		if (lkp->lk_flags & LK_WAITING) {
			lkp->lk_flags &= ~LK_WAITING;
			wakeup((void *)lkp);
		}
		atomic_unlock(&lkp->lk_interlock);
		return (0);

	default:
		atomic_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
}
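
/*
 * Editorial example (not part of the original source): a hedged sketch of a
 * typical shared acquire, upgrade, and release sequence using lockmgr().
 * "mylock" is the illustrative lock from the lock_init() example above, "p"
 * is assumed to be the calling process (e.g. curproc), and error handling
 * is abbreviated.
 *
 *	int error;
 *
 *	if ((error = lockmgr(&mylock, p, LK_SHARED)) != 0)
 *		return (error);
 *	...examine the resource while holding the shared lock...
 *	if ((error = lockmgr(&mylock, p, LK_UPGRADE)) != 0)
 *		return (error);	(the shared lock is released on failure)
 *	...modify the resource while holding the exclusive lock...
 *	(void) lockmgr(&mylock, p, LK_RELEASE);
 */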