xref: /original-bsd/sys/sys/lock.h (revision 0d869007)
1 /*
2  * Copyright (c) 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code contains ideas from software contributed to Berkeley by
6  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
7  * System project at Carnegie-Mellon University.
8  *
9  * %sccs.include.redist.c%
10  *
11  *	@(#)lock.h	8.5 (Berkeley) 04/13/95
12  */
13 
14 #ifndef	_LOCK_H_
15 #define	_LOCK_H_
16 
17 /*
18  * XXX This belongs in <machine/param.h>, but is here for now.
19  */
20 #define NCPUS 1
21 
22 /*
23  * An atomic spin lock.
24  *
25  * This structure only sets one bit of data, but is sized based on the
26  * minimum word size that can be operated on by the hardware test-and-set
27  * instruction. It is only needed for multiprocessors, as uniprocessors
28  * will always run to completion or a sleep. It is an error to hold one
29  * of these locks while a process is sleeping.
30  */
31 struct atomic_lk {
32 	int	lock_data;		/* zero when free, non-zero when held */
33 };
34 
35 /*
36  * The general lock structure.  Provides for multiple shared locks,
37  * upgrading from shared to exclusive, and sleeping until the lock
38  * can be gained.
39  */
40 struct lock {
41 	struct	atomic_lk lk_interlock;	/* lock on remaining fields */
42 	u_int	lk_flags;		/* LK_* request types and flags below */
43 	int	lk_sharecount;		/* # of accepted shared locks */
44 	int	lk_exclusivecount;	/* # of recursive exclusive locks */
45 	int	lk_waitcount;		/* # of processes sleeping for lock */
46 	int	lk_prio;		/* priority at which to sleep */
47 	char	*lk_wmesg;		/* resource sleeping (for tsleep) */
48 	int	lk_timo;		/* maximum sleep time (for tsleep) */
49 	pid_t	lk_lockholder;		/* pid of exclusive lock holder, or LK_NOPROC */
50 };
51 /*
52  * Lock request types:
53  *   LK_SHARED - get one of many possible shared locks. If a process
54  *	holding an exclusive lock requests a shared lock, the exclusive
55  *	lock(s) will be downgraded to shared locks.
56  *   LK_EXCLUSIVE - stop further shared locks, when they are cleared,
57  *	grant a pending upgrade if it exists, then grant an exclusive
58  *	lock. Only one exclusive lock may exist at a time, except that
59  *	a process holding an exclusive lock may get additional exclusive
60  *	locks if it explicitly sets the LK_CANRECURSE flag in the lock
61  *	request, or if the LK_CANRECURSE flag was set when the lock was
62  *	initialized.
63  *   LK_UPGRADE - the process must hold a shared lock that it wants to
64  *	have upgraded to an exclusive lock. Other processes may get
65  *	exclusive access to the resource between the time that the upgrade
66  *	is requested and the time that it is granted.
67  *   LK_EXCLUPGRADE - the process must hold a shared lock that it wants to
68  *	have upgraded to an exclusive lock. If the request succeeds, no
69  *	other processes will have gotten exclusive access to the resource
70  *	between the time that the upgrade is requested and the time that
71  *	it is granted. However, if another process has already requested
72  *	an upgrade, the request will fail (see error returns below).
73  *   LK_DOWNGRADE - the process must hold an exclusive lock that it wants
74  *	to have downgraded to a shared lock. If the process holds multiple
75  *	(recursive) exclusive locks, they will all be downgraded to shared
76  *	locks.
77  *   LK_RELEASE - release one instance of a lock.
78  *   LK_DRAIN - wait for all activity on the lock to end, then mark it
79  *	decommissioned. This feature is used before freeing a lock that
80  *	is part of a piece of memory that is about to be freed.
81  *
82  * These are flags that are passed to the lockmgr routine.
83  */
84 #define LK_TYPE_MASK	0x00000007	/* type of lock sought */
85 #define LK_SHARED	0x00000001	/* shared lock */
86 #define LK_EXCLUSIVE	0x00000002	/* exclusive lock */
87 #define LK_UPGRADE	0x00000003	/* shared-to-exclusive upgrade */
88 #define LK_EXCLUPGRADE	0x00000004	/* first shared-to-exclusive upgrade */
89 #define LK_DOWNGRADE	0x00000005	/* exclusive-to-shared downgrade */
90 #define LK_RELEASE	0x00000006	/* release any type of lock */
91 #define LK_DRAIN	0x00000007	/* wait for all lock activity to end */
92 /*
93  * External lock flags.
94  *
95  * These flags may be set in lock_init to set their mode permanently,
96  * or passed in as arguments to the lock manager.
97  */
98 #define LK_EXTFLG_MASK	0x000000f0	/* mask of external flags */
99 #define LK_NOWAIT	0x00000010	/* do not sleep to await lock */
100 #define LK_SLEEPFAIL	0x00000020	/* sleep, then return failure */
101 #define LK_CANRECURSE	0x00000040	/* allow recursive exclusive lock */
102 /*
103  * Internal lock flags.
104  *
105  * These flags are used internally to the lock manager.
106  */
107 #define LK_WANT_UPGRADE	0x00000100	/* waiting for share-to-excl upgrade */
108 #define LK_WANT_EXCL	0x00000200	/* exclusive lock sought */
109 #define LK_HAVE_EXCL	0x00000400	/* exclusive lock obtained */
110 #define LK_WAITDRAIN	0x00000800	/* process waiting for lock to drain */
111 #define LK_DRAINED	0x00001000	/* lock has been decommissioned */
112 /*
113  * Lock return status.
114  *
115  * Successfully obtained locks return 0. Locks will always succeed
116  * unless one of the following is true:
117  *	LK_EXCLUPGRADE is requested and some other process has already
118  *	    requested a lock upgrade (returns EBUSY).
119  *	LK_NOWAIT is set and a sleep would be required (returns EBUSY).
120  *	LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK).
121  *	PCATCH is set in lock priority and a signal arrives (returns
122  *	    either EINTR or ERESTART if the system call is to be restarted).
123  *	Non-null lock timeout and timeout expires (returns EWOULDBLOCK).
124  * A failed lock attempt always returns a non-zero error value. No lock
125  * is held after an error return (in particular, a failed LK_UPGRADE
126  * or LK_EXCLUPGRADE will have released its shared access lock).
127  */
128 
129 /*
130  * Indicator that no process holds exclusive lock
131  */
132 #define LK_NOPROC ((pid_t) -1)
133 
134 void	lock_init __P((struct lock *, int prio, char *wmesg, int timo,
135 			int flags));
136 int	lockmgr __P((__volatile struct lock *, u_int flags, struct proc *));
137 int	lockstatus __P((struct lock *));
138 
139 #if NCPUS > 1
140 /*
141  * The simple-lock routines are the primitives out of which the lock
142  * package is built. The machine-dependent code must implement an
143  * atomic test_and_set operation that indivisibly sets the atomic_lk
144  * to non-zero and returns its old value. It also assumes that the
145  * setting of the lock to zero below is indivisible. Atomic locks may
146  * only be used for exclusive locks.
147  */
148 __inline void
149 atomic_lock_init(lkp)
150 	struct atomic_lk *lkp;
151 {
152 	/* a lock starts out free (zero) */
153 	lkp->lock_data = 0;
154 }
155 
156 __inline void
157 atomic_lock(lkp)
158 	__volatile struct atomic_lk *lkp;
159 {
160 	/* spin until our test_and_set sees the lock free and claims it */
161 	while (test_and_set(&lkp->lock_data) != 0)
162 		;
163 }
164 
165 __inline int
166 atomic_lock_try(lkp)
167 	__volatile struct atomic_lk *lkp;
168 {
169 	/* one attempt, no spin: non-zero if the lock was obtained */
170 	return (!test_and_set(&lkp->lock_data));
171 }
172 
173 __inline void
174 atomic_unlock(lkp)
175 	struct atomic_lk *lkp;
176 {
177 	/* the store of zero is assumed indivisible (see comment above) */
178 	lkp->lock_data = 0;
179 }
180 
181 #else /* NCPUS == 1, so no multiprocessor locking is necessary */
182 
183 #ifdef DEBUG
184 __inline void
185 atomic_lock_init(alp)
186 	struct atomic_lk *alp;
187 {
188 	/* DEBUG stub: initialize to the unlocked state */
189 	alp->lock_data = 0;
190 }
191 
192 __inline void
193 atomic_lock(lkp)
194 	__volatile struct atomic_lk *lkp;
195 {
196 	/* uniprocessor DEBUG stub: catch acquisition of a lock already held */
197 	if (lkp->lock_data == 1)
198 		panic("atomic lock held");
199 	lkp->lock_data = 1;
200 }
201 
202 __inline int
203 atomic_lock_try(lkp)
204 	__volatile struct atomic_lk *lkp;
205 {
206 	/* on a uniprocessor a try always succeeds; panic on double acquire */
207 	if (lkp->lock_data == 1)
208 		panic("atomic lock held");
209 	lkp->lock_data = 1;
210 	return (1);
211 }
212 
213 __inline void
214 atomic_unlock(lkp)
215 	struct atomic_lk *lkp;
216 {
217 	/* releasing a lock that is not held is a bug; catch it here */
218 	if (lkp->lock_data == 0)
219 		panic("atomic lock not held");
220 	lkp->lock_data = 0;
221 }
222 
223 #else /* !DEBUG */
224 #define	atomic_lock_init(alp)
225 #define	atomic_lock(alp)
226 #define	atomic_lock_try(alp)	(1)	/* always succeeds */
227 #define	atomic_unlock(alp)
228 #endif /* !DEBUG */
229 
230 #endif /* NCPUS == 1 */
231 
232 #endif /* !_LOCK_H_ */
233