/* minix/servers/vfs/lock.c (revision 0a6a1f1d) */
/* This file handles advisory file locking as required by POSIX.
 *
 * The entry points into this file are
 *   lock_op:	perform locking operations for the FCNTL system call
 *   lock_revive: revive processes when a lock is released
 */
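/* For orientation: a user process reaches lock_op() through fcntl(2).  The
 * fragment below is only an illustrative sketch of such a caller, not part
 * of this server; "fd" is assumed to be a descriptor open for writing.
 *
 *	struct flock fl;
 *	fl.l_type = F_WRLCK;		(exclusive lock)
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;			(zero length: lock to end of file)
 *	fcntl(fd, F_SETLKW, &fl);	(blocks until the lock is granted)
 */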

#include "fs.h"
#include <minix/com.h>
#include <minix/u64.h>
#include <fcntl.h>
#include <unistd.h>
#include "file.h"
#include "lock.h"
#include "vnode.h"

/*===========================================================================*
 *				lock_op					     *
 *===========================================================================*/
int lock_op(struct filp *f, int req)
{
/* Perform the advisory locking required by POSIX.  The request is one of
 * F_SETLK, F_SETLKW, or F_GETLK.
 */

  int r, ltype, i, conflict = 0, unlocking = 0;
  mode_t mo;
  off_t first, last;
  struct flock flock;
  struct file_lock *flp, *flp2, *empty;

  /* Fetch the flock structure from user space. */
  r = sys_datacopy_wrapper(who_e, fp->fp_io_buffer, VFS_PROC_NR,
		   (vir_bytes) &flock, sizeof(flock));
  if (r != OK) return(EINVAL);

  /* Perform some sanity checks on the request. */
  ltype = flock.l_type;
  mo = f->filp_mode;
  if (ltype != F_UNLCK && ltype != F_RDLCK && ltype != F_WRLCK) return(EINVAL);
  if (req == F_GETLK && ltype == F_UNLCK) return(EINVAL);
  if (!S_ISREG(f->filp_vno->v_mode) && !S_ISBLK(f->filp_vno->v_mode))
	return(EINVAL);
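  /* POSIX requires the file to be open for reading before a shared (read)
   * lock may be set, and open for writing before an exclusive (write) lock
   * may be set; R_BIT and W_BIT reflect the mode the filp was opened with.
   */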
  if (req != F_GETLK && ltype == F_RDLCK && (mo & R_BIT) == 0) return(EBADF);
  if (req != F_GETLK && ltype == F_WRLCK && (mo & W_BIT) == 0) return(EBADF);

  /* Compute the first and last bytes in the lock region. */
  switch (flock.l_whence) {
    case SEEK_SET:	first = 0; break;
    case SEEK_CUR:	first = f->filp_pos; break;
    case SEEK_END:	first = f->filp_vno->v_size; break;
    default:	return(EINVAL);
  }

  /* Check for overflow. */
  if (((long) flock.l_start > 0) && ((first + flock.l_start) < first))
	return(EINVAL);
  if (((long) flock.l_start < 0) && ((first + flock.l_start) > first))
	return(EINVAL);
  first = first + flock.l_start;
  last = first + flock.l_len - 1;
  if (flock.l_len == 0) last = MAX_FILE_POS;
  if (last < first) return(EINVAL);
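  /* At this point [first, last] is the requested region in absolute byte
   * offsets; a length of zero extends the region to MAX_FILE_POS, i.e. to
   * the end of the file and beyond.
   */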

  /* Check if this region conflicts with any existing lock. */
  empty = NULL;
  for (flp = &file_lock[0]; flp < &file_lock[NR_LOCKS]; flp++) {
	if (flp->lock_type == 0) {
		if (empty == NULL) empty = flp;
		continue;	/* 0 means unused slot */
	}
	if (flp->lock_vnode != f->filp_vno) continue;	/* different file */
	if (last < flp->lock_first) continue;	/* new one is in front */
	if (first > flp->lock_last) continue;	/* new one is afterwards */
	/* Two read locks on the same region do not conflict. */
	if (ltype == F_RDLCK && flp->lock_type == F_RDLCK) continue;
	/* When setting a lock, the caller's own locks never conflict. */
	if (ltype != F_UNLCK && flp->lock_pid == fp->fp_pid) continue;

	/* There might be a conflict.  Process it. */
	conflict = 1;
	if (req == F_GETLK) break;

	/* If we are trying to set a lock, it just failed. */
	if (ltype == F_RDLCK || ltype == F_WRLCK) {
		if (req == F_SETLK) {
			/* For F_SETLK, just report back failure. */
			return(EAGAIN);
		} else {
			/* For F_SETLKW, suspend the process. */
			suspend(FP_BLOCKED_ON_LOCK);
			return(SUSPEND);
		}
	}

	/* We are clearing a lock and we found something that overlaps. */
	unlocking = 1;
	if (first <= flp->lock_first && last >= flp->lock_last) {
		flp->lock_type = 0;	/* mark slot as unused */
		nr_locks--;		/* number of locks is now 1 less */
		continue;
	}

	/* Part of a locked region has been unlocked. */
	if (first <= flp->lock_first) {
		flp->lock_first = last + 1;
		continue;
	}

	if (last >= flp->lock_last) {
		flp->lock_last = first - 1;
		continue;
	}

	/* Bad luck. A lock has been split in two by unlocking the middle. */
	if (nr_locks == NR_LOCKS) return(ENOLCK);
	for (i = 0; i < NR_LOCKS; i++)
		if (file_lock[i].lock_type == 0) break;
	flp2 = &file_lock[i];
	flp2->lock_type = flp->lock_type;
	flp2->lock_pid = flp->lock_pid;
	flp2->lock_vnode = flp->lock_vnode;
	flp2->lock_first = last + 1;
	flp2->lock_last = flp->lock_last;
	flp->lock_last = first - 1;
	nr_locks++;
  }
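  /* If any lock was removed or shrunk above, processes blocked waiting on a
   * lock may now be able to proceed, so wake them up and let them retry.
   */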
  if (unlocking) lock_revive();

  if (req == F_GETLK) {
	if (conflict) {
		/* GETLK and conflict. Report on the conflicting lock. */
		flock.l_type = flp->lock_type;
		flock.l_whence = SEEK_SET;
		flock.l_start = flp->lock_first;
		flock.l_len = flp->lock_last - flp->lock_first + 1;
		flock.l_pid = flp->lock_pid;

	} else {
		/* It is GETLK and there is no conflict. */
		flock.l_type = F_UNLCK;
	}

	/* Copy the flock structure back to the caller. */
	r = sys_datacopy_wrapper(VFS_PROC_NR, (vir_bytes) &flock, who_e,
		fp->fp_io_buffer, sizeof(flock));
	return(r);
  }

  if (ltype == F_UNLCK) return(OK);	/* unlock requests are done by now */

  /* There is no conflict.  If space exists, store new lock in the table. */
  if (empty == NULL) return(ENOLCK);	/* table full */
  empty->lock_type = ltype;
  empty->lock_pid = fp->fp_pid;
  empty->lock_vnode = f->filp_vno;
  empty->lock_first = first;
  empty->lock_last = last;
  nr_locks++;
  return(OK);
}


/*===========================================================================*
 *				lock_revive				     *
 *===========================================================================*/
void lock_revive(void)
{
/* Go find all the processes that are waiting for any kind of lock and
 * revive them all.  The ones that are still blocked will block again when
 * they run.  The others will complete.  This strategy is a space-time
 * tradeoff.  Figuring out exactly which ones to unblock now would take
 * extra code, and the only thing it would win would be some performance in
 * extremely rare circumstances (namely, that somebody actually used
 * locking).
 */

  struct fproc *fptr;

  for (fptr = &fproc[0]; fptr < &fproc[NR_PROCS]; fptr++) {
	if (fptr->fp_pid == PID_FREE) continue;	/* skip unused slots */
	if (fptr->fp_blocked_on == FP_BLOCKED_ON_LOCK) {
		revive(fptr->fp_endpoint, 0);
	}
  }
}