xref: /minix/minix/servers/vfs/filedes.c (revision 045e0ed3)
1 /* This file contains the procedures that manipulate file descriptors.
2  *
3  * The entry points into this file are
4  *   get_fd:	    look for free file descriptor and free filp slots
5  *   get_filp:	    look up the filp entry for a given file descriptor
6  *   find_filp:	    find a filp slot that points to a given vnode
7  *   inval_filp:    invalidate a filp and associated fd's, only let close()
8  *                  happen on it
9  *   do_copyfd:     copies a file descriptor from or to another endpoint
10  */
11 
12 #include <sys/select.h>
13 #include <minix/callnr.h>
14 #include <minix/u64.h>
15 #include <assert.h>
16 #include <sys/stat.h>
17 #include "fs.h"
18 #include "file.h"
19 #include "vnode.h"
20 
21 
#if LOCK_DEBUG
/*===========================================================================*
 *				check_filp_locks			     *
 *===========================================================================*/
void check_filp_locks_by_me(void)
{
/* Check whether this thread still has filp locks held.  Debug-only sanity
 * check: after a request completes, the worker thread should hold no filp
 * mutexes at all.  Panics if a held lock is found.
 */
  struct filp *f;
  int r;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	r = mutex_trylock(&f->filp_lock);
	/* -EDEADLK means the mutex is already held by this very thread
	 * (error-checking mutex semantics) -- that is the bug we look for.
	 */
	if (r == -EDEADLK)
		panic("Thread %d still holds filp lock on filp %p call_nr=%d\n",
		      mthread_self(), f, job_call_nr);
	else if (r == 0) {
		/* We just obtained the lock, release it */
		mutex_unlock(&f->filp_lock);
	}
	/* Any other result (e.g. held by another thread) is fine here. */
  }
}
#endif
44 
45 /*===========================================================================*
46  *				check_filp_locks			     *
47  *===========================================================================*/
48 void check_filp_locks(void)
49 {
50   struct filp *f;
51   int r, count = 0;
52 
53   for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
54 	r = mutex_trylock(&f->filp_lock);
55 	if (r == -EBUSY) {
56 		/* Mutex is still locked */
57 		count++;
58 	} else if (r == 0) {
59 		/* We just obtained a lock, don't want it */
60 		mutex_unlock(&f->filp_lock);
61 	} else
62 		panic("filp_lock weird state");
63   }
64   if (count) panic("locked filps");
65 #if 0
66   else printf("check_filp_locks OK\n");
67 #endif
68 }
69 
70 /*===========================================================================*
71  *				init_filps				     *
72  *===========================================================================*/
73 void init_filps(void)
74 {
75 /* Initialize filps */
76   struct filp *f;
77 
78   for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
79 	if (mutex_init(&f->filp_lock, NULL) != 0)
80 		panic("Failed to initialize filp mutex");
81   }
82 
83 }
84 
85 /*===========================================================================*
86  *				get_fd					     *
87  *===========================================================================*/
88 int get_fd(struct fproc *rfp, int start, mode_t bits, int *k, struct filp **fpt)
89 {
90 /* Look for a free file descriptor and a free filp slot.  Fill in the mode word
91  * in the latter, but don't claim either one yet, since the open() or creat()
92  * may yet fail.
93  */
94 
95   register struct filp *f;
96   register int i;
97 
98   /* Search the fproc fp_filp table for a free file descriptor. */
99   for (i = start; i < OPEN_MAX; i++) {
100 	if (rfp->fp_filp[i] == NULL) {
101 		/* A file descriptor has been located. */
102 		*k = i;
103 		break;
104 	}
105   }
106 
107   /* Check to see if a file descriptor has been found. */
108   if (i >= OPEN_MAX) return(EMFILE);
109 
110   /* If we don't care about a filp, return now */
111   if (fpt == NULL) return(OK);
112 
113   /* Now that a file descriptor has been found, look for a free filp slot. */
114   for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
115 	assert(f->filp_count >= 0);
116 	if (f->filp_count == 0 && mutex_trylock(&f->filp_lock) == 0) {
117 		f->filp_mode = bits;
118 		f->filp_pos = 0;
119 		f->filp_selectors = 0;
120 		f->filp_select_ops = 0;
121 		f->filp_pipe_select_ops = 0;
122 		f->filp_char_select_dev = NO_DEV;
123 		f->filp_flags = 0;
124 		f->filp_select_flags = 0;
125 		f->filp_softlock = NULL;
126 		f->filp_ioctl_fp = NULL;
127 		*fpt = f;
128 		return(OK);
129 	}
130   }
131 
132   /* If control passes here, the filp table must be full.  Report that back. */
133   return(ENFILE);
134 }
135 
136 
137 /*===========================================================================*
138  *				get_filp				     *
139  *===========================================================================*/
140 struct filp *
141 get_filp(
142 	int fild,			/* file descriptor */
143 	tll_access_t locktype
144 )
145 {
146 /* See if 'fild' refers to a valid file descr.  If so, return its filp ptr. */
147 
148   return get_filp2(fp, fild, locktype);
149 }
150 
151 
152 /*===========================================================================*
153  *				get_filp2				     *
154  *===========================================================================*/
155 struct filp *
156 get_filp2(
157 	register struct fproc *rfp,
158 	int fild,			/* file descriptor */
159 	tll_access_t locktype
160 )
161 {
162 /* See if 'fild' refers to a valid file descr.  If so, return its filp ptr. */
163   struct filp *filp;
164 
165   filp = NULL;
166   if (fild < 0 || fild >= OPEN_MAX)
167 	err_code = EBADF;
168   else if (locktype != VNODE_OPCL && rfp->fp_filp[fild] != NULL &&
169 		rfp->fp_filp[fild]->filp_mode == FILP_CLOSED)
170 	err_code = EIO; /* disallow all use except close(2) */
171   else if ((filp = rfp->fp_filp[fild]) == NULL)
172 	err_code = EBADF;
173   else if (locktype != VNODE_NONE)	/* Only lock the filp if requested */
174 	lock_filp(filp, locktype);	/* All is fine */
175 
176   return(filp);	/* may also be NULL */
177 }
178 
179 
180 /*===========================================================================*
181  *				find_filp				     *
182  *===========================================================================*/
183 struct filp *find_filp(struct vnode *vp, mode_t bits)
184 {
185 /* Find a filp slot that refers to the vnode 'vp' in a way as described
186  * by the mode bit 'bits'. Used for determining whether somebody is still
187  * interested in either end of a pipe.  Also used when opening a FIFO to
188  * find partners to share a filp field with (to shared the file position).
189  * Like 'get_fd' it performs its job by linear search through the filp table.
190  */
191 
192   struct filp *f;
193 
194   for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
195 	if (f->filp_count != 0 && f->filp_vno == vp && (f->filp_mode & bits)) {
196 		return(f);
197 	}
198   }
199 
200   /* If control passes here, the filp wasn't there.  Report that back. */
201   return(NULL);
202 }
203 
/*===========================================================================*
 *				invalidate_filp				     *
 *===========================================================================*/
void invalidate_filp(struct filp *rfilp)
{
/* Invalidate filp: mark it closed so that get_filp2() refuses any further
 * use of it except through close(2).  The slot itself remains allocated.
 */

  rfilp->filp_mode = FILP_CLOSED;
}
213 
214 /*===========================================================================*
215  *			invalidate_filp_by_char_major			     *
216  *===========================================================================*/
217 void invalidate_filp_by_char_major(devmajor_t major)
218 {
219   struct filp *f;
220 
221   for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
222 	if (f->filp_count != 0 && f->filp_vno != NULL) {
223 		if (major(f->filp_vno->v_sdev) == major &&
224 		    S_ISCHR(f->filp_vno->v_mode)) {
225 			invalidate_filp(f);
226 		}
227 	}
228   }
229 }
230 
231 /*===========================================================================*
232  *			invalidate_filp_by_endpt			     *
233  *===========================================================================*/
234 void invalidate_filp_by_endpt(endpoint_t proc_e)
235 {
236   struct filp *f;
237 
238   for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
239 	if (f->filp_count != 0 && f->filp_vno != NULL) {
240 		if (f->filp_vno->v_fs_e == proc_e)
241 			invalidate_filp(f);
242 	}
243   }
244 }
245 
/*===========================================================================*
 *				lock_filp				     *
 *===========================================================================*/
void
lock_filp(struct filp *filp, tll_access_t locktype)
{
/* Lock a filp and its associated vnode.  The vnode is locked with 'locktype'
 * unless this thread holds it already, in which case only a 'soft' reference
 * is recorded in the filp.  May suspend the calling worker thread while
 * waiting for the filp mutex.
 */
  struct worker_thread *org_self;
  struct vnode *vp;

  assert(filp->filp_count > 0);		/* filp must be in use */
  vp = filp->filp_vno;
  assert(vp != NULL);			/* and attached to a vnode */

  /* Lock vnode only if we haven't already locked it. If already locked by us,
   * we're allowed to have one additional 'soft' lock. */
  if (tll_locked_by_me(&vp->v_lock)) {
	assert(filp->filp_softlock == NULL);
	filp->filp_softlock = fp;
  } else {
	/* We have to make an exception for vnodes belonging to pipes. Even
	 * read(2) operations on pipes change the vnode and therefore require
	 * exclusive access.
	 */
	if (S_ISFIFO(vp->v_mode) && locktype == VNODE_READ)
		locktype = VNODE_WRITE;
	lock_vnode(vp, locktype);
  }

  assert(vp->v_ref_count > 0);	/* vnode still in use? */
  assert(filp->filp_vno == vp);	/* vnode still what we think it is? */

  /* First try to get filp lock right off the bat */
  if (mutex_trylock(&filp->filp_lock) != 0) {

	/* Already in use, let's wait for our turn.  Suspend this worker so
	 * other jobs can make progress while we block on the mutex. */
	org_self = worker_suspend();

	if (mutex_lock(&filp->filp_lock) != 0)
		panic("unable to obtain lock on filp");

	worker_resume(org_self);
  }
}
289 
290 /*===========================================================================*
291  *				unlock_filp				     *
292  *===========================================================================*/
293 void
294 unlock_filp(struct filp *filp)
295 {
296   /* If this filp holds a soft lock on the vnode, we must be the owner */
297   if (filp->filp_softlock != NULL)
298 	assert(filp->filp_softlock == fp);
299 
300   if (filp->filp_count > 0) {
301 	/* Only unlock vnode if filp is still in use */
302 
303 	/* and if we don't hold a soft lock */
304 	if (filp->filp_softlock == NULL) {
305 		assert(tll_islocked(&(filp->filp_vno->v_lock)));
306 		unlock_vnode(filp->filp_vno);
307 	}
308   }
309 
310   filp->filp_softlock = NULL;
311   if (mutex_unlock(&filp->filp_lock) != 0)
312 	panic("unable to release lock on filp");
313 }
314 
315 /*===========================================================================*
316  *				unlock_filps				     *
317  *===========================================================================*/
318 void
319 unlock_filps(struct filp *filp1, struct filp *filp2)
320 {
321 /* Unlock two filps that are tied to the same vnode. As a thread can lock a
322  * vnode only once, unlocking the vnode twice would result in an error. */
323 
324   /* No NULL pointers and not equal */
325   assert(filp1);
326   assert(filp2);
327   assert(filp1 != filp2);
328 
329   /* Must be tied to the same vnode and not NULL */
330   assert(filp1->filp_vno == filp2->filp_vno);
331   assert(filp1->filp_vno != NULL);
332 
333   if (filp1->filp_count > 0 && filp2->filp_count > 0) {
334 	/* Only unlock vnode if filps are still in use */
335 	unlock_vnode(filp1->filp_vno);
336   }
337 
338   filp1->filp_softlock = NULL;
339   filp2->filp_softlock = NULL;
340   if (mutex_unlock(&filp2->filp_lock) != 0)
341 	panic("unable to release filp lock on filp2");
342   if (mutex_unlock(&filp1->filp_lock) != 0)
343 	panic("unable to release filp lock on filp1");
344 }
345 
/*===========================================================================*
 *				close_filp				     *
 *===========================================================================*/
void
close_filp(struct filp *f)
{
/* Close a file. Will also unlock filp when done.  The caller must hold both
 * the filp lock and the vnode lock; both are released before returning.
 */

  int rw;
  dev_t dev;
  struct vnode *vp;

  /* Must be locked */
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
  assert(tll_islocked(&f->filp_vno->v_lock));

  vp = f->filp_vno;

  /* If this is the last reference and the filp was not already invalidated,
   * perform device-specific close processing first.
   */
  if (f->filp_count - 1 == 0 && f->filp_mode != FILP_CLOSED) {
	/* Check to see if the file is special. */
	if (S_ISCHR(vp->v_mode) || S_ISBLK(vp->v_mode)) {
		dev = vp->v_sdev;
		if (S_ISBLK(vp->v_mode))  {
			lock_bsf();
			if (vp->v_bfs_e == ROOT_FS_E && dev != ROOT_DEV) {
				/* Invalidate the cache unless the special is
				 * mounted. Be careful not to flush the root
				 * file system either.
				 */
				(void) req_flush(vp->v_bfs_e, dev);
			}
			unlock_bsf();

			(void) bdev_close(dev);	/* Ignore errors */
		} else {
			(void) cdev_close(dev);	/* Ignore errors */
		}

		f->filp_mode = FILP_CLOSED;
	}
  }

  /* If the inode being closed is a pipe, release everyone hanging on it. */
  if (S_ISFIFO(vp->v_mode)) {
	/* Wake up the complementary side: closing a reader releases blocked
	 * writers and vice versa.
	 */
	rw = (f->filp_mode & R_BIT ? VFS_WRITE : VFS_READ);
	release(vp, rw, susp_count);
  }

  if (--f->filp_count == 0) {
	if (S_ISFIFO(vp->v_mode)) {
		/* Last reader or writer is going. Tell PFS about latest
		 * pipe size.
		 */
		truncate_vnode(vp, vp->v_size);
	}

	/* Drop the vnode reference and free the filp slot for reuse. */
	unlock_vnode(f->filp_vno);
	put_vnode(f->filp_vno);
	f->filp_vno = NULL;
	f->filp_mode = FILP_CLOSED;
	f->filp_count = 0;
  } else if (f->filp_count < 0) {
	panic("VFS: invalid filp count: %d ino %llx/%llu", f->filp_count,
	      vp->v_dev, vp->v_inode_nr);
  } else {
	/* Still in use elsewhere; just release the vnode lock. */
	unlock_vnode(f->filp_vno);
  }

  mutex_unlock(&f->filp_lock);
}
416 
/*===========================================================================*
 *				do_copyfd				     *
 *===========================================================================*/
int do_copyfd(void)
{
/* Copy a file descriptor between processes, or close a remote file descriptor.
 * This call is used as back-call by device drivers (UDS, VND), and is expected
 * to be used in response to an IOCTL to such device drivers.
 * Returns the new descriptor number (COPYFD_FROM/COPYFD_TO), OK
 * (COPYFD_CLOSE), or a negative error code.
 */
  struct fproc *rfp;
  struct filp *rfilp;
  endpoint_t endpt;
  int r, fd, what, slot;

  /* This should be replaced with an ACL check. */
  if (!super_user) return(EPERM);

  endpt = job_m_in.m_lsys_vfs_copyfd.endpt;
  fd = job_m_in.m_lsys_vfs_copyfd.fd;
  what = job_m_in.m_lsys_vfs_copyfd.what;

  if (isokendpt(endpt, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* FIXME: we should now check that the user process is indeed blocked on an
   * IOCTL call, so that we can safely mess with its file descriptors.  We
   * currently do not have the necessary state to verify this, so we assume
   * that the call is always used in the right way.
   */

  /* Depending on the operation, get the file descriptor from the caller or the
   * user process.  Do not lock the filp yet: we first need to make sure that
   * locking it will not result in a deadlock.
   */
  rfilp = get_filp2((what == COPYFD_TO) ? fp : rfp, fd, VNODE_NONE);
  if (rfilp == NULL)
	return(err_code);

  /* If the filp is involved in an IOCTL by the user process, locking the filp
   * here would result in a deadlock.  This would happen if a user process
   * passes in the file descriptor to the device node on which it is performing
   * the IOCTL.  We do not allow manipulation of such device nodes.  In
   * practice, this only applies to block-special files (and thus VND), because
   * character-special files (as used by UDS) are unlocked during the IOCTL.
   */
  if (rfilp->filp_ioctl_fp == rfp)
	return(EBADF);

  /* Now we can safely lock the filp, copy or close it, and unlock it again. */
  lock_filp(rfilp, VNODE_READ);

  switch (what) {
  case COPYFD_FROM:
	/* Copy from the remote process to the caller: the filp came from
	 * 'rfp', so allocate the new descriptor in the caller's table.
	 */
	rfp = fp;

	/* FALLTHROUGH */
  case COPYFD_TO:
	/* Find a free file descriptor slot in the local or remote process.
	 * Note that 'fd' is reused for the new descriptor from here on.
	 */
	for (fd = 0; fd < OPEN_MAX; fd++)
		if (rfp->fp_filp[fd] == NULL)
			break;

	/* If found, fill the slot and return the slot number. */
	if (fd < OPEN_MAX) {
		rfp->fp_filp[fd] = rfilp;
		rfilp->filp_count++;
		r = fd;
	} else
		r = EMFILE;

	break;

  case COPYFD_CLOSE:
	/* This should be used ONLY to revert a successful copy-to operation,
	 * and assumes that the filp is still in use by the caller as well.
	 * Simply decrease the reference count and clear the remote slot.
	 */
	if (rfilp->filp_count > 1) {
		rfilp->filp_count--;
		rfp->fp_filp[fd] = NULL;
		r = OK;
	} else
		r = EBADF;

	break;

  default:
	r = EINVAL;
  }

  unlock_filp(rfilp);

  return(r);
}
510