xref: /dragonfly/sys/kern/kern_descrip.c (revision 113f6df6)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
39  * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
40  * $DragonFly: src/sys/kern/kern_descrip.c,v 1.30 2004/10/12 19:20:46 dillon Exp $
41  */
42 
43 #include "opt_compat.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/sysproto.h>
48 #include <sys/conf.h>
49 #include <sys/filedesc.h>
50 #include <sys/kernel.h>
51 #include <sys/sysctl.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/namei.h>
55 #include <sys/file.h>
56 #include <sys/stat.h>
57 #include <sys/filio.h>
58 #include <sys/fcntl.h>
59 #include <sys/unistd.h>
60 #include <sys/resourcevar.h>
61 #include <sys/event.h>
62 #include <sys/kern_syscall.h>
63 
64 #include <vm/vm.h>
65 #include <vm/vm_extern.h>
66 
67 #include <sys/file2.h>
68 
69 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
70 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
71 		     "file desc to leader structures");
72 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
73 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
74 
75 static	 d_open_t  fdopen;
76 #define NUMFDESC 64
77 
78 #define CDEV_MAJOR 22
79 static struct cdevsw fildesc_cdevsw = {
80 	/* name */	"FD",
81 	/* maj */	CDEV_MAJOR,
82 	/* flags */	0,
83 	/* port */      NULL,
84 	/* clone */	NULL,
85 
86 	/* open */	fdopen,
87 	/* close */	noclose,
88 	/* read */	noread,
89 	/* write */	nowrite,
90 	/* ioctl */	noioctl,
91 	/* poll */	nopoll,
92 	/* mmap */	nommap,
93 	/* strategy */	nostrategy,
94 	/* dump */	nodump,
95 	/* psize */	nopsize
96 };
97 
98 static int badfo_readwrite (struct file *fp, struct uio *uio,
99     struct ucred *cred, int flags, struct thread *td);
100 static int badfo_ioctl (struct file *fp, u_long com, caddr_t data,
101     struct thread *td);
102 static int badfo_poll (struct file *fp, int events,
103     struct ucred *cred, struct thread *td);
104 static int badfo_kqfilter (struct file *fp, struct knote *kn);
105 static int badfo_stat (struct file *fp, struct stat *sb, struct thread *td);
106 static int badfo_close (struct file *fp, struct thread *td);
107 
108 /*
109  * Descriptor management.
110  */
111 struct filelist filehead;	/* head of list of open files */
112 int nfiles;			/* actual number of open files */
113 extern int cmask;
114 
115 /*
116  * System calls on descriptors.
117  */
118 /* ARGSUSED */
119 int
120 getdtablesize(struct getdtablesize_args *uap)
121 {
122 	struct proc *p = curproc;
123 
124 	uap->sysmsg_result =
125 	    min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
126 	return (0);
127 }
128 
129 /*
130  * Duplicate a file descriptor to a particular value.
131  *
132  * note: keep in mind that a potential race condition exists when closing
133  * descriptors from a shared descriptor table (via rfork).
134  */
135 /* ARGSUSED */
136 int
137 dup2(struct dup2_args *uap)
138 {
139 	int error;
140 
141 	error = kern_dup(DUP_FIXED, uap->from, uap->to, uap->sysmsg_fds);
142 
143 	return (error);
144 }
145 
146 /*
147  * Duplicate a file descriptor.
148  */
149 /* ARGSUSED */
150 int
151 dup(struct dup_args *uap)
152 {
153 	int error;
154 
155 	error = kern_dup(DUP_VARIABLE, uap->fd, 0, uap->sysmsg_fds);
156 
157 	return (error);
158 }
159 
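/*
 * kern_fcntl() implements the guts of fcntl(2).  The command has already
 * been decoded and its argument placed in the fcntl_dat union by the
 * caller; the fcntl() wrapper below handles the userland copyin/copyout.
 */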
160 int
161 kern_fcntl(int fd, int cmd, union fcntl_dat *dat)
162 {
163 	struct thread *td = curthread;
164 	struct proc *p = td->td_proc;
165 	struct filedesc *fdp = p->p_fd;
166 	struct file *fp;
167 	char *pop;
168 	struct vnode *vp;
169 	u_int newmin;
170 	int tmp, error, flg = F_POSIX;
171 
172 	KKASSERT(p);
173 
174 	if ((unsigned)fd >= fdp->fd_nfiles ||
175 	    (fp = fdp->fd_ofiles[fd]) == NULL)
176 		return (EBADF);
177 	pop = &fdp->fd_ofileflags[fd];
178 
179 	switch (cmd) {
180 	case F_DUPFD:
181 		newmin = dat->fc_fd;
182 		if (newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
183 		    newmin > maxfilesperproc)
184 			return (EINVAL);
185 		error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
186 		return (error);
187 
188 	case F_GETFD:
189 		dat->fc_cloexec = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0;
190 		return (0);
191 
192 	case F_SETFD:
193 		*pop = (*pop &~ UF_EXCLOSE) |
194 		    (dat->fc_cloexec & FD_CLOEXEC ? UF_EXCLOSE : 0);
195 		return (0);
196 
197 	case F_GETFL:
198 		dat->fc_flags = OFLAGS(fp->f_flag);
199 		return (0);
200 
201 	case F_SETFL:
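		/*
		 * Set the new flags, then propagate the resulting
		 * non-blocking and async state to the object via
		 * FIONBIO/FIOASYNC.  If FIOASYNC fails, back out FNONBLOCK
		 * so the descriptor is left in a consistent state.
		 */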
202 		fhold(fp);
203 		fp->f_flag &= ~FCNTLFLAGS;
204 		fp->f_flag |= FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
205 		tmp = fp->f_flag & FNONBLOCK;
206 		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
207 		if (error) {
208 			fdrop(fp, td);
209 			return (error);
210 		}
211 		tmp = fp->f_flag & FASYNC;
212 		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, td);
213 		if (!error) {
214 			fdrop(fp, td);
215 			return (0);
216 		}
217 		fp->f_flag &= ~FNONBLOCK;
218 		tmp = 0;
219 		fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
220 		fdrop(fp, td);
221 		return (error);
222 
223 	case F_GETOWN:
224 		fhold(fp);
225 		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, td);
226 		fdrop(fp, td);
227 		return(error);
228 
229 	case F_SETOWN:
230 		fhold(fp);
231 		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, td);
232 		fdrop(fp, td);
233 		return(error);
234 
235 	case F_SETLKW:
236 		flg |= F_WAIT;
237 		/* Fall into F_SETLK */
238 
239 	case F_SETLK:
240 		if (fp->f_type != DTYPE_VNODE)
241 			return (EBADF);
242 		vp = (struct vnode *)fp->f_data;
243 
244 		/*
245 		 * copyin/lockop may block
246 		 */
247 		fhold(fp);
248 		if (dat->fc_flock.l_whence == SEEK_CUR)
249 			dat->fc_flock.l_start += fp->f_offset;
250 
251 		switch (dat->fc_flock.l_type) {
252 		case F_RDLCK:
253 			if ((fp->f_flag & FREAD) == 0) {
254 				error = EBADF;
255 				break;
256 			}
257 			p->p_leader->p_flag |= P_ADVLOCK;
258 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
259 			    &dat->fc_flock, flg);
260 			break;
261 		case F_WRLCK:
262 			if ((fp->f_flag & FWRITE) == 0) {
263 				error = EBADF;
264 				break;
265 			}
266 			p->p_leader->p_flag |= P_ADVLOCK;
267 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
268 			    &dat->fc_flock, flg);
269 			break;
270 		case F_UNLCK:
271 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
272 				&dat->fc_flock, F_POSIX);
273 			break;
274 		default:
275 			error = EINVAL;
276 			break;
277 		}
278 		/* Check for race with close */
279 		if ((unsigned) fd >= fdp->fd_nfiles ||
280 		    fp != fdp->fd_ofiles[fd]) {
281 			dat->fc_flock.l_whence = SEEK_SET;
282 			dat->fc_flock.l_start = 0;
283 			dat->fc_flock.l_len = 0;
284 			dat->fc_flock.l_type = F_UNLCK;
285 			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
286 					   F_UNLCK, &dat->fc_flock, F_POSIX);
287 		}
288 		fdrop(fp, td);
289 		return(error);
290 
291 	case F_GETLK:
292 		if (fp->f_type != DTYPE_VNODE)
293 			return (EBADF);
294 		vp = (struct vnode *)fp->f_data;
295 		/*
296 		 * copyin/lockop may block
297 		 */
298 		fhold(fp);
299 		if (dat->fc_flock.l_type != F_RDLCK &&
300 		    dat->fc_flock.l_type != F_WRLCK &&
301 		    dat->fc_flock.l_type != F_UNLCK) {
302 			fdrop(fp, td);
303 			return (EINVAL);
304 		}
305 		if (dat->fc_flock.l_whence == SEEK_CUR)
306 			dat->fc_flock.l_start += fp->f_offset;
307 		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
308 			    &dat->fc_flock, F_POSIX);
309 		fdrop(fp, td);
310 		return(error);
311 	default:
312 		return (EINVAL);
313 	}
314 	/* NOTREACHED */
315 }
316 
317 /*
318  * The file control system call.
319  */
320 int
321 fcntl(struct fcntl_args *uap)
322 {
323 	union fcntl_dat dat;
324 	int error;
325 
326 	switch (uap->cmd) {
327 	case F_DUPFD:
328 		dat.fc_fd = uap->arg;
329 		break;
330 	case F_SETFD:
331 		dat.fc_cloexec = uap->arg;
332 		break;
333 	case F_SETFL:
334 		dat.fc_flags = uap->arg;
335 		break;
336 	case F_SETOWN:
337 		dat.fc_owner = uap->arg;
338 		break;
339 	case F_SETLKW:
340 	case F_SETLK:
341 	case F_GETLK:
342 		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
343 		    sizeof(struct flock));
344 		if (error)
345 			return (error);
346 		break;
347 	}
348 
349 	error = kern_fcntl(uap->fd, uap->cmd, &dat);
350 
351 	if (error == 0) {
352 		switch (uap->cmd) {
353 		case F_DUPFD:
354 			uap->sysmsg_result = dat.fc_fd;
355 			break;
356 		case F_GETFD:
357 			uap->sysmsg_result = dat.fc_cloexec;
358 			break;
359 		case F_GETFL:
360 			uap->sysmsg_result = dat.fc_flags;
361 			break;
362 		case F_GETOWN:
363 			uap->sysmsg_result = dat.fc_owner;
			break;
364 		case F_GETLK:
365 			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
366 			    sizeof(struct flock));
367 			break;
368 		}
369 	}
370 
371 	return (error);
372 }
373 
374 /*
375  * Common code for dup, dup2, and fcntl(F_DUPFD).
376  *
377  * The type flag can be either DUP_FIXED or DUP_VARIABLE.  DUP_FIXED tells
378  * kern_dup() to destructively dup over an existing file descriptor if new
379  * is already open.  DUP_VARIABLE tells kern_dup() to find the lowest
380  * unused file descriptor that is greater than or equal to new.
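 *
 * As wired up above, dup() maps to kern_dup(DUP_VARIABLE, fd, 0, ...),
 * dup2() maps to kern_dup(DUP_FIXED, from, to, ...), and fcntl(F_DUPFD)
 * maps to kern_dup(DUP_VARIABLE, fd, arg, ...).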
381  */
382 int
383 kern_dup(enum dup_type type, int old, int new, int *res)
384 {
385 	struct thread *td = curthread;
386 	struct proc *p = td->td_proc;
387 	struct filedesc *fdp = p->p_fd;
388 	struct file *fp;
389 	struct file *delfp;
390 	int holdleaders;
391 	int error, newfd;
392 
393 	/*
394 	 * Verify that we have a valid descriptor to dup from and
395 	 * possibly to dup to.
396 	 */
397 	if (old < 0 || new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
398 	    new >= maxfilesperproc)
399 		return (EBADF);
400 	if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL)
401 		return (EBADF);
402 	if (type == DUP_FIXED && old == new) {
403 		*res = new;
404 		return (0);
405 	}
406 	fp = fdp->fd_ofiles[old];
407 	fhold(fp);
408 
409 	/*
410 	 * Expand the table for the new descriptor if needed.  This may
411 	 * block and drop and reacquire the filedesc lock.
412 	 */
413 	if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
414 		error = fdalloc(p, new, &newfd);
415 		if (error) {
416 			fdrop(fp, td);
417 			return (error);
418 		}
419 	}
420 	if (type == DUP_VARIABLE)
421 		new = newfd;
422 
423 	/*
424 	 * If the old file changed out from under us then treat it as a
425 	 * bad file descriptor.  Userland should do its own locking to
426 	 * avoid this case.
427 	 */
428 	if (fdp->fd_ofiles[old] != fp) {
429 		if (fdp->fd_ofiles[new] == NULL) {
430 			if (new < fdp->fd_freefile)
431 				fdp->fd_freefile = new;
432 			while (fdp->fd_lastfile > 0 &&
433 			    fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
434 				fdp->fd_lastfile--;
435 		}
436 		fdrop(fp, td);
437 		return (EBADF);
438 	}
439 	KASSERT(old != new, ("new fd is same as old"));
440 
441 	/*
442 	 * Save info on the descriptor being overwritten.  We have
443 	 * to do the unmap now, but we cannot close it without
444 	 * introducing an ownership race for the slot.
445 	 */
446 	delfp = fdp->fd_ofiles[new];
447 	if (delfp != NULL && p->p_fdtol != NULL) {
448 		/*
449 		 * Ask fdfree() to sleep to ensure that all relevant
450 		 * process leaders can be traversed in closef().
451 		 */
452 		fdp->fd_holdleaderscount++;
453 		holdleaders = 1;
454 	} else
455 		holdleaders = 0;
456 	KASSERT(delfp == NULL || type == DUP_FIXED,
457 	    ("dup() picked an open file"));
458 #if 0
459 	if (delfp && (fdp->fd_ofileflags[new] & UF_MAPPED))
460 		(void) munmapfd(p, new);
461 #endif
462 
463 	/*
464 	 * Duplicate the source descriptor, update lastfile
465 	 */
466 	fdp->fd_ofiles[new] = fp;
467 	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
468 	if (new > fdp->fd_lastfile)
469 		fdp->fd_lastfile = new;
470 	*res = new;
471 
472 	/*
473 	 * If we dup'd over a valid file, we now own the reference to it
474 	 * and must dispose of it using closef() semantics (as if a
475 	 * close() were performed on it).
476 	 */
477 	if (delfp) {
478 		(void) closef(delfp, td);
479 		if (holdleaders) {
480 			fdp->fd_holdleaderscount--;
481 			if (fdp->fd_holdleaderscount == 0 &&
482 			    fdp->fd_holdleaderswakeup != 0) {
483 				fdp->fd_holdleaderswakeup = 0;
484 				wakeup(&fdp->fd_holdleaderscount);
485 			}
486 		}
487 	}
488 	return (0);
489 }
490 
491 /*
492  * If sigio is on the list associated with a process or process group,
493  * disable signalling from the device, remove sigio from the list and
494  * free sigio.
495  */
496 void
497 funsetown(struct sigio *sigio)
498 {
499 	int s;
500 
501 	if (sigio == NULL)
502 		return;
503 	s = splhigh();
504 	*(sigio->sio_myref) = NULL;
505 	splx(s);
506 	if (sigio->sio_pgid < 0) {
507 		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
508 			     sigio, sio_pgsigio);
509 	} else /* if (sigio->sio_pgid > 0) */ {
510 		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
511 			     sigio, sio_pgsigio);
512 	}
513 	crfree(sigio->sio_ucred);
514 	free(sigio, M_SIGIO);
515 }
516 
517 /* Free a list of sigio structures. */
518 void
519 funsetownlst(struct sigiolst *sigiolst)
520 {
521 	struct sigio *sigio;
522 
523 	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
524 		funsetown(sigio);
525 }
526 
527 /*
528  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
529  *
530  * After permission checking, add a sigio structure to the sigio list for
531  * the process or process group.
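 *
 * The new sigio records a back-pointer (sio_myref) to the owner's hook
 * so that a later funsetown() can clear that pointer at splhigh().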
532  */
533 int
534 fsetown(pid_t pgid, struct sigio **sigiop)
535 {
536 	struct proc *proc;
537 	struct pgrp *pgrp;
538 	struct sigio *sigio;
539 	int s;
540 
541 	if (pgid == 0) {
542 		funsetown(*sigiop);
543 		return (0);
544 	}
545 	if (pgid > 0) {
546 		proc = pfind(pgid);
547 		if (proc == NULL)
548 			return (ESRCH);
549 
550 		/*
551 		 * Policy - Don't allow a process to FSETOWN a process
552 		 * in another session.
553 		 *
554 		 * Remove this test to allow maximum flexibility or
555 		 * restrict FSETOWN to the current process or process
556 		 * group for maximum safety.
557 		 */
558 		if (proc->p_session != curproc->p_session)
559 			return (EPERM);
560 
561 		pgrp = NULL;
562 	} else /* if (pgid < 0) */ {
563 		pgrp = pgfind(-pgid);
564 		if (pgrp == NULL)
565 			return (ESRCH);
566 
567 		/*
568 		 * Policy - Don't allow a process to FSETOWN a process
569 		 * in another session.
570 		 *
571 		 * Remove this test to allow maximum flexibility or
572 		 * restrict FSETOWN to the current process or process
573 		 * group for maximum safety.
574 		 */
575 		if (pgrp->pg_session != curproc->p_session)
576 			return (EPERM);
577 
578 		proc = NULL;
579 	}
580 	funsetown(*sigiop);
581 	sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
582 	if (pgid > 0) {
583 		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
584 		sigio->sio_proc = proc;
585 	} else {
586 		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
587 		sigio->sio_pgrp = pgrp;
588 	}
589 	sigio->sio_pgid = pgid;
590 	sigio->sio_ucred = crhold(curproc->p_ucred);
591 	/* It would be convenient if p_ruid was in ucred. */
592 	sigio->sio_ruid = curproc->p_ucred->cr_ruid;
593 	sigio->sio_myref = sigiop;
594 	s = splhigh();
595 	*sigiop = sigio;
596 	splx(s);
597 	return (0);
598 }
599 
600 /*
601  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
602  */
603 pid_t
604 fgetown(struct sigio *sigio)
605 {
606 	return (sigio != NULL ? sigio->sio_pgid : 0);
607 }
608 
609 /*
610  * Close a file descriptor.
611  */
612 /* ARGSUSED */
613 int
614 close(struct close_args *uap)
615 {
616 	struct thread *td = curthread;
617 	struct proc *p = td->td_proc;
618 	struct filedesc *fdp;
619 	struct file *fp;
620 	int fd = uap->fd;
621 	int error;
622 	int holdleaders;
623 
624 	KKASSERT(p);
625 	fdp = p->p_fd;
626 
627 	if ((unsigned)fd >= fdp->fd_nfiles ||
628 	    (fp = fdp->fd_ofiles[fd]) == NULL)
629 		return (EBADF);
630 #if 0
631 	if (fdp->fd_ofileflags[fd] & UF_MAPPED)
632 		(void) munmapfd(p, fd);
633 #endif
634 	fdp->fd_ofiles[fd] = NULL;
635 	fdp->fd_ofileflags[fd] = 0;
636 	holdleaders = 0;
637 	if (p->p_fdtol != NULL) {
638 		/*
639 		 * Ask fdfree() to sleep to ensure that all relevant
640 		 * process leaders can be traversed in closef().
641 		 */
642 		fdp->fd_holdleaderscount++;
643 		holdleaders = 1;
644 	}
645 
646 	/*
647 	 * we now hold the fp reference that used to be owned by the descriptor
648 	 * array.
649 	 */
650 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
651 		fdp->fd_lastfile--;
652 	if (fd < fdp->fd_freefile)
653 		fdp->fd_freefile = fd;
654 	if (fd < fdp->fd_knlistsize)
655 		knote_fdclose(p, fd);
656 	error = closef(fp, td);
657 	if (holdleaders) {
658 		fdp->fd_holdleaderscount--;
659 		if (fdp->fd_holdleaderscount == 0 &&
660 		    fdp->fd_holdleaderswakeup != 0) {
661 			fdp->fd_holdleaderswakeup = 0;
662 			wakeup(&fdp->fd_holdleaderscount);
663 		}
664 	}
665 	return (error);
666 }
667 
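/*
 * kern_fstat() does the work for fstat() and nfstat() below: look up the
 * descriptor, hold the file across the fo_stat() call, then drop it.
 */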
668 int
669 kern_fstat(int fd, struct stat *ub)
670 {
671 	struct thread *td = curthread;
672 	struct proc *p = td->td_proc;
673 	struct filedesc *fdp;
674 	struct file *fp;
675 	int error;
676 
677 	KKASSERT(p);
678 
679 	fdp = p->p_fd;
680 	if ((unsigned)fd >= fdp->fd_nfiles ||
681 	    (fp = fdp->fd_ofiles[fd]) == NULL)
682 		return (EBADF);
683 	fhold(fp);
684 	error = fo_stat(fp, ub, td);
685 	fdrop(fp, td);
686 
687 	return (error);
688 }
689 
690 /*
691  * Return status information about a file descriptor.
692  */
693 int
694 fstat(struct fstat_args *uap)
695 {
696 	struct stat st;
697 	int error;
698 
699 	error = kern_fstat(uap->fd, &st);
700 
701 	if (error == 0)
702 		error = copyout(&st, uap->sb, sizeof(st));
703 	return (error);
704 }
705 
706 /*
707  * XXX: This is for source compatibility with NetBSD.  Probably doesn't
708  * belong here.
709  */
710 int
711 nfstat(struct nfstat_args *uap)
712 {
713 	struct stat st;
714 	struct nstat nst;
715 	int error;
716 
717 	error = kern_fstat(uap->fd, &st);
718 
719 	if (error == 0) {
720 		cvtnstat(&st, &nst);
721 		error = copyout(&nst, uap->sb, sizeof (nst));
722 	}
723 	return (error);
724 }
725 
726 /*
727  * Return pathconf information about a file descriptor.
728  */
729 /* ARGSUSED */
730 int
731 fpathconf(struct fpathconf_args *uap)
732 {
733 	struct thread *td = curthread;
734 	struct proc *p = td->td_proc;
735 	struct filedesc *fdp;
736 	struct file *fp;
737 	struct vnode *vp;
738 	int error = 0;
739 
740 	KKASSERT(p);
741 	fdp = p->p_fd;
742 	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
743 	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
744 		return (EBADF);
745 
746 	fhold(fp);
747 
748 	switch (fp->f_type) {
749 	case DTYPE_PIPE:
750 	case DTYPE_SOCKET:
751 		if (uap->name != _PC_PIPE_BUF) {
752 			error = EINVAL;
753 		} else {
754 			uap->sysmsg_result = PIPE_BUF;
755 			error = 0;
756 		}
757 		break;
758 	case DTYPE_FIFO:
759 	case DTYPE_VNODE:
760 		vp = (struct vnode *)fp->f_data;
761 		error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
762 		break;
763 	default:
764 		error = EOPNOTSUPP;
765 		break;
766 	}
767 	fdrop(fp, td);
768 	return(error);
769 }
770 
771 /*
772  * Allocate a file descriptor for the process.
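 *
 * The table is searched starting at the higher of want and fd_freefile;
 * if no free slot is found the ofiles array is grown (NDEXTENT entries
 * initially, doubling thereafter) and the search is retried.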
773  */
774 static int fdexpand;
775 SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");
776 
777 int
778 fdalloc(struct proc *p, int want, int *result)
779 {
780 	struct filedesc *fdp = p->p_fd;
781 	int i;
782 	int lim, last, nfiles;
783 	struct file **newofile;
784 	char *newofileflags;
785 
786 	/*
787 	 * Search for a free descriptor starting at the higher
788 	 * of want or fd_freefile.  If that fails, consider
789 	 * expanding the ofile array.
790 	 */
791 	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
792 	for (;;) {
793 		last = min(fdp->fd_nfiles, lim);
794 		if ((i = want) < fdp->fd_freefile)
795 			i = fdp->fd_freefile;
796 		for (; i < last; i++) {
797 			if (fdp->fd_ofiles[i] == NULL) {
798 				fdp->fd_ofileflags[i] = 0;
799 				if (i > fdp->fd_lastfile)
800 					fdp->fd_lastfile = i;
801 				if (want <= fdp->fd_freefile)
802 					fdp->fd_freefile = i;
803 				*result = i;
804 				return (0);
805 			}
806 		}
807 
808 		/*
809 		 * No space in current array.  Expand?
810 		 */
811 		if (fdp->fd_nfiles >= lim)
812 			return (EMFILE);
813 		if (fdp->fd_nfiles < NDEXTENT)
814 			nfiles = NDEXTENT;
815 		else
816 			nfiles = 2 * fdp->fd_nfiles;
817 		newofile = malloc(nfiles * OFILESIZE, M_FILEDESC, M_WAITOK);
818 
819 		/*
820 		 * deal with a file-table extension race that might have occurred
821 		 * when malloc was blocked.
822 		 */
823 		if (fdp->fd_nfiles >= nfiles) {
824 			free(newofile, M_FILEDESC);
825 			continue;
826 		}
827 		newofileflags = (char *) &newofile[nfiles];
828 		/*
829 		 * Copy the existing ofile and ofileflags arrays
830 		 * and zero the new portion of each array.
831 		 */
832 		bcopy(fdp->fd_ofiles, newofile,
833 			(i = sizeof(struct file *) * fdp->fd_nfiles));
834 		bzero((char *)newofile + i, nfiles * sizeof(struct file *) - i);
835 		bcopy(fdp->fd_ofileflags, newofileflags,
836 			(i = sizeof(char) * fdp->fd_nfiles));
837 		bzero(newofileflags + i, nfiles * sizeof(char) - i);
838 		if (fdp->fd_nfiles > NDFILE)
839 			free(fdp->fd_ofiles, M_FILEDESC);
840 		fdp->fd_ofiles = newofile;
841 		fdp->fd_ofileflags = newofileflags;
842 		fdp->fd_nfiles = nfiles;
843 		fdexpand++;
844 	}
845 	return (0);
846 }
847 
848 /*
849  * Check to see whether n user file descriptors
850  * are available to the process p.
851  */
852 int
853 fdavail(struct proc *p, int n)
854 {
855 	struct filedesc *fdp = p->p_fd;
856 	struct file **fpp;
857 	int i, lim, last;
858 
859 	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
860 	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
861 		return (1);
862 
863 	last = min(fdp->fd_nfiles, lim);
864 	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
865 	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
866 		if (*fpp == NULL && --n <= 0)
867 			return (1);
868 	}
869 	return (0);
870 }
871 
872 /*
873  * falloc:
874  *	Create a new open file structure and allocate a file descriptor
875  *	for the process that refers to it.  If p is NULL, no descriptor
876  *	is allocated and the file pointer is returned unassociated with
877  *	any process.  resultfd is only used if p is not NULL and may
878  *	separately be NULL indicating that you don't need the returned fd.
879  */
880 int
881 falloc(struct proc *p, struct file **resultfp, int *resultfd)
882 {
883 	struct file *fp, *fq;
884 	int error, i;
885 	static struct timeval lastfail;
886 	static int curfail;
887 
888 	if (nfiles >= maxfiles - maxfilesrootres &&
889 	    ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) {
890 		if (ppsratecheck(&lastfail, &curfail, 1)) {
891 			printf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n",
892 				(p ? p->p_ucred->cr_ruid : -1));
893 		}
894 		return (ENFILE);
895 	}
896 	/*
897 	 * Allocate a new file descriptor.
898 	 * If the process has file descriptor zero open, add to the list
899 	 * of open files at that point, otherwise put it at the front of
900 	 * the list of open files.
901 	 */
902 	nfiles++;
903 	fp = malloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
904 
905 	/*
906 	 * wait until after malloc (which may have blocked) returns before
907 	 * allocating the slot, else a race might have shrunk it if we had
908 	 * allocated it before the malloc.
909 	 */
910 	i = -1;
911 	if (p && (error = fdalloc(p, 0, &i))) {
912 		nfiles--;
913 		free(fp, M_FILE);
914 		return (error);
915 	}
916 	fp->f_count = 1;
917 	fp->f_ops = &badfileops;
918 	fp->f_seqcount = 1;
919 	if (p) {
920 		fp->f_cred = crhold(p->p_ucred);
921 		if ((fq = p->p_fd->fd_ofiles[0]) != NULL) {
922 			LIST_INSERT_AFTER(fq, fp, f_list);
923 		} else {
924 			LIST_INSERT_HEAD(&filehead, fp, f_list);
925 		}
926 		p->p_fd->fd_ofiles[i] = fp;
927 	} else {
928 		fp->f_cred = crhold(proc0.p_ucred);
929 		LIST_INSERT_HEAD(&filehead, fp, f_list);
930 	}
931 	if (resultfp)
932 		*resultfp = fp;
933 	if (resultfd)
934 		*resultfd = i;
935 	return (0);
936 }
937 
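/*
 * Replace the credentials associated with an open file.
 */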
938 void
939 fsetcred(struct file *fp, struct ucred *cr)
940 {
941 	crhold(cr);
942 	crfree(fp->f_cred);
943 	fp->f_cred = cr;
944 }
945 
946 /*
947  * Free a file descriptor.
948  */
949 void
950 ffree(struct file *fp)
951 {
952 	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
953 	LIST_REMOVE(fp, f_list);
954 	crfree(fp->f_cred);
955 	if (fp->f_ncp) {
956 	    cache_drop(fp->f_ncp);
957 	    fp->f_ncp = NULL;
958 	}
959 	nfiles--;
960 	free(fp, M_FILE);
961 }
962 
963 /*
964  * Build a new filedesc structure.
965  */
966 struct filedesc *
967 fdinit(struct proc *p)
968 {
969 	struct filedesc0 *newfdp;
970 	struct filedesc *fdp = p->p_fd;
971 
972 	newfdp = malloc(sizeof(struct filedesc0), M_FILEDESC, M_WAITOK|M_ZERO);
973 	newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
974 	if (newfdp->fd_fd.fd_cdir) {
975 		vref(newfdp->fd_fd.fd_cdir);
976 		newfdp->fd_fd.fd_ncdir = cache_hold(fdp->fd_ncdir);
977 	}
978 	newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
979 	newfdp->fd_fd.fd_nrdir = cache_hold(fdp->fd_nrdir);
980 	vref(newfdp->fd_fd.fd_rdir);
981 	newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
982 	if (newfdp->fd_fd.fd_jdir) {
983 		vref(newfdp->fd_fd.fd_jdir);
984 		newfdp->fd_fd.fd_njdir = cache_hold(fdp->fd_njdir);
985 	}
986 
987 
988 	/* Create the file descriptor table. */
989 	newfdp->fd_fd.fd_refcnt = 1;
990 	newfdp->fd_fd.fd_cmask = cmask;
991 	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
992 	newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
993 	newfdp->fd_fd.fd_nfiles = NDFILE;
994 	newfdp->fd_fd.fd_knlistsize = -1;
995 
996 	return (&newfdp->fd_fd);
997 }
998 
999 /*
1000  * Share a filedesc structure.
1001  */
1002 struct filedesc *
1003 fdshare(struct proc *p)
1004 {
1005 	p->p_fd->fd_refcnt++;
1006 	return (p->p_fd);
1007 }
1008 
1009 /*
1010  * Copy a filedesc structure.
1011  */
1012 struct filedesc *
1013 fdcopy(struct proc *p)
1014 {
1015 	struct filedesc *newfdp, *fdp = p->p_fd;
1016 	struct file **fpp;
1017 	int i;
1018 
1019 	/* Certain daemons might not have file descriptors. */
1020 	if (fdp == NULL)
1021 		return (NULL);
1022 
1023 	newfdp = malloc(sizeof(struct filedesc0), M_FILEDESC, M_WAITOK);
1024 	bcopy(fdp, newfdp, sizeof(struct filedesc));
1025 	if (newfdp->fd_cdir) {
1026 		vref(newfdp->fd_cdir);
1027 		newfdp->fd_ncdir = cache_hold(newfdp->fd_ncdir);
1028 	}
1029 	/*
1030 	 * We must check for fd_rdir here, at least for now because
1031 	 * the init process is created before we have access to the
1032 	 * rootvnode to take a reference to it.
1033 	 */
1034 	if (newfdp->fd_rdir) {
1035 		vref(newfdp->fd_rdir);
1036 		newfdp->fd_nrdir = cache_hold(newfdp->fd_nrdir);
1037 	}
1038 	if (newfdp->fd_jdir) {
1039 		vref(newfdp->fd_jdir);
1040 		newfdp->fd_njdir = cache_hold(newfdp->fd_njdir);
1041 	}
1042 	newfdp->fd_refcnt = 1;
1043 
1044 	/*
1045 	 * If the number of open files fits in the internal arrays
1046 	 * of the open file structure, use them, otherwise allocate
1047 	 * additional memory for the number of descriptors currently
1048 	 * in use.
1049 	 */
1050 	if (newfdp->fd_lastfile < NDFILE) {
1051 		newfdp->fd_ofiles = ((struct filedesc0 *) newfdp)->fd_dfiles;
1052 		newfdp->fd_ofileflags =
1053 		    ((struct filedesc0 *) newfdp)->fd_dfileflags;
1054 		i = NDFILE;
1055 	} else {
1056 		/*
1057 		 * Compute the smallest multiple of NDEXTENT needed
1058 		 * for the file descriptors currently in use,
1059 		 * allowing the table to shrink.
1060 		 */
1061 		i = newfdp->fd_nfiles;
1062 		while (i > 2 * NDEXTENT && i > newfdp->fd_lastfile * 2)
1063 			i /= 2;
1064 		newfdp->fd_ofiles = malloc(i * OFILESIZE, M_FILEDESC, M_WAITOK);
1065 		newfdp->fd_ofileflags = (char *) &newfdp->fd_ofiles[i];
1066 	}
1067 	newfdp->fd_nfiles = i;
1068 	bcopy(fdp->fd_ofiles, newfdp->fd_ofiles, i * sizeof(struct file **));
1069 	bcopy(fdp->fd_ofileflags, newfdp->fd_ofileflags, i * sizeof(char));
1070 
1071 	/*
1072 	 * kq descriptors cannot be copied.
1073 	 */
1074 	if (newfdp->fd_knlistsize != -1) {
1075 		fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile];
1076 		for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--) {
1077 			if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE) {
1078 				*fpp = NULL;
1079 				if (i < newfdp->fd_freefile)
1080 					newfdp->fd_freefile = i;
1081 			}
1082 			if (*fpp == NULL && i == newfdp->fd_lastfile && i > 0)
1083 				newfdp->fd_lastfile--;
1084 		}
1085 		newfdp->fd_knlist = NULL;
1086 		newfdp->fd_knlistsize = -1;
1087 		newfdp->fd_knhash = NULL;
1088 		newfdp->fd_knhashmask = 0;
1089 	}
1090 
1091 	fpp = newfdp->fd_ofiles;
1092 	for (i = newfdp->fd_lastfile; i-- >= 0; fpp++) {
1093 		if (*fpp != NULL)
1094 			fhold(*fpp);
1095 	}
1096 	return (newfdp);
1097 }
1098 
1099 /*
1100  * Release a filedesc structure.
1101  */
1102 void
1103 fdfree(struct proc *p)
1104 {
1105 	struct thread *td = p->p_thread;
1106 	struct filedesc *fdp = p->p_fd;
1107 	struct file **fpp;
1108 	int i;
1109 	struct filedesc_to_leader *fdtol;
1110 	struct file *fp;
1111 	struct vnode *vp;
1112 	struct flock lf;
1113 
1114 	/* Certain daemons might not have file descriptors. */
1115 	if (fdp == NULL)
1116 		return;
1117 
1118 	/* Check for special need to clear POSIX style locks */
1119 	fdtol = p->p_fdtol;
1120 	if (fdtol != NULL) {
1121 		KASSERT(fdtol->fdl_refcount > 0,
1122 			("filedesc_to_refcount botch: fdl_refcount=%d",
1123 			 fdtol->fdl_refcount));
1124 		if (fdtol->fdl_refcount == 1 &&
1125 		    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1126 			i = 0;
1127 			fpp = fdp->fd_ofiles;
1128 			for (i = 0, fpp = fdp->fd_ofiles;
1129 			     i <= fdp->fd_lastfile;
1130 			     i++, fpp++) {
1131 				if (*fpp == NULL ||
1132 				    (*fpp)->f_type != DTYPE_VNODE)
1133 					continue;
1134 				fp = *fpp;
1135 				fhold(fp);
1136 				lf.l_whence = SEEK_SET;
1137 				lf.l_start = 0;
1138 				lf.l_len = 0;
1139 				lf.l_type = F_UNLCK;
1140 				vp = (struct vnode *)fp->f_data;
1141 				(void) VOP_ADVLOCK(vp,
1142 						   (caddr_t)p->p_leader,
1143 						   F_UNLCK,
1144 						   &lf,
1145 						   F_POSIX);
1146 				fdrop(fp, p->p_thread);
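				/*
				 * fdrop() may have blocked and fd_ofiles
				 * may have been reallocated, so reload fpp.
				 */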
1147 				fpp = fdp->fd_ofiles + i;
1148 			}
1149 		}
1150 	retry:
1151 		if (fdtol->fdl_refcount == 1) {
1152 			if (fdp->fd_holdleaderscount > 0 &&
1153 			    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1154 				/*
1155 				 * close() or kern_dup() has cleared a reference
1156 				 * in a shared file descriptor table.
1157 				 */
1158 				fdp->fd_holdleaderswakeup = 1;
1159 				tsleep(&fdp->fd_holdleaderscount,
1160 				       0, "fdlhold", 0);
1161 				goto retry;
1162 			}
1163 			if (fdtol->fdl_holdcount > 0) {
1164 				/*
1165 				 * Ensure that fdtol->fdl_leader
1166 				 * remains valid in closef().
1167 				 */
1168 				fdtol->fdl_wakeup = 1;
1169 				tsleep(fdtol, 0, "fdlhold", 0);
1170 				goto retry;
1171 			}
1172 		}
1173 		fdtol->fdl_refcount--;
1174 		if (fdtol->fdl_refcount == 0 &&
1175 		    fdtol->fdl_holdcount == 0) {
1176 			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
1177 			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
1178 		} else
1179 			fdtol = NULL;
1180 		p->p_fdtol = NULL;
1181 		if (fdtol != NULL)
1182 			free(fdtol, M_FILEDESC_TO_LEADER);
1183 	}
1184 	if (--fdp->fd_refcnt > 0)
1185 		return;
1186 	/*
1187 	 * We are the last reference to the structure, so we can
1188 	 * safely assume it will not change out from under us.
1189 	 */
1190 	fpp = fdp->fd_ofiles;
1191 	for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
1192 		if (*fpp)
1193 			(void) closef(*fpp, td);
1194 	}
1195 	if (fdp->fd_nfiles > NDFILE)
1196 		free(fdp->fd_ofiles, M_FILEDESC);
1197 	if (fdp->fd_cdir) {
1198 		cache_drop(fdp->fd_ncdir);
1199 		vrele(fdp->fd_cdir);
1200 	}
1201 	cache_drop(fdp->fd_nrdir);
1202 	vrele(fdp->fd_rdir);
1203 	if (fdp->fd_jdir) {
1204 		cache_drop(fdp->fd_njdir);
1205 		vrele(fdp->fd_jdir);
1206 	}
1207 	if (fdp->fd_knlist)
1208 		free(fdp->fd_knlist, M_KQUEUE);
1209 	if (fdp->fd_knhash)
1210 		free(fdp->fd_knhash, M_KQUEUE);
1211 	free(fdp, M_FILEDESC);
1212 }
1213 
1214 /*
1215  * For setugid programs, we don't want people to use that setugidness
1216  * to generate error messages which write to a file which would
1217  * otherwise be off-limits to the process.
1218  *
1219  * This is a gross hack to plug the hole.  A better solution would involve
1220  * a special vop or other form of generalized access control mechanism.  We
1221  * go ahead and just reject all procfs file systems accesses as dangerous.
1222  *
1223  * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
1224  * sufficient.  We also don't check for setugidness since we know we are.
1225  */
1226 static int
1227 is_unsafe(struct file *fp)
1228 {
1229 	if (fp->f_type == DTYPE_VNODE &&
1230 	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
1231 		return (1);
1232 	return (0);
1233 }
1234 
1235 /*
1236  * Make this setugid thing safe, if at all possible.
1237  */
1238 void
1239 setugidsafety(struct proc *p)
1240 {
1241 	struct thread *td = p->p_thread;
1242 	struct filedesc *fdp = p->p_fd;
1243 	int i;
1244 
1245 	/* Certain daemons might not have file descriptors. */
1246 	if (fdp == NULL)
1247 		return;
1248 
1249 	/*
1250 	 * note: fdp->fd_ofiles may be reallocated out from under us while
1251 	 * we are blocked in a close.  Be careful!
1252 	 */
1253 	for (i = 0; i <= fdp->fd_lastfile; i++) {
1254 		if (i > 2)
1255 			break;
1256 		if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
1257 			struct file *fp;
1258 
1259 #if 0
1260 			if ((fdp->fd_ofileflags[i] & UF_MAPPED) != 0)
1261 				(void) munmapfd(p, i);
1262 #endif
1263 			if (i < fdp->fd_knlistsize)
1264 				knote_fdclose(p, i);
1265 			/*
1266 			 * NULL-out descriptor prior to close to avoid
1267 			 * a race while close blocks.
1268 			 */
1269 			fp = fdp->fd_ofiles[i];
1270 			fdp->fd_ofiles[i] = NULL;
1271 			fdp->fd_ofileflags[i] = 0;
1272 			if (i < fdp->fd_freefile)
1273 				fdp->fd_freefile = i;
1274 			(void) closef(fp, td);
1275 		}
1276 	}
1277 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
1278 		fdp->fd_lastfile--;
1279 }
1280 
1281 /*
1282  * Close any files on exec?
1283  */
1284 void
1285 fdcloseexec(struct proc *p)
1286 {
1287 	struct thread *td = p->p_thread;
1288 	struct filedesc *fdp = p->p_fd;
1289 	int i;
1290 
1291 	/* Certain daemons might not have file descriptors. */
1292 	if (fdp == NULL)
1293 		return;
1294 
1295 	/*
1296 	 * We cannot cache fd_ofiles or fd_ofileflags since operations
1297 	 * may block and rip them out from under us.
1298 	 */
1299 	for (i = 0; i <= fdp->fd_lastfile; i++) {
1300 		if (fdp->fd_ofiles[i] != NULL &&
1301 		    (fdp->fd_ofileflags[i] & UF_EXCLOSE)) {
1302 			struct file *fp;
1303 
1304 #if 0
1305 			if (fdp->fd_ofileflags[i] & UF_MAPPED)
1306 				(void) munmapfd(p, i);
1307 #endif
1308 			if (i < fdp->fd_knlistsize)
1309 				knote_fdclose(p, i);
1310 			/*
1311 			 * NULL-out descriptor prior to close to avoid
1312 			 * a race while close blocks.
1313 			 */
1314 			fp = fdp->fd_ofiles[i];
1315 			fdp->fd_ofiles[i] = NULL;
1316 			fdp->fd_ofileflags[i] = 0;
1317 			if (i < fdp->fd_freefile)
1318 				fdp->fd_freefile = i;
1319 			(void) closef(fp, td);
1320 		}
1321 	}
1322 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
1323 		fdp->fd_lastfile--;
1324 }
1325 
1326 /*
1327  * It is unsafe for set[ug]id processes to be started with file
1328  * descriptors 0..2 closed, as these descriptors are given implicit
1329  * significance in the Standard C library.  fdcheckstd() will create a
1330  * descriptor referencing /dev/null for each of stdin, stdout, and
1331  * stderr that is not already open.
1332  */
1333 int
1334 fdcheckstd(struct proc *p)
1335 {
1336 	struct thread *td = p->p_thread;
1337 	struct nameidata nd;
1338 	struct filedesc *fdp;
1339 	struct file *fp;
1340 	int retval;
1341 	int fd, i, error, flags, devnull;
1342 
1343 	fdp = p->p_fd;
1344 	if (fdp == NULL)
1345 		return (0);
1346 	devnull = -1;
1347 	error = 0;
1348 	for (i = 0; i < 3; i++) {
1349 		if (fdp->fd_ofiles[i] != NULL)
1350 			continue;
1351 		if (devnull < 0) {
1352 			error = falloc(p, &fp, &fd);
1353 			if (error != 0)
1354 				break;
1355 			NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_SYSSPACE,
1356 			    "/dev/null", td);
1357 			flags = FREAD | FWRITE;
1358 			error = vn_open(&nd, flags, 0);
1359 			if (error != 0) {
1360 				fdp->fd_ofiles[i] = NULL;
1361 				fdrop(fp, td);
1362 				break;
1363 			}
1364 			NDFREE(&nd, NDF_ONLY_PNBUF);
1365 			fp->f_data = (caddr_t)nd.ni_vp;
1366 			fp->f_flag = flags;
1367 			fp->f_ops = &vnops;
1368 			fp->f_type = DTYPE_VNODE;
1369 			VOP_UNLOCK(nd.ni_vp, 0, td);
1370 			devnull = fd;
1371 		} else {
1372 			error = kern_dup(DUP_FIXED, devnull, i, &retval);
1373 			if (error != 0)
1374 				break;
1375 		}
1376 	}
1377 	return (error);
1378 }
1379 
1380 /*
1381  * Internal form of close.
1382  * Decrement reference count on file structure.
1383  * Note: td and/or p may be NULL when closing a file
1384  * that was being passed in a message.
1385  */
1386 int
1387 closef(struct file *fp, struct thread *td)
1388 {
1389 	struct vnode *vp;
1390 	struct flock lf;
1391 	struct filedesc_to_leader *fdtol;
1392 	struct proc *p;
1393 
1394 	if (fp == NULL)
1395 		return (0);
1396 	if (td == NULL) {
1397 		td = curthread;
1398 		p = NULL;		/* allow no proc association */
1399 	} else {
1400 		p = td->td_proc;	/* can also be NULL */
1401 	}
1402 	/*
1403 	 * POSIX record locking dictates that any close releases ALL
1404 	 * locks owned by this process.  This is handled by setting
1405 	 * a flag in the unlock to free ONLY locks obeying POSIX
1406 	 * semantics, and not to free BSD-style file locks.
1407 	 * If the descriptor was in a message, POSIX-style locks
1408 	 * aren't passed with the descriptor.
1409 	 */
1410 	if (p != NULL &&
1411 	    fp->f_type == DTYPE_VNODE) {
1412 		if ((p->p_leader->p_flag & P_ADVLOCK) != 0) {
1413 			lf.l_whence = SEEK_SET;
1414 			lf.l_start = 0;
1415 			lf.l_len = 0;
1416 			lf.l_type = F_UNLCK;
1417 			vp = (struct vnode *)fp->f_data;
1418 			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
1419 					   &lf, F_POSIX);
1420 		}
1421 		fdtol = p->p_fdtol;
1422 		if (fdtol != NULL) {
1423 			/*
1424 			 * Handle special case where file descriptor table
1425 			 * is shared between multiple process leaders.
1426 			 */
1427 			for (fdtol = fdtol->fdl_next;
1428 			     fdtol != p->p_fdtol;
1429 			     fdtol = fdtol->fdl_next) {
1430 				if ((fdtol->fdl_leader->p_flag &
1431 				     P_ADVLOCK) == 0)
1432 					continue;
1433 				fdtol->fdl_holdcount++;
1434 				lf.l_whence = SEEK_SET;
1435 				lf.l_start = 0;
1436 				lf.l_len = 0;
1437 				lf.l_type = F_UNLCK;
1438 				vp = (struct vnode *)fp->f_data;
1439 				(void) VOP_ADVLOCK(vp,
1440 						   (caddr_t)p->p_leader,
1441 						   F_UNLCK, &lf, F_POSIX);
1442 				fdtol->fdl_holdcount--;
1443 				if (fdtol->fdl_holdcount == 0 &&
1444 				    fdtol->fdl_wakeup != 0) {
1445 					fdtol->fdl_wakeup = 0;
1446 					wakeup(fdtol);
1447 				}
1448 			}
1449 		}
1450 	}
1451 	return (fdrop(fp, td));
1452 }
1453 
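/*
 * Release a reference on a file structure.  When the last reference is
 * dropped any flock-style (FHASLOCK) lock is released, the object's
 * close routine is invoked, and the structure is freed via ffree().
 */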
1454 int
1455 fdrop(struct file *fp, struct thread *td)
1456 {
1457 	struct flock lf;
1458 	struct vnode *vp;
1459 	int error;
1460 
1461 	if (--fp->f_count > 0)
1462 		return (0);
1463 	if (fp->f_count < 0)
1464 		panic("fdrop: count < 0");
1465 	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
1466 		lf.l_whence = SEEK_SET;
1467 		lf.l_start = 0;
1468 		lf.l_len = 0;
1469 		lf.l_type = F_UNLCK;
1470 		vp = (struct vnode *)fp->f_data;
1471 		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
1472 	}
1473 	if (fp->f_ops != &badfileops)
1474 		error = fo_close(fp, td);
1475 	else
1476 		error = 0;
1477 	ffree(fp);
1478 	return (error);
1479 }
1480 
1481 /*
1482  * Apply an advisory lock on a file descriptor.
1483  *
1484  * Just attempt to get a record lock of the requested type on
1485  * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
1486  */
1487 /* ARGSUSED */
1488 int
1489 flock(struct flock_args *uap)
1490 {
1491 	struct proc *p = curproc;
1492 	struct filedesc *fdp = p->p_fd;
1493 	struct file *fp;
1494 	struct vnode *vp;
1495 	struct flock lf;
1496 
1497 	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
1498 	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
1499 		return (EBADF);
1500 	if (fp->f_type != DTYPE_VNODE)
1501 		return (EOPNOTSUPP);
1502 	vp = (struct vnode *)fp->f_data;
1503 	lf.l_whence = SEEK_SET;
1504 	lf.l_start = 0;
1505 	lf.l_len = 0;
1506 	if (uap->how & LOCK_UN) {
1507 		lf.l_type = F_UNLCK;
1508 		fp->f_flag &= ~FHASLOCK;
1509 		return (VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK));
1510 	}
1511 	if (uap->how & LOCK_EX)
1512 		lf.l_type = F_WRLCK;
1513 	else if (uap->how & LOCK_SH)
1514 		lf.l_type = F_RDLCK;
1515 	else
1516 		return (EBADF);
1517 	fp->f_flag |= FHASLOCK;
1518 	if (uap->how & LOCK_NB)
1519 		return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK));
1520 	return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK|F_WAIT));
1521 }
1522 
1523 /*
1524  * File Descriptor pseudo-device driver (/dev/fd/).
1525  *
1526  * Opening minor device N dup()s the file (if any) connected to file
1527  * descriptor N belonging to the calling process.  Note that this driver
1528  * consists of only the ``open()'' routine, because all subsequent
1529  * references to this file will be direct to the other driver.
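 *
 * For example, with descriptor 3 open, an open of "/dev/fd/3" behaves
 * roughly like a dup(3) performed by the opening process (see
 * dupfdopen() below).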
1530  */
1531 /* ARGSUSED */
1532 static int
1533 fdopen(dev_t dev, int mode, int type, struct thread *td)
1534 {
1535 	KKASSERT(td->td_proc != NULL);
1536 
1537 	/*
1538 	 * XXX Kludge: set curproc->p_dupfd to contain the value of the
1539 	 * file descriptor being sought for duplication.  The error
1540 	 * return ensures that the vnode for this device will be released
1541 	 * by vn_open. Open will detect this special error and take the
1542 	 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
1543 	 * will simply report the error.
1544 	 */
1545 	td->td_proc->p_dupfd = minor(dev);
1546 	return (ENODEV);
1547 }
1548 
1549 /*
1550  * Duplicate the specified descriptor to a free descriptor.
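 *
 * This is reached from the open() path after fdopen() above returned
 * ENODEV or ENXIO with p_dupfd set; 'error' carries that value and
 * selects between the two cases handled below.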
1551  */
1552 int
1553 dupfdopen(struct filedesc *fdp, int indx, int dfd, int mode, int error)
1554 {
1555 	struct file *wfp;
1556 	struct file *fp;
1557 
1558 	/*
1559 	 * If the to-be-dup'd fd number is greater than the allowed number
1560 	 * of file descriptors, or the fd to be dup'd has already been
1561 	 * closed, then reject.
1562 	 */
1563 	if ((u_int)dfd >= fdp->fd_nfiles ||
1564 	    (wfp = fdp->fd_ofiles[dfd]) == NULL) {
1565 		return (EBADF);
1566 	}
1567 
1568 	/*
1569 	 * There are two cases of interest here.
1570 	 *
1571 	 * For ENODEV simply dup (dfd) to file descriptor
1572 	 * (indx) and return.
1573 	 *
1574 	 * For ENXIO steal away the file structure from (dfd) and
1575 	 * store it in (indx).  (dfd) is effectively closed by
1576 	 * this operation.
1577 	 *
1578 	 * Any other error code is just returned.
1579 	 */
1580 	switch (error) {
1581 	case ENODEV:
1582 		/*
1583 		 * Check that the mode the file is being opened for is a
1584 		 * subset of the mode of the existing descriptor.
1585 		 */
1586 		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag)
1587 			return (EACCES);
1588 		fp = fdp->fd_ofiles[indx];
1589 #if 0
1590 		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
1591 			(void) munmapfd(p, indx);
1592 #endif
1593 		fdp->fd_ofiles[indx] = wfp;
1594 		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1595 		fhold(wfp);
1596 		if (indx > fdp->fd_lastfile)
1597 			fdp->fd_lastfile = indx;
1598 		/*
1599 		 * we now own the reference to fp that the ofiles[] array
1600 		 * used to own.  Release it.
1601 		 */
1602 		if (fp)
1603 			fdrop(fp, curthread);
1604 		return (0);
1605 
1606 	case ENXIO:
1607 		/*
1608 		 * Steal away the file pointer from dfd, and stuff it into indx.
1609 		 */
1610 		fp = fdp->fd_ofiles[indx];
1611 #if 0
1612 		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
1613 			(void) munmapfd(p, indx);
1614 #endif
1615 		fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
1616 		fdp->fd_ofiles[dfd] = NULL;
1617 		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1618 		fdp->fd_ofileflags[dfd] = 0;
1619 
1620 		/*
1621 		 * we now own the reference to fp that the ofiles[] array
1622 		 * used to own.  Release it.
1623 		 */
1624 		if (fp)
1625 			fdrop(fp, curthread);
1626 		/*
1627 		 * Complete the clean up of the filedesc structure by
1628 		 * recomputing the various hints.
1629 		 */
1630 		if (indx > fdp->fd_lastfile) {
1631 			fdp->fd_lastfile = indx;
1632 		} else {
1633 			while (fdp->fd_lastfile > 0 &&
1634 			   fdp->fd_ofiles[fdp->fd_lastfile] == NULL) {
1635 				fdp->fd_lastfile--;
1636 			}
1637 			if (dfd < fdp->fd_freefile)
1638 				fdp->fd_freefile = dfd;
1639 		}
1640 		return (0);
1641 
1642 	default:
1643 		return (error);
1644 	}
1645 	/* NOTREACHED */
1646 }
1647 
1648 
1649 struct filedesc_to_leader *
1650 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
1651 			 struct proc *leader)
1652 {
1653 	struct filedesc_to_leader *fdtol;
1654 
1655 	fdtol = malloc(sizeof(struct filedesc_to_leader),
1656 			M_FILEDESC_TO_LEADER, M_WAITOK);
1657 	fdtol->fdl_refcount = 1;
1658 	fdtol->fdl_holdcount = 0;
1659 	fdtol->fdl_wakeup = 0;
1660 	fdtol->fdl_leader = leader;
1661 	if (old != NULL) {
1662 		fdtol->fdl_next = old->fdl_next;
1663 		fdtol->fdl_prev = old;
1664 		old->fdl_next = fdtol;
1665 		fdtol->fdl_next->fdl_prev = fdtol;
1666 	} else {
1667 		fdtol->fdl_next = fdtol;
1668 		fdtol->fdl_prev = fdtol;
1669 	}
1670 	return fdtol;
1671 }
1672 
1673 /*
1674  * Get file structures.
1675  */
1676 static int
1677 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
1678 {
1679 	int error;
1680 	struct file *fp;
1681 
1682 	if (!req->oldptr) {
1683 		/*
1684 		 * overestimate by 10 files
1685 		 */
1686 		return (SYSCTL_OUT(req, 0, sizeof(filehead) +
1687 				(nfiles + 10) * sizeof(struct file)));
1688 	}
1689 
1690 	error = SYSCTL_OUT(req, (caddr_t)&filehead, sizeof(filehead));
1691 	if (error)
1692 		return (error);
1693 
1694 	/*
1695 	 * followed by an array of file structures
1696 	 */
1697 	LIST_FOREACH(fp, &filehead, f_list) {
1698 		error = SYSCTL_OUT(req, (caddr_t)fp, sizeof (struct file));
1699 		if (error)
1700 			return (error);
1701 	}
1702 	return (0);
1703 }
1704 
1705 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
1706     0, 0, sysctl_kern_file, "S,file", "Entire file table");
1707 
1708 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
1709     &maxfilesperproc, 0, "Maximum files allowed open per process");
1710 
1711 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
1712     &maxfiles, 0, "Maximum number of files");
1713 
1714 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
1715     &maxfilesrootres, 0, "Descriptors reserved for root use");
1716 
1717 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
1718 	&nfiles, 0, "System-wide number of open files");
1719 
1720 static void
1721 fildesc_drvinit(void *unused)
1722 {
1723 	int fd;
1724 
1725 	cdevsw_add(&fildesc_cdevsw, 0, 0);
1726 	for (fd = 0; fd < NUMFDESC; fd++) {
1727 		make_dev(&fildesc_cdevsw, fd,
1728 		    UID_BIN, GID_BIN, 0666, "fd/%d", fd);
1729 	}
1730 	make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
1731 	make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
1732 	make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
1733 }
1734 
1735 struct fileops badfileops = {
1736 	NULL,	/* port */
1737 	NULL,	/* clone */
1738 	badfo_readwrite,
1739 	badfo_readwrite,
1740 	badfo_ioctl,
1741 	badfo_poll,
1742 	badfo_kqfilter,
1743 	badfo_stat,
1744 	badfo_close
1745 };
1746 
1747 static int
1748 badfo_readwrite(
1749 	struct file *fp,
1750 	struct uio *uio,
1751 	struct ucred *cred,
1752 	int flags,
1753 	struct thread *td
1754 ) {
1755 	return (EBADF);
1756 }
1757 
1758 static int
1759 badfo_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
1760 {
1761 	return (EBADF);
1762 }
1763 
1764 static int
1765 badfo_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
1766 {
1767 	return (0);
1768 }
1769 
1770 static int
1771 badfo_kqfilter(struct file *fp, struct knote *kn)
1772 {
1773 	return (0);
1774 }
1775 
1776 static int
1777 badfo_stat(struct file *fp, struct stat *sb, struct thread *td)
1778 {
1779 	return (EBADF);
1780 }
1781 
1782 static int
1783 badfo_close(struct file *fp, struct thread *td)
1784 {
1785 	return (EBADF);
1786 }
1787 
1788 SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
1789 					fildesc_drvinit,NULL)
1790