xref: /dragonfly/sys/kern/kern_descrip.c (revision 8dbfb057)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
39  * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
40  * $DragonFly: src/sys/kern/kern_descrip.c,v 1.34 2004/11/24 22:51:01 joerg Exp $
41  */
42 
43 #include "opt_compat.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/sysproto.h>
48 #include <sys/conf.h>
49 #include <sys/filedesc.h>
50 #include <sys/kernel.h>
51 #include <sys/sysctl.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/nlookup.h>
55 #include <sys/file.h>
56 #include <sys/stat.h>
57 #include <sys/filio.h>
58 #include <sys/fcntl.h>
59 #include <sys/unistd.h>
60 #include <sys/resourcevar.h>
61 #include <sys/event.h>
62 #include <sys/kern_syscall.h>
63 #include <sys/kcore.h>
64 #include <sys/kinfo.h>
65 
66 #include <vm/vm.h>
67 #include <vm/vm_extern.h>
68 
69 #include <sys/file2.h>
70 
71 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
72 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
73 		     "file desc to leader structures");
74 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
75 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
76 
77 static	 d_open_t  fdopen;
78 #define NUMFDESC 64
79 
80 #define CDEV_MAJOR 22
81 static struct cdevsw fildesc_cdevsw = {
82 	/* name */	"FD",
83 	/* maj */	CDEV_MAJOR,
84 	/* flags */	0,
85 	/* port */      NULL,
86 	/* clone */	NULL,
87 
88 	/* open */	fdopen,
89 	/* close */	noclose,
90 	/* read */	noread,
91 	/* write */	nowrite,
92 	/* ioctl */	noioctl,
93 	/* poll */	nopoll,
94 	/* mmap */	nommap,
95 	/* strategy */	nostrategy,
96 	/* dump */	nodump,
97 	/* psize */	nopsize
98 };
99 
100 static int badfo_readwrite (struct file *fp, struct uio *uio,
101     struct ucred *cred, int flags, struct thread *td);
102 static int badfo_ioctl (struct file *fp, u_long com, caddr_t data,
103     struct thread *td);
104 static int badfo_poll (struct file *fp, int events,
105     struct ucred *cred, struct thread *td);
106 static int badfo_kqfilter (struct file *fp, struct knote *kn);
107 static int badfo_stat (struct file *fp, struct stat *sb, struct thread *td);
108 static int badfo_close (struct file *fp, struct thread *td);
109 
110 /*
111  * Descriptor management.
112  */
113 struct filelist filehead;	/* head of list of open files */
114 int nfiles;			/* actual number of open files */
115 extern int cmask;
116 
117 /*
118  * System calls on descriptors.
119  */
120 /* ARGSUSED */
121 int
122 getdtablesize(struct getdtablesize_args *uap)
123 {
124 	struct proc *p = curproc;
125 
126 	uap->sysmsg_result =
127 	    min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
128 	return (0);
129 }
130 
131 /*
132  * Duplicate a file descriptor to a particular value.
133  *
134  * note: keep in mind that a potential race condition exists when closing
135  * descriptors from a shared descriptor table (via rfork).
136  */
137 /* ARGSUSED */
138 int
139 dup2(struct dup2_args *uap)
140 {
141 	int error;
142 
143 	error = kern_dup(DUP_FIXED, uap->from, uap->to, uap->sysmsg_fds);
144 
145 	return (error);
146 }
147 
148 /*
149  * Duplicate a file descriptor.
150  */
151 /* ARGSUSED */
152 int
153 dup(struct dup_args *uap)
154 {
155 	int error;
156 
157 	error = kern_dup(DUP_VARIABLE, uap->fd, 0, uap->sysmsg_fds);
158 
159 	return (error);
160 }
161 
162 int
163 kern_fcntl(int fd, int cmd, union fcntl_dat *dat)
164 {
165 	struct thread *td = curthread;
166 	struct proc *p = td->td_proc;
167 	struct filedesc *fdp = p->p_fd;
168 	struct file *fp;
169 	char *pop;
170 	struct vnode *vp;
171 	u_int newmin;
172 	int tmp, error, flg = F_POSIX;
173 
174 	KKASSERT(p);
175 
176 	if ((unsigned)fd >= fdp->fd_nfiles ||
177 	    (fp = fdp->fd_ofiles[fd]) == NULL)
178 		return (EBADF);
179 	pop = &fdp->fd_ofileflags[fd];
180 
181 	switch (cmd) {
182 	case F_DUPFD:
183 		newmin = dat->fc_fd;
184 		if (newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
185 		    newmin > maxfilesperproc)
186 			return (EINVAL);
187 		error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
188 		return (error);
189 
190 	case F_GETFD:
191 		dat->fc_cloexec = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0;
192 		return (0);
193 
194 	case F_SETFD:
195 		*pop = (*pop &~ UF_EXCLOSE) |
196 		    (dat->fc_cloexec & FD_CLOEXEC ? UF_EXCLOSE : 0);
197 		return (0);
198 
199 	case F_GETFL:
200 		dat->fc_flags = OFLAGS(fp->f_flag);
201 		return (0);
202 
203 	case F_SETFL:
204 		fhold(fp);
205 		fp->f_flag &= ~FCNTLFLAGS;
206 		fp->f_flag |= FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
207 		tmp = fp->f_flag & FNONBLOCK;
208 		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
209 		if (error) {
210 			fdrop(fp, td);
211 			return (error);
212 		}
213 		tmp = fp->f_flag & FASYNC;
214 		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, td);
215 		if (!error) {
216 			fdrop(fp, td);
217 			return (0);
218 		}
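		/*
		 * FIOASYNC failed: back out the FIONBIO/FNONBLOCK change made
		 * above so the descriptor flags stay consistent with the
		 * underlying object.
		 */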
219 		fp->f_flag &= ~FNONBLOCK;
220 		tmp = 0;
221 		fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
222 		fdrop(fp, td);
223 		return (error);
224 
225 	case F_GETOWN:
226 		fhold(fp);
227 		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, td);
228 		fdrop(fp, td);
229 		return(error);
230 
231 	case F_SETOWN:
232 		fhold(fp);
233 		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, td);
234 		fdrop(fp, td);
235 		return(error);
236 
237 	case F_SETLKW:
238 		flg |= F_WAIT;
239 		/* Fall into F_SETLK */
240 
241 	case F_SETLK:
242 		if (fp->f_type != DTYPE_VNODE)
243 			return (EBADF);
244 		vp = (struct vnode *)fp->f_data;
245 
246 		/*
247 		 * copyin/lockop may block
248 		 */
249 		fhold(fp);
250 		if (dat->fc_flock.l_whence == SEEK_CUR)
251 			dat->fc_flock.l_start += fp->f_offset;
252 
253 		switch (dat->fc_flock.l_type) {
254 		case F_RDLCK:
255 			if ((fp->f_flag & FREAD) == 0) {
256 				error = EBADF;
257 				break;
258 			}
259 			p->p_leader->p_flag |= P_ADVLOCK;
260 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
261 			    &dat->fc_flock, flg);
262 			break;
263 		case F_WRLCK:
264 			if ((fp->f_flag & FWRITE) == 0) {
265 				error = EBADF;
266 				break;
267 			}
268 			p->p_leader->p_flag |= P_ADVLOCK;
269 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
270 			    &dat->fc_flock, flg);
271 			break;
272 		case F_UNLCK:
273 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
274 				&dat->fc_flock, F_POSIX);
275 			break;
276 		default:
277 			error = EINVAL;
278 			break;
279 		}
280 		/* Check for race with close */
281 		if ((unsigned) fd >= fdp->fd_nfiles ||
282 		    fp != fdp->fd_ofiles[fd]) {
283 			dat->fc_flock.l_whence = SEEK_SET;
284 			dat->fc_flock.l_start = 0;
285 			dat->fc_flock.l_len = 0;
286 			dat->fc_flock.l_type = F_UNLCK;
287 			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
288 					   F_UNLCK, &dat->fc_flock, F_POSIX);
289 		}
290 		fdrop(fp, td);
291 		return(error);
292 
293 	case F_GETLK:
294 		if (fp->f_type != DTYPE_VNODE)
295 			return (EBADF);
296 		vp = (struct vnode *)fp->f_data;
297 		/*
298 		 * copyin/lockop may block
299 		 */
300 		fhold(fp);
301 		if (dat->fc_flock.l_type != F_RDLCK &&
302 		    dat->fc_flock.l_type != F_WRLCK &&
303 		    dat->fc_flock.l_type != F_UNLCK) {
304 			fdrop(fp, td);
305 			return (EINVAL);
306 		}
307 		if (dat->fc_flock.l_whence == SEEK_CUR)
308 			dat->fc_flock.l_start += fp->f_offset;
309 		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
310 			    &dat->fc_flock, F_POSIX);
311 		fdrop(fp, td);
312 		return(error);
313 	default:
314 		return (EINVAL);
315 	}
316 	/* NOTREACHED */
317 }
318 
319 /*
320  * The file control system call.
321  */
322 int
323 fcntl(struct fcntl_args *uap)
324 {
325 	union fcntl_dat dat;
326 	int error;
327 
328 	switch (uap->cmd) {
329 	case F_DUPFD:
330 		dat.fc_fd = uap->arg;
331 		break;
332 	case F_SETFD:
333 		dat.fc_cloexec = uap->arg;
334 		break;
335 	case F_SETFL:
336 		dat.fc_flags = uap->arg;
337 		break;
338 	case F_SETOWN:
339 		dat.fc_owner = uap->arg;
340 		break;
341 	case F_SETLKW:
342 	case F_SETLK:
343 	case F_GETLK:
344 		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
345 		    sizeof(struct flock));
346 		if (error)
347 			return (error);
348 		break;
349 	}
350 
351 	error = kern_fcntl(uap->fd, uap->cmd, &dat);
352 
353 	if (error == 0) {
354 		switch (uap->cmd) {
355 		case F_DUPFD:
356 			uap->sysmsg_result = dat.fc_fd;
357 			break;
358 		case F_GETFD:
359 			uap->sysmsg_result = dat.fc_cloexec;
360 			break;
361 		case F_GETFL:
362 			uap->sysmsg_result = dat.fc_flags;
363 			break;
364 		case F_GETOWN:
365 			uap->sysmsg_result = dat.fc_owner;
			break;
366 		case F_GETLK:
367 			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
368 			    sizeof(struct flock));
369 			break;
370 		}
371 	}
372 
373 	return (error);
374 }
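
#if 0
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * the point of the fcntl()/kern_fcntl() split is that in-kernel consumers,
 * e.g. emulation layers, can call kern_fcntl() directly with a filled-in
 * union fcntl_dat instead of re-doing the argument marshalling above.
 * Setting the close-on-exec flag on a descriptor would look like this:
 */
static int
set_cloexec_sketch(int fd)
{
	union fcntl_dat dat;

	dat.fc_cloexec = FD_CLOEXEC;
	return (kern_fcntl(fd, F_SETFD, &dat));
}
#endif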
375 
376 /*
377  * Common code for dup, dup2, and fcntl(F_DUPFD).
378  *
379  * The type flag can be either DUP_FIXED or DUP_VARIABLE.  DUP_FIXED tells
380  * kern_dup() to destructively dup over an existing file descriptor if new
381  * is already open.  DUP_VARIABLE tells kern_dup() to find the lowest
382  * unused file descriptor that is greater than or equal to new.
383  */
384 int
385 kern_dup(enum dup_type type, int old, int new, int *res)
386 {
387 	struct thread *td = curthread;
388 	struct proc *p = td->td_proc;
389 	struct filedesc *fdp = p->p_fd;
390 	struct file *fp;
391 	struct file *delfp;
392 	int holdleaders;
393 	int error, newfd;
394 
395 	/*
396 	 * Verify that we have a valid descriptor to dup from and
397 	 * possibly to dup to.
398 	 */
399 	if (old < 0 || new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
400 	    new >= maxfilesperproc)
401 		return (EBADF);
402 	if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL)
403 		return (EBADF);
404 	if (type == DUP_FIXED && old == new) {
405 		*res = new;
406 		return (0);
407 	}
408 	fp = fdp->fd_ofiles[old];
409 	fhold(fp);
410 
411 	/*
412 	 * Expand the table for the new descriptor if needed.  This may
413 	 * block and drop and reacquire the filedesc lock.
414 	 */
415 	if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
416 		error = fdalloc(p, new, &newfd);
417 		if (error) {
418 			fdrop(fp, td);
419 			return (error);
420 		}
421 	}
422 	if (type == DUP_VARIABLE)
423 		new = newfd;
424 
425 	/*
426 	 * If the old file changed out from under us then treat it as a
427 	 * bad file descriptor.  Userland should do its own locking to
428 	 * avoid this case.
429 	 */
430 	if (fdp->fd_ofiles[old] != fp) {
431 		if (fdp->fd_ofiles[new] == NULL) {
432 			if (new < fdp->fd_freefile)
433 				fdp->fd_freefile = new;
434 			while (fdp->fd_lastfile > 0 &&
435 			    fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
436 				fdp->fd_lastfile--;
437 		}
438 		fdrop(fp, td);
439 		return (EBADF);
440 	}
441 	KASSERT(old != new, ("new fd is same as old"));
442 
443 	/*
444 	 * Save info on the descriptor being overwritten.  We have
445 	 * to do the unmap now, but we cannot close it without
446 	 * introducing an ownership race for the slot.
447 	 */
448 	delfp = fdp->fd_ofiles[new];
449 	if (delfp != NULL && p->p_fdtol != NULL) {
450 		/*
451 		 * Ask fdfree() to sleep to ensure that all relevant
452 		 * process leaders can be traversed in closef().
453 		 */
454 		fdp->fd_holdleaderscount++;
455 		holdleaders = 1;
456 	} else
457 		holdleaders = 0;
458 	KASSERT(delfp == NULL || type == DUP_FIXED,
459 	    ("dup() picked an open file"));
460 #if 0
461 	if (delfp && (fdp->fd_ofileflags[new] & UF_MAPPED))
462 		(void) munmapfd(p, new);
463 #endif
464 
465 	/*
466 	 * Duplicate the source descriptor, update lastfile
467 	 */
468 	fdp->fd_ofiles[new] = fp;
469 	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
470 	if (new > fdp->fd_lastfile)
471 		fdp->fd_lastfile = new;
472 	*res = new;
473 
474 	/*
475 	 * If we dup'd over a valid file, we now own the reference to it
476 	 * and must dispose of it using closef() semantics (as if a
477 	 * close() were performed on it).
478 	 */
479 	if (delfp) {
480 		(void) closef(delfp, td);
481 		if (holdleaders) {
482 			fdp->fd_holdleaderscount--;
483 			if (fdp->fd_holdleaderscount == 0 &&
484 			    fdp->fd_holdleaderswakeup != 0) {
485 				fdp->fd_holdleaderswakeup = 0;
486 				wakeup(&fdp->fd_holdleaderscount);
487 			}
488 		}
489 	}
490 	return (0);
491 }
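
#if 0
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * the two kern_dup() modes described above, as a direct in-kernel caller
 * would use them.
 */
static int
dup_modes_sketch(int fd)
{
	int newfd;
	int error;

	/* dup(fd) semantics: the lowest unused descriptor is chosen */
	error = kern_dup(DUP_VARIABLE, fd, 0, &newfd);
	if (error)
		return (error);

	/* dup2(fd, 10) semantics: slot 10 is used, closing it first if open */
	error = kern_dup(DUP_FIXED, fd, 10, &newfd);
	return (error);
}
#endif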
492 
493 /*
494  * If sigio is on the list associated with a process or process group,
495  * disable signalling from the device, remove sigio from the list and
496  * free sigio.
497  */
498 void
499 funsetown(struct sigio *sigio)
500 {
501 	int s;
502 
503 	if (sigio == NULL)
504 		return;
505 	s = splhigh();
506 	*(sigio->sio_myref) = NULL;
507 	splx(s);
508 	if (sigio->sio_pgid < 0) {
509 		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
510 			     sigio, sio_pgsigio);
511 	} else /* if (sigio->sio_pgid > 0) */ {
512 		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
513 			     sigio, sio_pgsigio);
514 	}
515 	crfree(sigio->sio_ucred);
516 	free(sigio, M_SIGIO);
517 }
518 
519 /* Free a list of sigio structures. */
520 void
521 funsetownlst(struct sigiolst *sigiolst)
522 {
523 	struct sigio *sigio;
524 
525 	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
526 		funsetown(sigio);
527 }
528 
529 /*
530  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
531  *
532  * After permission checking, add a sigio structure to the sigio list for
533  * the process or process group.
534  */
535 int
536 fsetown(pid_t pgid, struct sigio **sigiop)
537 {
538 	struct proc *proc;
539 	struct pgrp *pgrp;
540 	struct sigio *sigio;
541 	int s;
542 
543 	if (pgid == 0) {
544 		funsetown(*sigiop);
545 		return (0);
546 	}
547 	if (pgid > 0) {
548 		proc = pfind(pgid);
549 		if (proc == NULL)
550 			return (ESRCH);
551 
552 		/*
553 		 * Policy - Don't allow a process to FSETOWN a process
554 		 * in another session.
555 		 *
556 		 * Remove this test to allow maximum flexibility or
557 		 * restrict FSETOWN to the current process or process
558 		 * group for maximum safety.
559 		 */
560 		if (proc->p_session != curproc->p_session)
561 			return (EPERM);
562 
563 		pgrp = NULL;
564 	} else /* if (pgid < 0) */ {
565 		pgrp = pgfind(-pgid);
566 		if (pgrp == NULL)
567 			return (ESRCH);
568 
569 		/*
570 		 * Policy - Don't allow a process to FSETOWN a process
571 		 * in another session.
572 		 *
573 		 * Remove this test to allow maximum flexibility or
574 		 * restrict FSETOWN to the current process or process
575 		 * group for maximum safety.
576 		 */
577 		if (pgrp->pg_session != curproc->p_session)
578 			return (EPERM);
579 
580 		proc = NULL;
581 	}
582 	funsetown(*sigiop);
583 	sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
584 	if (pgid > 0) {
585 		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
586 		sigio->sio_proc = proc;
587 	} else {
588 		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
589 		sigio->sio_pgrp = pgrp;
590 	}
591 	sigio->sio_pgid = pgid;
592 	sigio->sio_ucred = crhold(curproc->p_ucred);
593 	/* It would be convenient if p_ruid was in ucred. */
594 	sigio->sio_ruid = curproc->p_ucred->cr_ruid;
595 	sigio->sio_myref = sigiop;
596 	s = splhigh();
597 	*sigiop = sigio;
598 	splx(s);
599 	return (0);
600 }
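
#if 0
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * how a driver's ioctl handler typically uses the sigio helpers above for
 * SIGIO ownership.  The softc layout (struct my_softc, sc_sigio) is
 * hypothetical; on final close such a driver would call
 * funsetown(sc->sc_sigio).
 */
struct my_softc {
	struct sigio	*sc_sigio;	/* SIGIO recipient, if any */
};

static int
sigio_ioctl_sketch(struct my_softc *sc, u_long cmd, caddr_t data)
{
	switch (cmd) {
	case FIOSETOWN:
		/* positive pid selects a process, negative a process group */
		return (fsetown(*(int *)data, &sc->sc_sigio));
	case FIOGETOWN:
		*(int *)data = fgetown(sc->sc_sigio);
		return (0);
	}
	return (ENOTTY);
}
#endif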
601 
602 /*
603  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
604  */
605 pid_t
606 fgetown(struct sigio *sigio)
607 {
608 	return (sigio != NULL ? sigio->sio_pgid : 0);
609 }
610 
611 /*
612  * Close a file descriptor.
613  */
614 /* ARGSUSED */
615 int
616 close(struct close_args *uap)
617 {
618 	return(kern_close(uap->fd));
619 }
620 
621 int
622 kern_close(int fd)
623 {
624 	struct thread *td = curthread;
625 	struct proc *p = td->td_proc;
626 	struct filedesc *fdp;
627 	struct file *fp;
628 	int error;
629 	int holdleaders;
630 
631 	KKASSERT(p);
632 	fdp = p->p_fd;
633 
634 	if ((unsigned)fd >= fdp->fd_nfiles ||
635 	    (fp = fdp->fd_ofiles[fd]) == NULL)
636 		return (EBADF);
637 #if 0
638 	if (fdp->fd_ofileflags[fd] & UF_MAPPED)
639 		(void) munmapfd(p, fd);
640 #endif
641 	fdp->fd_ofiles[fd] = NULL;
642 	fdp->fd_ofileflags[fd] = 0;
643 	holdleaders = 0;
644 	if (p->p_fdtol != NULL) {
645 		/*
646 		 * Ask fdfree() to sleep to ensure that all relevant
647 		 * process leaders can be traversed in closef().
648 		 */
649 		fdp->fd_holdleaderscount++;
650 		holdleaders = 1;
651 	}
652 
653 	/*
654 	 * we now hold the fp reference that used to be owned by the descriptor
655 	 * array.
656 	 */
657 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
658 		fdp->fd_lastfile--;
659 	if (fd < fdp->fd_freefile)
660 		fdp->fd_freefile = fd;
661 	if (fd < fdp->fd_knlistsize)
662 		knote_fdclose(p, fd);
663 	error = closef(fp, td);
664 	if (holdleaders) {
665 		fdp->fd_holdleaderscount--;
666 		if (fdp->fd_holdleaderscount == 0 &&
667 		    fdp->fd_holdleaderswakeup != 0) {
668 			fdp->fd_holdleaderswakeup = 0;
669 			wakeup(&fdp->fd_holdleaderscount);
670 		}
671 	}
672 	return (error);
673 }
674 
675 int
676 kern_fstat(int fd, struct stat *ub)
677 {
678 	struct thread *td = curthread;
679 	struct proc *p = td->td_proc;
680 	struct filedesc *fdp;
681 	struct file *fp;
682 	int error;
683 
684 	KKASSERT(p);
685 
686 	fdp = p->p_fd;
687 	if ((unsigned)fd >= fdp->fd_nfiles ||
688 	    (fp = fdp->fd_ofiles[fd]) == NULL)
689 		return (EBADF);
690 	fhold(fp);
691 	error = fo_stat(fp, ub, td);
692 	fdrop(fp, td);
693 
694 	return (error);
695 }
696 
697 /*
698  * Return status information about a file descriptor.
699  */
700 int
701 fstat(struct fstat_args *uap)
702 {
703 	struct stat st;
704 	int error;
705 
706 	error = kern_fstat(uap->fd, &st);
707 
708 	if (error == 0)
709 		error = copyout(&st, uap->sb, sizeof(st));
710 	return (error);
711 }
712 
713 /*
714  * XXX: This is for source compatibility with NetBSD.  Probably doesn't
715  * belong here.
716  */
717 int
718 nfstat(struct nfstat_args *uap)
719 {
720 	struct stat st;
721 	struct nstat nst;
722 	int error;
723 
724 	error = kern_fstat(uap->fd, &st);
725 
726 	if (error == 0) {
727 		cvtnstat(&st, &nst);
728 		error = copyout(&nst, uap->sb, sizeof (nst));
729 	}
730 	return (error);
731 }
732 
733 /*
734  * Return pathconf information about a file descriptor.
735  */
736 /* ARGSUSED */
737 int
738 fpathconf(struct fpathconf_args *uap)
739 {
740 	struct thread *td = curthread;
741 	struct proc *p = td->td_proc;
742 	struct filedesc *fdp;
743 	struct file *fp;
744 	struct vnode *vp;
745 	int error = 0;
746 
747 	KKASSERT(p);
748 	fdp = p->p_fd;
749 	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
750 	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
751 		return (EBADF);
752 
753 	fhold(fp);
754 
755 	switch (fp->f_type) {
756 	case DTYPE_PIPE:
757 	case DTYPE_SOCKET:
758 		if (uap->name != _PC_PIPE_BUF) {
759 			error = EINVAL;
760 		} else {
761 			uap->sysmsg_result = PIPE_BUF;
762 			error = 0;
763 		}
764 		break;
765 	case DTYPE_FIFO:
766 	case DTYPE_VNODE:
767 		vp = (struct vnode *)fp->f_data;
768 		error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
769 		break;
770 	default:
771 		error = EOPNOTSUPP;
772 		break;
773 	}
774 	fdrop(fp, td);
775 	return(error);
776 }
777 
778 /*
779  * Allocate a file descriptor for the process.
780  */
781 static int fdexpand;
782 SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");
783 
784 int
785 fdalloc(struct proc *p, int want, int *result)
786 {
787 	struct filedesc *fdp = p->p_fd;
788 	int i;
789 	int lim, last, nfiles;
790 	struct file **newofile;
791 	char *newofileflags;
792 
793 	/*
794 	 * Search for a free descriptor starting at the higher
795 	 * of want or fd_freefile.  If that fails, consider
796 	 * expanding the ofile array.
797 	 */
798 	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
799 	for (;;) {
800 		last = min(fdp->fd_nfiles, lim);
801 		if ((i = want) < fdp->fd_freefile)
802 			i = fdp->fd_freefile;
803 		for (; i < last; i++) {
804 			if (fdp->fd_ofiles[i] == NULL) {
805 				fdp->fd_ofileflags[i] = 0;
806 				if (i > fdp->fd_lastfile)
807 					fdp->fd_lastfile = i;
808 				if (want <= fdp->fd_freefile)
809 					fdp->fd_freefile = i;
810 				*result = i;
811 				return (0);
812 			}
813 		}
814 
815 		/*
816 		 * No space in current array.  Expand?
817 		 */
818 		if (fdp->fd_nfiles >= lim)
819 			return (EMFILE);
820 		if (fdp->fd_nfiles < NDEXTENT)
821 			nfiles = NDEXTENT;
822 		else
823 			nfiles = 2 * fdp->fd_nfiles;
824 		newofile = malloc(nfiles * OFILESIZE, M_FILEDESC, M_WAITOK);
825 
826 		/*
827 		 * deal with a file-table extension race that might have occurred
828 		 * when malloc was blocked.
829 		 */
830 		if (fdp->fd_nfiles >= nfiles) {
831 			free(newofile, M_FILEDESC);
832 			continue;
833 		}
834 		newofileflags = (char *) &newofile[nfiles];
835 		/*
836 		 * Copy the existing ofile and ofileflags arrays
837 		 * and zero the new portion of each array.
838 		 */
839 		bcopy(fdp->fd_ofiles, newofile,
840 			(i = sizeof(struct file *) * fdp->fd_nfiles));
841 		bzero((char *)newofile + i, nfiles * sizeof(struct file *) - i);
842 		bcopy(fdp->fd_ofileflags, newofileflags,
843 			(i = sizeof(char) * fdp->fd_nfiles));
844 		bzero(newofileflags + i, nfiles * sizeof(char) - i);
845 		if (fdp->fd_nfiles > NDFILE)
846 			free(fdp->fd_ofiles, M_FILEDESC);
847 		fdp->fd_ofiles = newofile;
848 		fdp->fd_ofileflags = newofileflags;
849 		fdp->fd_nfiles = nfiles;
850 		fdexpand++;
851 	}
852 	return (0);
853 }
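
/*
 * Editorial note (not part of the original source): assuming the historical
 * 4.4BSD sizing constants NDFILE == 20 and NDEXTENT == 50, a process that
 * keeps allocating descriptors sees fd_nfiles grow roughly as
 *
 *	20 (embedded) -> 50 -> 100 -> 200 -> 400 -> ...
 *
 * capped by the min(RLIMIT_NOFILE, maxfilesperproc) limit computed above.
 */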
854 
855 /*
856  * Check to see whether n user file descriptors
857  * are available to the process p.
858  */
859 int
860 fdavail(struct proc *p, int n)
861 {
862 	struct filedesc *fdp = p->p_fd;
863 	struct file **fpp;
864 	int i, lim, last;
865 
866 	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
867 	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
868 		return (1);
869 
870 	last = min(fdp->fd_nfiles, lim);
871 	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
872 	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
873 		if (*fpp == NULL && --n <= 0)
874 			return (1);
875 	}
876 	return (0);
877 }
878 
879 /*
880  * falloc:
881  *	Create a new open file structure and allocate a file descriptor
882  *	for the process that refers to it.  If p is NULL, no descriptor
883  *	is allocated and the file pointer is returned unassociated with
884  *	any process.  resultfd is only used if p is not NULL and may
885  *	separately be NULL indicating that you don't need the returned fd.
886  *
887  *	A held file pointer is returned.  If a descriptor has been allocated
888  *	an additional hold on the fp will be made due to the fd_ofiles[]
889  *	reference.
890  */
891 int
892 falloc(struct proc *p, struct file **resultfp, int *resultfd)
893 {
894 	static struct timeval lastfail;
895 	static int curfail;
896 	struct file *fp;
897 	int error;
898 
899 	fp = NULL;
900 
901 	/*
902 	 * Handle filetable full issues and root overfill.
903 	 */
904 	if (nfiles >= maxfiles - maxfilesrootres &&
905 	    ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) {
906 		if (ppsratecheck(&lastfail, &curfail, 1)) {
907 			printf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n",
908 				(p ? p->p_ucred->cr_ruid : -1));
909 		}
910 		error = ENFILE;
911 		goto done;
912 	}
913 
914 	/*
915 	 * Allocate a new file descriptor.
916 	 */
917 	nfiles++;
918 	fp = malloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
919 	fp->f_count = 1;
920 	fp->f_ops = &badfileops;
921 	fp->f_seqcount = 1;
922 	if (p)
923 		fp->f_cred = crhold(p->p_ucred);
924 	else
925 		fp->f_cred = crhold(proc0.p_ucred);
926 	LIST_INSERT_HEAD(&filehead, fp, f_list);
927 	if (resultfd) {
928 		if ((error = fsetfd(p, fp, resultfd)) != 0) {
929 			fdrop(fp, p->p_thread);
930 			fp = NULL;
931 		}
932 	} else {
933 		error = 0;
934 	}
935 done:
936 	*resultfp = fp;
937 	return (error);
938 }
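
#if 0
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * the reference-count contract described in the comment above falloc().
 * The caller gets back a held fp; if a descriptor was allocated, the
 * fd_ofiles[] slot holds a second reference.  my_setup_file() is a
 * hypothetical step that fills in f_data/f_ops/f_flag, and p is assumed
 * to be curproc since kern_close() operates on the current process.
 */
static int
falloc_usage_sketch(struct proc *p, int *resultfd)
{
	struct file *fp;
	int error;

	error = falloc(p, &fp, resultfd);
	if (error)
		return (error);		/* neither an fp nor an fd exists */
	error = my_setup_file(fp);
	if (error)
		kern_close(*resultfd);	/* drops the fd_ofiles[] reference */
	fdrop(fp, p->p_thread);		/* drop the hold returned by falloc() */
	return (error);
}
#endif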
939 
940 /*
941  * Associate a file pointer with a file descriptor.  On success the fp
942  * will have an additional ref representing the fd_ofiles[] association.
943  */
944 int
945 fsetfd(struct proc *p, struct file *fp, int *resultfd)
946 {
947 	int i;
948 	int error;
949 
950 	KKASSERT(p);
951 
952 	i = -1;
953 	if ((error = fdalloc(p, 0, &i)) == 0) {
954 		fhold(fp);
955 		p->p_fd->fd_ofiles[i] = fp;
956 	}
957 	*resultfd = i;
958 	return (error);
959 }
960 
961 void
962 fsetcred(struct file *fp, struct ucred *cr)
963 {
964 	crhold(cr);
965 	crfree(fp->f_cred);
966 	fp->f_cred = cr;
967 }
968 
969 /*
970  * Free a file descriptor.
971  */
972 void
973 ffree(struct file *fp)
974 {
975 	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
976 	LIST_REMOVE(fp, f_list);
977 	crfree(fp->f_cred);
978 	if (fp->f_ncp) {
979 	    cache_drop(fp->f_ncp);
980 	    fp->f_ncp = NULL;
981 	}
982 	nfiles--;
983 	free(fp, M_FILE);
984 }
985 
986 /*
987  * Build a new filedesc structure.
988  */
989 struct filedesc *
990 fdinit(struct proc *p)
991 {
992 	struct filedesc0 *newfdp;
993 	struct filedesc *fdp = p->p_fd;
994 
995 	newfdp = malloc(sizeof(struct filedesc0), M_FILEDESC, M_WAITOK|M_ZERO);
996 	newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
997 	if (newfdp->fd_fd.fd_cdir) {
998 		vref(newfdp->fd_fd.fd_cdir);
999 		newfdp->fd_fd.fd_ncdir = cache_hold(fdp->fd_ncdir);
1000 	}
1001 	newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
1002 	newfdp->fd_fd.fd_nrdir = cache_hold(fdp->fd_nrdir);
1003 	vref(newfdp->fd_fd.fd_rdir);
1004 	newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
1005 	if (newfdp->fd_fd.fd_jdir) {
1006 		vref(newfdp->fd_fd.fd_jdir);
1007 		newfdp->fd_fd.fd_njdir = cache_hold(fdp->fd_njdir);
1008 	}
1009 
1010 
1011 	/* Create the file descriptor table. */
1012 	newfdp->fd_fd.fd_refcnt = 1;
1013 	newfdp->fd_fd.fd_cmask = cmask;
1014 	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
1015 	newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
1016 	newfdp->fd_fd.fd_nfiles = NDFILE;
1017 	newfdp->fd_fd.fd_knlistsize = -1;
1018 
1019 	return (&newfdp->fd_fd);
1020 }
1021 
1022 /*
1023  * Share a filedesc structure.
1024  */
1025 struct filedesc *
1026 fdshare(struct proc *p)
1027 {
1028 	p->p_fd->fd_refcnt++;
1029 	return (p->p_fd);
1030 }
1031 
1032 /*
1033  * Copy a filedesc structure.
1034  */
1035 struct filedesc *
1036 fdcopy(struct proc *p)
1037 {
1038 	struct filedesc *newfdp, *fdp = p->p_fd;
1039 	struct file **fpp;
1040 	int i;
1041 
1042 	/* Certain daemons might not have file descriptors. */
1043 	if (fdp == NULL)
1044 		return (NULL);
1045 
1046 	newfdp = malloc(sizeof(struct filedesc0), M_FILEDESC, M_WAITOK);
1047 	bcopy(fdp, newfdp, sizeof(struct filedesc));
1048 	if (newfdp->fd_cdir) {
1049 		vref(newfdp->fd_cdir);
1050 		newfdp->fd_ncdir = cache_hold(newfdp->fd_ncdir);
1051 	}
1052 	/*
1053 	 * We must check for fd_rdir here, at least for now because
1054 	 * the init process is created before we have access to the
1055 	 * root vnode to take a reference to it.
1056 	 */
1057 	if (newfdp->fd_rdir) {
1058 		vref(newfdp->fd_rdir);
1059 		newfdp->fd_nrdir = cache_hold(newfdp->fd_nrdir);
1060 	}
1061 	if (newfdp->fd_jdir) {
1062 		vref(newfdp->fd_jdir);
1063 		newfdp->fd_njdir = cache_hold(newfdp->fd_njdir);
1064 	}
1065 	newfdp->fd_refcnt = 1;
1066 
1067 	/*
1068 	 * If the number of open files fits in the internal arrays
1069 	 * of the open file structure, use them, otherwise allocate
1070 	 * additional memory for the number of descriptors currently
1071 	 * in use.
1072 	 */
1073 	if (newfdp->fd_lastfile < NDFILE) {
1074 		newfdp->fd_ofiles = ((struct filedesc0 *) newfdp)->fd_dfiles;
1075 		newfdp->fd_ofileflags =
1076 		    ((struct filedesc0 *) newfdp)->fd_dfileflags;
1077 		i = NDFILE;
1078 	} else {
1079 		/*
1080 		 * Compute the smallest multiple of NDEXTENT needed
1081 		 * for the file descriptors currently in use,
1082 		 * allowing the table to shrink.
1083 		 */
1084 		i = newfdp->fd_nfiles;
1085 		while (i > 2 * NDEXTENT && i > newfdp->fd_lastfile * 2)
1086 			i /= 2;
1087 		newfdp->fd_ofiles = malloc(i * OFILESIZE, M_FILEDESC, M_WAITOK);
1088 		newfdp->fd_ofileflags = (char *) &newfdp->fd_ofiles[i];
1089 	}
1090 	newfdp->fd_nfiles = i;
1091 	bcopy(fdp->fd_ofiles, newfdp->fd_ofiles, i * sizeof(struct file **));
1092 	bcopy(fdp->fd_ofileflags, newfdp->fd_ofileflags, i * sizeof(char));
1093 
1094 	/*
1095 	 * kq descriptors cannot be copied.
1096 	 */
1097 	if (newfdp->fd_knlistsize != -1) {
1098 		fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile];
1099 		for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--) {
1100 			if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE) {
1101 				*fpp = NULL;
1102 				if (i < newfdp->fd_freefile)
1103 					newfdp->fd_freefile = i;
1104 			}
1105 			if (*fpp == NULL && i == newfdp->fd_lastfile && i > 0)
1106 				newfdp->fd_lastfile--;
1107 		}
1108 		newfdp->fd_knlist = NULL;
1109 		newfdp->fd_knlistsize = -1;
1110 		newfdp->fd_knhash = NULL;
1111 		newfdp->fd_knhashmask = 0;
1112 	}
1113 
1114 	fpp = newfdp->fd_ofiles;
1115 	for (i = newfdp->fd_lastfile; i-- >= 0; fpp++) {
1116 		if (*fpp != NULL)
1117 			fhold(*fpp);
1118 	}
1119 	return (newfdp);
1120 }
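
/*
 * Illustrative userland consequence (editorial example, not part of the
 * original source) of the "kq descriptors cannot be copied" rule above:
 * a kqueue descriptor inherited across fork() behaves as if it had been
 * closed in the child:
 *
 *	kq = kqueue();
 *	if (fork() == 0)
 *		kevent(kq, ...);	// fails with EBADF in the child
 */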
1121 
1122 /*
1123  * Release a filedesc structure.
1124  */
1125 void
1126 fdfree(struct proc *p)
1127 {
1128 	struct thread *td = p->p_thread;
1129 	struct filedesc *fdp = p->p_fd;
1130 	struct file **fpp;
1131 	int i;
1132 	struct filedesc_to_leader *fdtol;
1133 	struct file *fp;
1134 	struct vnode *vp;
1135 	struct flock lf;
1136 
1137 	/* Certain daemons might not have file descriptors. */
1138 	if (fdp == NULL)
1139 		return;
1140 
1141 	/* Check for special need to clear POSIX style locks */
1142 	fdtol = p->p_fdtol;
1143 	if (fdtol != NULL) {
1144 		KASSERT(fdtol->fdl_refcount > 0,
1145 			("filedesc_to_refcount botch: fdl_refcount=%d",
1146 			 fdtol->fdl_refcount));
1147 		if (fdtol->fdl_refcount == 1 &&
1148 		    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1149 			i = 0;
1150 			fpp = fdp->fd_ofiles;
1151 			for (i = 0, fpp = fdp->fd_ofiles;
1152 			     i <= fdp->fd_lastfile;
1153 			     i++, fpp++) {
1154 				if (*fpp == NULL ||
1155 				    (*fpp)->f_type != DTYPE_VNODE)
1156 					continue;
1157 				fp = *fpp;
1158 				fhold(fp);
1159 				lf.l_whence = SEEK_SET;
1160 				lf.l_start = 0;
1161 				lf.l_len = 0;
1162 				lf.l_type = F_UNLCK;
1163 				vp = (struct vnode *)fp->f_data;
1164 				(void) VOP_ADVLOCK(vp,
1165 						   (caddr_t)p->p_leader,
1166 						   F_UNLCK,
1167 						   &lf,
1168 						   F_POSIX);
1169 				fdrop(fp, p->p_thread);
1170 				fpp = fdp->fd_ofiles + i;
1171 			}
1172 		}
1173 	retry:
1174 		if (fdtol->fdl_refcount == 1) {
1175 			if (fdp->fd_holdleaderscount > 0 &&
1176 			    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1177 				/*
1178 				 * close() or do_dup() has cleared a reference
1179 				 * close() or kern_dup() has cleared a reference
1180 				 */
1181 				fdp->fd_holdleaderswakeup = 1;
1182 				tsleep(&fdp->fd_holdleaderscount,
1183 				       0, "fdlhold", 0);
1184 				goto retry;
1185 			}
1186 			if (fdtol->fdl_holdcount > 0) {
1187 				/*
1188 				 * Ensure that fdtol->fdl_leader
1189 				 * remains valid in closef().
1190 				 */
1191 				fdtol->fdl_wakeup = 1;
1192 				tsleep(fdtol, 0, "fdlhold", 0);
1193 				goto retry;
1194 			}
1195 		}
1196 		fdtol->fdl_refcount--;
1197 		if (fdtol->fdl_refcount == 0 &&
1198 		    fdtol->fdl_holdcount == 0) {
1199 			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
1200 			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
1201 		} else
1202 			fdtol = NULL;
1203 		p->p_fdtol = NULL;
1204 		if (fdtol != NULL)
1205 			free(fdtol, M_FILEDESC_TO_LEADER);
1206 	}
1207 	if (--fdp->fd_refcnt > 0)
1208 		return;
1209 	/*
1210 	 * we are the last reference to the structure, we can
1211 	 * safely assume it will not change out from under us.
1212 	 */
1213 	fpp = fdp->fd_ofiles;
1214 	for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
1215 		if (*fpp)
1216 			(void) closef(*fpp, td);
1217 	}
1218 	if (fdp->fd_nfiles > NDFILE)
1219 		free(fdp->fd_ofiles, M_FILEDESC);
1220 	if (fdp->fd_cdir) {
1221 		cache_drop(fdp->fd_ncdir);
1222 		vrele(fdp->fd_cdir);
1223 	}
1224 	cache_drop(fdp->fd_nrdir);
1225 	vrele(fdp->fd_rdir);
1226 	if (fdp->fd_jdir) {
1227 		cache_drop(fdp->fd_njdir);
1228 		vrele(fdp->fd_jdir);
1229 	}
1230 	if (fdp->fd_knlist)
1231 		free(fdp->fd_knlist, M_KQUEUE);
1232 	if (fdp->fd_knhash)
1233 		free(fdp->fd_knhash, M_KQUEUE);
1234 	free(fdp, M_FILEDESC);
1235 }
1236 
1237 /*
1238  * For setugid programs, we don't want people to use that setugidness
1239  * to generate error messages which write to a file which would otherwise
1240  * be off-limits to the process.
1241  *
1242  * This is a gross hack to plug the hole.  A better solution would involve
1243  * a special vop or other form of generalized access control mechanism.  We
1244  * go ahead and just reject all procfs filesystem accesses as dangerous.
1245  *
1246  * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
1247  * sufficient.  We also don't check for setugidness since we know we are.
1248  */
1249 static int
1250 is_unsafe(struct file *fp)
1251 {
1252 	if (fp->f_type == DTYPE_VNODE &&
1253 	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
1254 		return (1);
1255 	return (0);
1256 }
1257 
1258 /*
1259  * Make this setguid thing safe, if at all possible.
1260  */
1261 void
1262 setugidsafety(struct proc *p)
1263 {
1264 	struct thread *td = p->p_thread;
1265 	struct filedesc *fdp = p->p_fd;
1266 	int i;
1267 
1268 	/* Certain daemons might not have file descriptors. */
1269 	if (fdp == NULL)
1270 		return;
1271 
1272 	/*
1273 	 * note: fdp->fd_ofiles may be reallocated out from under us while
1274 	 * we are blocked in a close.  Be careful!
1275 	 */
1276 	for (i = 0; i <= fdp->fd_lastfile; i++) {
1277 		if (i > 2)
1278 			break;
1279 		if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
1280 			struct file *fp;
1281 
1282 #if 0
1283 			if ((fdp->fd_ofileflags[i] & UF_MAPPED) != 0)
1284 				(void) munmapfd(p, i);
1285 #endif
1286 			if (i < fdp->fd_knlistsize)
1287 				knote_fdclose(p, i);
1288 			/*
1289 			 * NULL-out descriptor prior to close to avoid
1290 			 * a race while close blocks.
1291 			 */
1292 			fp = fdp->fd_ofiles[i];
1293 			fdp->fd_ofiles[i] = NULL;
1294 			fdp->fd_ofileflags[i] = 0;
1295 			if (i < fdp->fd_freefile)
1296 				fdp->fd_freefile = i;
1297 			(void) closef(fp, td);
1298 		}
1299 	}
1300 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
1301 		fdp->fd_lastfile--;
1302 }
1303 
1304 /*
1305  * Close any files on exec?
1306  */
1307 void
1308 fdcloseexec(struct proc *p)
1309 {
1310 	struct thread *td = p->p_thread;
1311 	struct filedesc *fdp = p->p_fd;
1312 	int i;
1313 
1314 	/* Certain daemons might not have file descriptors. */
1315 	if (fdp == NULL)
1316 		return;
1317 
1318 	/*
1319 	 * We cannot cache fd_ofiles or fd_ofileflags since operations
1320 	 * may block and rip them out from under us.
1321 	 */
1322 	for (i = 0; i <= fdp->fd_lastfile; i++) {
1323 		if (fdp->fd_ofiles[i] != NULL &&
1324 		    (fdp->fd_ofileflags[i] & UF_EXCLOSE)) {
1325 			struct file *fp;
1326 
1327 #if 0
1328 			if (fdp->fd_ofileflags[i] & UF_MAPPED)
1329 				(void) munmapfd(p, i);
1330 #endif
1331 			if (i < fdp->fd_knlistsize)
1332 				knote_fdclose(p, i);
1333 			/*
1334 			 * NULL-out descriptor prior to close to avoid
1335 			 * a race while close blocks.
1336 			 */
1337 			fp = fdp->fd_ofiles[i];
1338 			fdp->fd_ofiles[i] = NULL;
1339 			fdp->fd_ofileflags[i] = 0;
1340 			if (i < fdp->fd_freefile)
1341 				fdp->fd_freefile = i;
1342 			(void) closef(fp, td);
1343 		}
1344 	}
1345 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
1346 		fdp->fd_lastfile--;
1347 }
1348 
1349 /*
1350  * It is unsafe for set[ug]id processes to be started with file
1351  * descriptors 0..2 closed, as these descriptors are given implicit
1352  * significance in the Standard C library.  fdcheckstd() will create a
1353  * descriptor referencing /dev/null for each of stdin, stdout, and
1354  * stderr that is not already open.
1355  */
1356 int
1357 fdcheckstd(struct proc *p)
1358 {
1359 	struct thread *td = p->p_thread;
1360 	struct nlookupdata nd;
1361 	struct filedesc *fdp;
1362 	struct file *fp;
1363 	register_t retval;
1364 	int fd, i, error, flags, devnull;
1365 
1366 	fdp = p->p_fd;
1367 	if (fdp == NULL)
1368 		return (0);
1369 	devnull = -1;
1370 	error = 0;
1371 	for (i = 0; i < 3; i++) {
1372 		if (fdp->fd_ofiles[i] != NULL)
1373 			continue;
1374 		if (devnull < 0) {
1375 			if ((error = falloc(p, &fp, NULL)) != 0)
1376 				break;
1377 
1378 			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
1379 						NLC_FOLLOW|NLC_LOCKVP);
1380 			flags = FREAD | FWRITE;
1381 			if (error == 0)
1382 				error = vn_open(&nd, fp, flags, 0);
1383 			if (error == 0)
1384 				error = fsetfd(p, fp, &fd);
1385 			fdrop(fp, td);
1386 			nlookup_done(&nd);
1387 			if (error)
1388 				break;
1389 			KKASSERT(i == fd);
1390 			devnull = fd;
1391 		} else {
1392 			error = kern_dup(DUP_FIXED, devnull, i, &retval);
1393 			if (error != 0)
1394 				break;
1395 		}
1396 	}
1397 	return (error);
1398 }
1399 
1400 /*
1401  * Internal form of close.
1402  * Decrement reference count on file structure.
1403  * Note: td and/or p may be NULL when closing a file
1404  * that was being passed in a message.
1405  */
1406 int
1407 closef(struct file *fp, struct thread *td)
1408 {
1409 	struct vnode *vp;
1410 	struct flock lf;
1411 	struct filedesc_to_leader *fdtol;
1412 	struct proc *p;
1413 
1414 	if (fp == NULL)
1415 		return (0);
1416 	if (td == NULL) {
1417 		td = curthread;
1418 		p = NULL;		/* allow no proc association */
1419 	} else {
1420 		p = td->td_proc;	/* can also be NULL */
1421 	}
1422 	/*
1423 	 * POSIX record locking dictates that any close releases ALL
1424 	 * locks owned by this process.  This is handled by setting
1425 	 * a flag in the unlock to free ONLY locks obeying POSIX
1426 	 * semantics, and not to free BSD-style file locks.
1427 	 * If the descriptor was in a message, POSIX-style locks
1428 	 * aren't passed with the descriptor.
1429 	 */
1430 	if (p != NULL &&
1431 	    fp->f_type == DTYPE_VNODE) {
1432 		if ((p->p_leader->p_flag & P_ADVLOCK) != 0) {
1433 			lf.l_whence = SEEK_SET;
1434 			lf.l_start = 0;
1435 			lf.l_len = 0;
1436 			lf.l_type = F_UNLCK;
1437 			vp = (struct vnode *)fp->f_data;
1438 			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
1439 					   &lf, F_POSIX);
1440 		}
1441 		fdtol = p->p_fdtol;
1442 		if (fdtol != NULL) {
1443 			/*
1444 			 * Handle special case where file descriptor table
1445 			 * is shared between multiple process leaders.
1446 			 */
1447 			for (fdtol = fdtol->fdl_next;
1448 			     fdtol != p->p_fdtol;
1449 			     fdtol = fdtol->fdl_next) {
1450 				if ((fdtol->fdl_leader->p_flag &
1451 				     P_ADVLOCK) == 0)
1452 					continue;
1453 				fdtol->fdl_holdcount++;
1454 				lf.l_whence = SEEK_SET;
1455 				lf.l_start = 0;
1456 				lf.l_len = 0;
1457 				lf.l_type = F_UNLCK;
1458 				vp = (struct vnode *)fp->f_data;
1459 				(void) VOP_ADVLOCK(vp,
1460 						   (caddr_t)p->p_leader,
1461 						   F_UNLCK, &lf, F_POSIX);
1462 				fdtol->fdl_holdcount--;
1463 				if (fdtol->fdl_holdcount == 0 &&
1464 				    fdtol->fdl_wakeup != 0) {
1465 					fdtol->fdl_wakeup = 0;
1466 					wakeup(fdtol);
1467 				}
1468 			}
1469 		}
1470 	}
1471 	return (fdrop(fp, td));
1472 }
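
/*
 * Illustrative userland consequence (editorial example, not part of the
 * original source) of the POSIX unlock-on-any-close rule handled above:
 *
 *	fd1 = open("/tmp/f", O_RDWR);
 *	fd2 = open("/tmp/f", O_RDWR);		// second fd, same file
 *	fcntl(fd1, F_SETLK, &wrlock);		// record lock taken via fd1
 *	close(fd2);				// releases the lock taken via fd1
 *
 * flock()-style (F_FLOCK) locks are not affected by this rule.
 */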
1473 
1474 int
1475 fdrop(struct file *fp, struct thread *td)
1476 {
1477 	struct flock lf;
1478 	struct vnode *vp;
1479 	int error;
1480 
1481 	if (--fp->f_count > 0)
1482 		return (0);
1483 	if (fp->f_count < 0)
1484 		panic("fdrop: count < 0");
1485 	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
1486 		lf.l_whence = SEEK_SET;
1487 		lf.l_start = 0;
1488 		lf.l_len = 0;
1489 		lf.l_type = F_UNLCK;
1490 		vp = (struct vnode *)fp->f_data;
1491 		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
1492 	}
1493 	if (fp->f_ops != &badfileops)
1494 		error = fo_close(fp, td);
1495 	else
1496 		error = 0;
1497 	ffree(fp);
1498 	return (error);
1499 }
1500 
1501 /*
1502  * Apply an advisory lock on a file descriptor.
1503  *
1504  * Just attempt to get a record lock of the requested type on
1505  * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
1506  */
1507 /* ARGSUSED */
1508 int
1509 flock(struct flock_args *uap)
1510 {
1511 	struct proc *p = curproc;
1512 	struct filedesc *fdp = p->p_fd;
1513 	struct file *fp;
1514 	struct vnode *vp;
1515 	struct flock lf;
1516 
1517 	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
1518 	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
1519 		return (EBADF);
1520 	if (fp->f_type != DTYPE_VNODE)
1521 		return (EOPNOTSUPP);
1522 	vp = (struct vnode *)fp->f_data;
1523 	lf.l_whence = SEEK_SET;
1524 	lf.l_start = 0;
1525 	lf.l_len = 0;
1526 	if (uap->how & LOCK_UN) {
1527 		lf.l_type = F_UNLCK;
1528 		fp->f_flag &= ~FHASLOCK;
1529 		return (VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK));
1530 	}
1531 	if (uap->how & LOCK_EX)
1532 		lf.l_type = F_WRLCK;
1533 	else if (uap->how & LOCK_SH)
1534 		lf.l_type = F_RDLCK;
1535 	else
1536 		return (EBADF);
1537 	fp->f_flag |= FHASLOCK;
1538 	if (uap->how & LOCK_NB)
1539 		return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK));
1540 	return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK|F_WAIT));
1541 }
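
/*
 * Illustrative userland usage (editorial example, not part of the original
 * source): flock(2) always locks the whole file, and the lock belongs to
 * the open file (struct file) rather than to the process, so it is shared
 * across dup()/fork() and is released when the last reference is dropped
 * or LOCK_UN is applied:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		;			// another open file holds the lock
 *	...
 *	flock(fd, LOCK_UN);		// or simply close the last reference
 */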
1542 
1543 /*
1544  * File Descriptor pseudo-device driver (/dev/fd/).
1545  *
1546  * Opening minor device N dup()s the file (if any) connected to file
1547  * descriptor N belonging to the calling process.  Note that this driver
1548  * consists of only the ``open()'' routine, because all subsequent
1549  * references to this file will go directly to the other driver.
1550  */
1551 /* ARGSUSED */
1552 static int
1553 fdopen(dev_t dev, int mode, int type, struct thread *td)
1554 {
1555 	KKASSERT(td->td_proc != NULL);
1556 
1557 	/*
1558 	 * XXX Kludge: set curproc->p_dupfd to contain the value of the
1559 	 * file descriptor being sought for duplication. The error
1560 	 * return ensures that the vnode for this device will be released
1561 	 * by vn_open. Open will detect this special error and take the
1562 	 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
1563 	 * will simply report the error.
1564 	 */
1565 	td->td_proc->p_dupfd = minor(dev);
1566 	return (ENODEV);
1567 }
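
/*
 * Illustrative userland view (editorial example, not part of the original
 * source): opening /dev/fd/N behaves like dup(N) on the caller's own
 * descriptor table:
 *
 *	fd = open("/dev/fd/0", O_RDONLY);	// effectively dup(0)
 *
 * The open reaches fdopen() above, which records N in p_dupfd and fails
 * with ENODEV; the open path recognizes that error and completes the
 * operation in dupfdopen() below.
 */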
1568 
1569 /*
1570  * Duplicate the specified descriptor to a free descriptor.
1571  */
1572 int
1573 dupfdopen(struct filedesc *fdp, int indx, int dfd, int mode, int error)
1574 {
1575 	struct file *wfp;
1576 	struct file *fp;
1577 
1578 	/*
1579 	 * If the to-be-dup'd fd number is greater than the allowed number
1580 	 * of file descriptors, or the fd to be dup'd has already been
1581 	 * closed, then reject.
1582 	 */
1583 	if ((u_int)dfd >= fdp->fd_nfiles ||
1584 	    (wfp = fdp->fd_ofiles[dfd]) == NULL) {
1585 		return (EBADF);
1586 	}
1587 
1588 	/*
1589 	 * There are two cases of interest here.
1590 	 *
1591 	 * For ENODEV simply dup (dfd) to file descriptor
1592 	 * (indx) and return.
1593 	 *
1594 	 * For ENXIO steal away the file structure from (dfd) and
1595 	 * store it in (indx).  (dfd) is effectively closed by
1596 	 * this operation.
1597 	 *
1598 	 * Any other error code is just returned.
1599 	 */
1600 	switch (error) {
1601 	case ENODEV:
1602 		/*
1603 		 * Check that the mode the file is being opened for is a
1604 		 * subset of the mode of the existing descriptor.
1605 		 */
1606 		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag)
1607 			return (EACCES);
1608 		fp = fdp->fd_ofiles[indx];
1609 #if 0
1610 		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
1611 			(void) munmapfd(p, indx);
1612 #endif
1613 		fdp->fd_ofiles[indx] = wfp;
1614 		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1615 		fhold(wfp);
1616 		if (indx > fdp->fd_lastfile)
1617 			fdp->fd_lastfile = indx;
1618 		/*
1619 		 * we now own the reference to fp that the ofiles[] array
1620 		 * used to own.  Release it.
1621 		 */
1622 		if (fp)
1623 			fdrop(fp, curthread);
1624 		return (0);
1625 
1626 	case ENXIO:
1627 		/*
1628 		 * Steal away the file pointer from dfd, and stuff it into indx.
1629 		 */
1630 		fp = fdp->fd_ofiles[indx];
1631 #if 0
1632 		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
1633 			(void) munmapfd(p, indx);
1634 #endif
1635 		fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
1636 		fdp->fd_ofiles[dfd] = NULL;
1637 		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1638 		fdp->fd_ofileflags[dfd] = 0;
1639 
1640 		/*
1641 		 * we now own the reference to fp that the ofiles[] array
1642 		 * used to own.  Release it.
1643 		 */
1644 		if (fp)
1645 			fdrop(fp, curthread);
1646 		/*
1647 		 * Complete the clean up of the filedesc structure by
1648 		 * recomputing the various hints.
1649 		 */
1650 		if (indx > fdp->fd_lastfile) {
1651 			fdp->fd_lastfile = indx;
1652 		} else {
1653 			while (fdp->fd_lastfile > 0 &&
1654 			   fdp->fd_ofiles[fdp->fd_lastfile] == NULL) {
1655 				fdp->fd_lastfile--;
1656 			}
1657 			if (dfd < fdp->fd_freefile)
1658 				fdp->fd_freefile = dfd;
1659 		}
1660 		return (0);
1661 
1662 	default:
1663 		return (error);
1664 	}
1665 	/* NOTREACHED */
1666 }
1667 
1668 
1669 struct filedesc_to_leader *
1670 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
1671 			 struct proc *leader)
1672 {
1673 	struct filedesc_to_leader *fdtol;
1674 
1675 	fdtol = malloc(sizeof(struct filedesc_to_leader),
1676 			M_FILEDESC_TO_LEADER, M_WAITOK);
1677 	fdtol->fdl_refcount = 1;
1678 	fdtol->fdl_holdcount = 0;
1679 	fdtol->fdl_wakeup = 0;
1680 	fdtol->fdl_leader = leader;
1681 	if (old != NULL) {
1682 		fdtol->fdl_next = old->fdl_next;
1683 		fdtol->fdl_prev = old;
1684 		old->fdl_next = fdtol;
1685 		fdtol->fdl_next->fdl_prev = fdtol;
1686 	} else {
1687 		fdtol->fdl_next = fdtol;
1688 		fdtol->fdl_prev = fdtol;
1689 	}
1690 	return fdtol;
1691 }
1692 
1693 /*
1694  * Get file structures.
1695  */
1696 static int
1697 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
1698 {
1699 	struct kinfo_file kf;
1700 	struct filedesc *fdp;
1701 	struct file *fp;
1702 	struct proc *p;
1703 	int error, n;
1704 
1705 	/*
1706 	 * Note: because the number of file descriptors is calculated
1707 	 * in different ways for sizing vs returning the data,
1708 	 * there is information leakage from the first loop.  However,
1709 	 * it is of a similar order of magnitude to the leakage from
1710 	 * global system statistics such as kern.openfiles.
1711 	 */
1712 	if (req->oldptr == NULL) {
1713 		n = 16;		/* A slight overestimate. */
1714 		LIST_FOREACH(fp, &filehead, f_list)
1715 			n += fp->f_count;
1716 		return (SYSCTL_OUT(req, 0, n * sizeof(kf)));
1717 	}
1718 	error = 0;
1719 	LIST_FOREACH(p, &allproc, p_list) {
1720 		if (p->p_stat == SIDL)
1721 			continue;
1722 		if (!PRISON_CHECK(req->td->td_proc->p_ucred, p->p_ucred)) {
1723 			continue;
1724 		}
1725 		if ((fdp = p->p_fd) == NULL) {
1726 			continue;
1727 		}
1728 		for (n = 0; n < fdp->fd_nfiles; ++n) {
1729 			if ((fp = fdp->fd_ofiles[n]) == NULL)
1730 				continue;
1731 			kcore_make_file(&kf, fp, p->p_pid,
1732 					p->p_ucred->cr_uid, n);
1733 			error = SYSCTL_OUT(req, &kf, sizeof(kf));
1734 			if (error)
1735 				break;
1736 		}
1737 		if (error)
1738 			break;
1739 	}
1740 	return (error);
1741 }
1742 
1743 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
1744     0, 0, sysctl_kern_file, "S,file", "Entire file table");
1745 
1746 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
1747     &maxfilesperproc, 0, "Maximum files allowed open per process");
1748 
1749 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
1750     &maxfiles, 0, "Maximum number of files");
1751 
1752 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
1753     &maxfilesrootres, 0, "Descriptors reserved for root use");
1754 
1755 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
1756 	&nfiles, 0, "System-wide number of open files");
1757 
1758 static void
1759 fildesc_drvinit(void *unused)
1760 {
1761 	int fd;
1762 
1763 	cdevsw_add(&fildesc_cdevsw, 0, 0);
1764 	for (fd = 0; fd < NUMFDESC; fd++) {
1765 		make_dev(&fildesc_cdevsw, fd,
1766 		    UID_BIN, GID_BIN, 0666, "fd/%d", fd);
1767 	}
1768 	make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
1769 	make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
1770 	make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
1771 }
1772 
1773 struct fileops badfileops = {
1774 	NULL,	/* port */
1775 	NULL,	/* clone */
1776 	badfo_readwrite,
1777 	badfo_readwrite,
1778 	badfo_ioctl,
1779 	badfo_poll,
1780 	badfo_kqfilter,
1781 	badfo_stat,
1782 	badfo_close
1783 };
1784 
1785 static int
1786 badfo_readwrite(
1787 	struct file *fp,
1788 	struct uio *uio,
1789 	struct ucred *cred,
1790 	int flags,
1791 	struct thread *td
1792 ) {
1793 	return (EBADF);
1794 }
1795 
1796 static int
1797 badfo_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
1798 {
1799 	return (EBADF);
1800 }
1801 
1802 static int
1803 badfo_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
1804 {
1805 	return (0);
1806 }
1807 
1808 static int
1809 badfo_kqfilter(struct file *fp, struct knote *kn)
1810 {
1811 	return (0);
1812 }
1813 
1814 static int
1815 badfo_stat(struct file *fp, struct stat *sb, struct thread *td)
1816 {
1817 	return (EBADF);
1818 }
1819 
1820 static int
1821 badfo_close(struct file *fp, struct thread *td)
1822 {
1823 	return (EBADF);
1824 }
1825 
1826 SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
1827 					fildesc_drvinit,NULL)
1828