xref: /dragonfly/sys/kern/kern_descrip.c (revision 8164c1fe)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
39  * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
40  * $DragonFly: src/sys/kern/kern_descrip.c,v 1.39 2005/02/02 20:36:09 dillon Exp $
41  */
42 
43 #include "opt_compat.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/sysproto.h>
48 #include <sys/conf.h>
49 #include <sys/filedesc.h>
50 #include <sys/kernel.h>
51 #include <sys/sysctl.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/nlookup.h>
55 #include <sys/file.h>
56 #include <sys/stat.h>
57 #include <sys/filio.h>
58 #include <sys/fcntl.h>
59 #include <sys/unistd.h>
60 #include <sys/resourcevar.h>
61 #include <sys/event.h>
62 #include <sys/kern_syscall.h>
63 #include <sys/kcore.h>
64 #include <sys/kinfo.h>
65 
66 #include <vm/vm.h>
67 #include <vm/vm_extern.h>
68 
69 #include <sys/file2.h>
70 
71 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
72 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
73 		     "file desc to leader structures");
74 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
75 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
76 
77 static	 d_open_t  fdopen;
78 #define NUMFDESC 64
79 
80 #define CDEV_MAJOR 22
81 static struct cdevsw fildesc_cdevsw = {
82 	/* name */	"FD",
83 	/* maj */	CDEV_MAJOR,
84 	/* flags */	0,
85 	/* port */      NULL,
86 	/* clone */	NULL,
87 
88 	/* open */	fdopen,
89 	/* close */	noclose,
90 	/* read */	noread,
91 	/* write */	nowrite,
92 	/* ioctl */	noioctl,
93 	/* poll */	nopoll,
94 	/* mmap */	nommap,
95 	/* strategy */	nostrategy,
96 	/* dump */	nodump,
97 	/* psize */	nopsize
98 };
99 
100 static int badfo_readwrite (struct file *fp, struct uio *uio,
101     struct ucred *cred, int flags, struct thread *td);
102 static int badfo_ioctl (struct file *fp, u_long com, caddr_t data,
103     struct thread *td);
104 static int badfo_poll (struct file *fp, int events,
105     struct ucred *cred, struct thread *td);
106 static int badfo_kqfilter (struct file *fp, struct knote *kn);
107 static int badfo_stat (struct file *fp, struct stat *sb, struct thread *td);
108 static int badfo_close (struct file *fp, struct thread *td);
109 
110 /*
111  * Descriptor management.
112  */
113 struct filelist filehead;	/* head of list of open files */
114 int nfiles;			/* actual number of open files */
115 extern int cmask;
116 
117 /*
118  * System calls on descriptors.
119  */
120 /* ARGSUSED */
121 int
122 getdtablesize(struct getdtablesize_args *uap)
123 {
124 	struct proc *p = curproc;
125 
126 	uap->sysmsg_result =
127 	    min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
128 	return (0);
129 }
130 
131 /*
132  * Duplicate a file descriptor to a particular value.
133  *
134  * note: keep in mind that a potential race condition exists when closing
135  * descriptors from a shared descriptor table (via rfork).
136  */
137 /* ARGSUSED */
138 int
139 dup2(struct dup2_args *uap)
140 {
141 	int error;
142 
143 	error = kern_dup(DUP_FIXED, uap->from, uap->to, uap->sysmsg_fds);
144 
145 	return (error);
146 }
147 
148 /*
149  * Duplicate a file descriptor.
150  */
151 /* ARGSUSED */
152 int
153 dup(struct dup_args *uap)
154 {
155 	int error;
156 
157 	error = kern_dup(DUP_VARIABLE, uap->fd, 0, uap->sysmsg_fds);
158 
159 	return (error);
160 }
161 
162 int
163 kern_fcntl(int fd, int cmd, union fcntl_dat *dat)
164 {
165 	struct thread *td = curthread;
166 	struct proc *p = td->td_proc;
167 	struct filedesc *fdp = p->p_fd;
168 	struct file *fp;
169 	char *pop;
170 	struct vnode *vp;
171 	u_int newmin;
172 	int tmp, error, flg = F_POSIX;
173 
174 	KKASSERT(p);
175 
176 	if ((unsigned)fd >= fdp->fd_nfiles ||
177 	    (fp = fdp->fd_ofiles[fd]) == NULL)
178 		return (EBADF);
179 	pop = &fdp->fd_ofileflags[fd];
180 
181 	switch (cmd) {
182 	case F_DUPFD:
183 		newmin = dat->fc_fd;
184 		if (newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
185 		    newmin > maxfilesperproc)
186 			return (EINVAL);
187 		error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
188 		return (error);
189 
190 	case F_GETFD:
191 		dat->fc_cloexec = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0;
192 		return (0);
193 
194 	case F_SETFD:
195 		*pop = (*pop &~ UF_EXCLOSE) |
196 		    (dat->fc_cloexec & FD_CLOEXEC ? UF_EXCLOSE : 0);
197 		return (0);
198 
199 	case F_GETFL:
200 		dat->fc_flags = OFLAGS(fp->f_flag);
201 		return (0);
202 
203 	case F_SETFL:
204 		fhold(fp);
205 		fp->f_flag &= ~FCNTLFLAGS;
206 		fp->f_flag |= FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
207 		tmp = fp->f_flag & FNONBLOCK;
208 		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
209 		if (error) {
210 			fdrop(fp, td);
211 			return (error);
212 		}
213 		tmp = fp->f_flag & FASYNC;
214 		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, td);
215 		if (!error) {
216 			fdrop(fp, td);
217 			return (0);
218 		}
219 		fp->f_flag &= ~FNONBLOCK;
220 		tmp = 0;
221 		fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
222 		fdrop(fp, td);
223 		return (error);
224 
225 	case F_GETOWN:
226 		fhold(fp);
227 		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, td);
228 		fdrop(fp, td);
229 		return(error);
230 
231 	case F_SETOWN:
232 		fhold(fp);
233 		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, td);
234 		fdrop(fp, td);
235 		return(error);
236 
237 	case F_SETLKW:
238 		flg |= F_WAIT;
239 		/* Fall into F_SETLK */
240 
241 	case F_SETLK:
242 		if (fp->f_type != DTYPE_VNODE)
243 			return (EBADF);
244 		vp = (struct vnode *)fp->f_data;
245 
246 		/*
247 		 * copyin/lockop may block
248 		 */
249 		fhold(fp);
250 		if (dat->fc_flock.l_whence == SEEK_CUR)
251 			dat->fc_flock.l_start += fp->f_offset;
252 
253 		switch (dat->fc_flock.l_type) {
254 		case F_RDLCK:
255 			if ((fp->f_flag & FREAD) == 0) {
256 				error = EBADF;
257 				break;
258 			}
259 			p->p_leader->p_flag |= P_ADVLOCK;
260 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
261 			    &dat->fc_flock, flg);
262 			break;
263 		case F_WRLCK:
264 			if ((fp->f_flag & FWRITE) == 0) {
265 				error = EBADF;
266 				break;
267 			}
268 			p->p_leader->p_flag |= P_ADVLOCK;
269 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
270 			    &dat->fc_flock, flg);
271 			break;
272 		case F_UNLCK:
273 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
274 				&dat->fc_flock, F_POSIX);
275 			break;
276 		default:
277 			error = EINVAL;
278 			break;
279 		}
280 		/* Check for race with close */
281 		if ((unsigned) fd >= fdp->fd_nfiles ||
282 		    fp != fdp->fd_ofiles[fd]) {
283 			dat->fc_flock.l_whence = SEEK_SET;
284 			dat->fc_flock.l_start = 0;
285 			dat->fc_flock.l_len = 0;
286 			dat->fc_flock.l_type = F_UNLCK;
287 			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
288 					   F_UNLCK, &dat->fc_flock, F_POSIX);
289 		}
290 		fdrop(fp, td);
291 		return(error);
292 
293 	case F_GETLK:
294 		if (fp->f_type != DTYPE_VNODE)
295 			return (EBADF);
296 		vp = (struct vnode *)fp->f_data;
297 		/*
298 		 * copyin/lockop may block
299 		 */
300 		fhold(fp);
301 		if (dat->fc_flock.l_type != F_RDLCK &&
302 		    dat->fc_flock.l_type != F_WRLCK &&
303 		    dat->fc_flock.l_type != F_UNLCK) {
304 			fdrop(fp, td);
305 			return (EINVAL);
306 		}
307 		if (dat->fc_flock.l_whence == SEEK_CUR)
308 			dat->fc_flock.l_start += fp->f_offset;
309 		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
310 			    &dat->fc_flock, F_POSIX);
311 		fdrop(fp, td);
312 		return(error);
313 	default:
314 		return (EINVAL);
315 	}
316 	/* NOTREACHED */
317 }
318 
319 /*
320  * The file control system call.
321  */
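/*
 * Example (illustrative, userland view): the wrapper below marshals the
 * variable fcntl() argument into the kernel's union fcntl_dat before
 * calling kern_fcntl(), then copies results back out.  For F_SETLK,
 * F_SETLKW and F_GETLK the struct flock is copied in first and, for
 * F_GETLK, copied back out on success.  A minimal sketch of the path
 * exercised here:
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;			(0 means "to EOF", i.e. whole file)
 *	if (fcntl(fd, F_SETLKW, &fl) < 0)
 *		err(1, "fcntl(F_SETLKW)");
 */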
322 int
323 fcntl(struct fcntl_args *uap)
324 {
325 	union fcntl_dat dat;
326 	int error;
327 
328 	switch (uap->cmd) {
329 	case F_DUPFD:
330 		dat.fc_fd = uap->arg;
331 		break;
332 	case F_SETFD:
333 		dat.fc_cloexec = uap->arg;
334 		break;
335 	case F_SETFL:
336 		dat.fc_flags = uap->arg;
337 		break;
338 	case F_SETOWN:
339 		dat.fc_owner = uap->arg;
340 		break;
341 	case F_SETLKW:
342 	case F_SETLK:
343 	case F_GETLK:
344 		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
345 		    sizeof(struct flock));
346 		if (error)
347 			return (error);
348 		break;
349 	}
350 
351 	error = kern_fcntl(uap->fd, uap->cmd, &dat);
352 
353 	if (error == 0) {
354 		switch (uap->cmd) {
355 		case F_DUPFD:
356 			uap->sysmsg_result = dat.fc_fd;
357 			break;
358 		case F_GETFD:
359 			uap->sysmsg_result = dat.fc_cloexec;
360 			break;
361 		case F_GETFL:
362 			uap->sysmsg_result = dat.fc_flags;
363 			break;
364 		case F_GETOWN:
365 			uap->sysmsg_result = dat.fc_owner;
			break;
366 		case F_GETLK:
367 			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
368 			    sizeof(struct flock));
369 			break;
370 		}
371 	}
372 
373 	return (error);
374 }
375 
376 /*
377  * Common code for dup, dup2, and fcntl(F_DUPFD).
378  *
379  * The type flag can be either DUP_FIXED or DUP_VARIABLE.  DUP_FIXED tells
380  * kern_dup() to destructively dup over an existing file descriptor if new
381  * is already open.  DUP_VARIABLE tells kern_dup() to find the lowest
382  * unused file descriptor that is greater than or equal to new.
383  */
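/*
 * Example (illustrative only) of how the callers in this file map onto
 * kern_dup():  dup(fd) is kern_dup(DUP_VARIABLE, fd, 0, &res) and yields
 * the lowest free descriptor; dup2(from, to) is
 * kern_dup(DUP_FIXED, from, to, &res) and closes whatever was open at
 * 'to'; fcntl(fd, F_DUPFD, minfd) is kern_dup(DUP_VARIABLE, fd, minfd,
 * &res) and yields the lowest free descriptor >= minfd.  In-kernel
 * sketch, assuming a curproc context like the callers above:
 *
 *	int res, error;
 *
 *	error = kern_dup(DUP_FIXED, 0, 10, &res);	(like dup2(0, 10))
 *	if (error == 0)
 *		KKASSERT(res == 10);
 */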
384 int
385 kern_dup(enum dup_type type, int old, int new, int *res)
386 {
387 	struct thread *td = curthread;
388 	struct proc *p = td->td_proc;
389 	struct filedesc *fdp = p->p_fd;
390 	struct file *fp;
391 	struct file *delfp;
392 	int holdleaders;
393 	int error, newfd;
394 
395 	/*
396 	 * Verify that we have a valid descriptor to dup from and
397 	 * possibly to dup to.
398 	 */
399 	if (old < 0 || new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
400 	    new >= maxfilesperproc)
401 		return (EBADF);
402 	if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL)
403 		return (EBADF);
404 	if (type == DUP_FIXED && old == new) {
405 		*res = new;
406 		return (0);
407 	}
408 	fp = fdp->fd_ofiles[old];
409 	fhold(fp);
410 
411 	/*
412 	 * Expand the table for the new descriptor if needed.  This may
413 	 * block, and may drop and reacquire the filedesc lock.
414 	 */
415 	if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
416 		error = fdalloc(p, new, &newfd);
417 		if (error) {
418 			fdrop(fp, td);
419 			return (error);
420 		}
421 	}
422 	if (type == DUP_VARIABLE)
423 		new = newfd;
424 
425 	/*
426 	 * If the old file changed out from under us then treat it as a
427 	 * bad file descriptor.  Userland should do its own locking to
428 	 * avoid this case.
429 	 */
430 	if (fdp->fd_ofiles[old] != fp) {
431 		if (fdp->fd_ofiles[new] == NULL) {
432 			if (new < fdp->fd_freefile)
433 				fdp->fd_freefile = new;
434 			while (fdp->fd_lastfile > 0 &&
435 			    fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
436 				fdp->fd_lastfile--;
437 		}
438 		fdrop(fp, td);
439 		return (EBADF);
440 	}
441 	KASSERT(old != new, ("new fd is same as old"));
442 
443 	/*
444 	 * Save info on the descriptor being overwritten.  We have
445 	 * to do the unmap now, but we cannot close it without
446 	 * introducing an ownership race for the slot.
447 	 */
448 	delfp = fdp->fd_ofiles[new];
449 	if (delfp != NULL && p->p_fdtol != NULL) {
450 		/*
451 		 * Ask fdfree() to sleep to ensure that all relevant
452 		 * process leaders can be traversed in closef().
453 		 */
454 		fdp->fd_holdleaderscount++;
455 		holdleaders = 1;
456 	} else
457 		holdleaders = 0;
458 	KASSERT(delfp == NULL || type == DUP_FIXED,
459 	    ("dup() picked an open file"));
460 #if 0
461 	if (delfp && (fdp->fd_ofileflags[new] & UF_MAPPED))
462 		(void) munmapfd(p, new);
463 #endif
464 
465 	/*
466 	 * Duplicate the source descriptor, update lastfile
467 	 */
468 	fdp->fd_ofiles[new] = fp;
469 	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
470 	if (new > fdp->fd_lastfile)
471 		fdp->fd_lastfile = new;
472 	*res = new;
473 
474 	/*
475 	 * If we dup'd over a valid file, we now own the reference to it
476 	 * and must dispose of it using closef() semantics (as if a
477 	 * close() were performed on it).
478 	 */
479 	if (delfp) {
480 		(void) closef(delfp, td);
481 		if (holdleaders) {
482 			fdp->fd_holdleaderscount--;
483 			if (fdp->fd_holdleaderscount == 0 &&
484 			    fdp->fd_holdleaderswakeup != 0) {
485 				fdp->fd_holdleaderswakeup = 0;
486 				wakeup(&fdp->fd_holdleaderscount);
487 			}
488 		}
489 	}
490 	return (0);
491 }
492 
493 /*
494  * If sigio is on the list associated with a process or process group,
495  * disable signalling from the device, remove sigio from the list and
496  * free sigio.
497  */
498 void
499 funsetown(struct sigio *sigio)
500 {
501 	int s;
502 
503 	if (sigio == NULL)
504 		return;
505 	s = splhigh();
506 	*(sigio->sio_myref) = NULL;
507 	splx(s);
508 	if (sigio->sio_pgid < 0) {
509 		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
510 			     sigio, sio_pgsigio);
511 	} else /* if ((*sigiop)->sio_pgid > 0) */ {
512 		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
513 			     sigio, sio_pgsigio);
514 	}
515 	crfree(sigio->sio_ucred);
516 	free(sigio, M_SIGIO);
517 }
518 
519 /* Free a list of sigio structures. */
520 void
521 funsetownlst(struct sigiolst *sigiolst)
522 {
523 	struct sigio *sigio;
524 
525 	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
526 		funsetown(sigio);
527 }
528 
529 /*
530  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
531  *
532  * After permission checking, add a sigio structure to the sigio list for
533  * the process or process group.
534  */
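/*
 * Example (illustrative only): the sign of the pgid argument selects the
 * target, mirroring the lookups below.  Zero clears ownership via
 * funsetown(), a positive value names a process (pfind()), and a negative
 * value names a process group (pgfind(-pgid)).  Typical userland usage,
 * assuming fd refers to a socket or tty that can generate SIGIO:
 *
 *	fcntl(fd, F_SETOWN, getpid());		this process gets SIGIO
 *	fcntl(fd, F_SETOWN, -getpgrp());	this process group gets SIGIO
 */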
535 int
536 fsetown(pid_t pgid, struct sigio **sigiop)
537 {
538 	struct proc *proc;
539 	struct pgrp *pgrp;
540 	struct sigio *sigio;
541 	int s;
542 
543 	if (pgid == 0) {
544 		funsetown(*sigiop);
545 		return (0);
546 	}
547 	if (pgid > 0) {
548 		proc = pfind(pgid);
549 		if (proc == NULL)
550 			return (ESRCH);
551 
552 		/*
553 		 * Policy - Don't allow a process to FSETOWN a process
554 		 * in another session.
555 		 *
556 		 * Remove this test to allow maximum flexibility or
557 		 * restrict FSETOWN to the current process or process
558 		 * group for maximum safety.
559 		 */
560 		if (proc->p_session != curproc->p_session)
561 			return (EPERM);
562 
563 		pgrp = NULL;
564 	} else /* if (pgid < 0) */ {
565 		pgrp = pgfind(-pgid);
566 		if (pgrp == NULL)
567 			return (ESRCH);
568 
569 		/*
570 		 * Policy - Don't allow a process to FSETOWN a process
571 		 * in another session.
572 		 *
573 		 * Remove this test to allow maximum flexibility or
574 		 * restrict FSETOWN to the current process or process
575 		 * group for maximum safety.
576 		 */
577 		if (pgrp->pg_session != curproc->p_session)
578 			return (EPERM);
579 
580 		proc = NULL;
581 	}
582 	funsetown(*sigiop);
583 	sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
584 	if (pgid > 0) {
585 		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
586 		sigio->sio_proc = proc;
587 	} else {
588 		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
589 		sigio->sio_pgrp = pgrp;
590 	}
591 	sigio->sio_pgid = pgid;
592 	sigio->sio_ucred = crhold(curproc->p_ucred);
593 	/* It would be convenient if p_ruid was in ucred. */
594 	sigio->sio_ruid = curproc->p_ucred->cr_ruid;
595 	sigio->sio_myref = sigiop;
596 	s = splhigh();
597 	*sigiop = sigio;
598 	splx(s);
599 	return (0);
600 }
601 
602 /*
603  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
604  */
605 pid_t
606 fgetown(struct sigio *sigio)
607 {
608 	return (sigio != NULL ? sigio->sio_pgid : 0);
609 }
610 
611 /*
612  * Close a file descriptor.
613  */
614 /* ARGSUSED */
615 int
616 close(struct close_args *uap)
617 {
618 	return(kern_close(uap->fd));
619 }
620 
621 int
622 kern_close(int fd)
623 {
624 	struct thread *td = curthread;
625 	struct proc *p = td->td_proc;
626 	struct filedesc *fdp;
627 	struct file *fp;
628 	int error;
629 	int holdleaders;
630 
631 	KKASSERT(p);
632 	fdp = p->p_fd;
633 
634 	if ((unsigned)fd >= fdp->fd_nfiles ||
635 	    (fp = fdp->fd_ofiles[fd]) == NULL)
636 		return (EBADF);
637 #if 0
638 	if (fdp->fd_ofileflags[fd] & UF_MAPPED)
639 		(void) munmapfd(p, fd);
640 #endif
641 	fdp->fd_ofiles[fd] = NULL;
642 	fdp->fd_ofileflags[fd] = 0;
643 	holdleaders = 0;
644 	if (p->p_fdtol != NULL) {
645 		/*
646 		 * Ask fdfree() to sleep to ensure that all relevant
647 		 * process leaders can be traversed in closef().
648 		 */
649 		fdp->fd_holdleaderscount++;
650 		holdleaders = 1;
651 	}
652 
653 	/*
654 	 * we now hold the fp reference that used to be owned by the descriptor
655 	 * array.
656 	 */
657 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
658 		fdp->fd_lastfile--;
659 	if (fd < fdp->fd_freefile)
660 		fdp->fd_freefile = fd;
661 	if (fd < fdp->fd_knlistsize)
662 		knote_fdclose(p, fd);
663 	error = closef(fp, td);
664 	if (holdleaders) {
665 		fdp->fd_holdleaderscount--;
666 		if (fdp->fd_holdleaderscount == 0 &&
667 		    fdp->fd_holdleaderswakeup != 0) {
668 			fdp->fd_holdleaderswakeup = 0;
669 			wakeup(&fdp->fd_holdleaderscount);
670 		}
671 	}
672 	return (error);
673 }
674 
675 int
676 kern_fstat(int fd, struct stat *ub)
677 {
678 	struct thread *td = curthread;
679 	struct proc *p = td->td_proc;
680 	struct filedesc *fdp;
681 	struct file *fp;
682 	int error;
683 
684 	KKASSERT(p);
685 
686 	fdp = p->p_fd;
687 	if ((unsigned)fd >= fdp->fd_nfiles ||
688 	    (fp = fdp->fd_ofiles[fd]) == NULL)
689 		return (EBADF);
690 	fhold(fp);
691 	error = fo_stat(fp, ub, td);
692 	fdrop(fp, td);
693 
694 	return (error);
695 }
696 
697 /*
698  * Return status information about a file descriptor.
699  */
700 int
701 fstat(struct fstat_args *uap)
702 {
703 	struct stat st;
704 	int error;
705 
706 	error = kern_fstat(uap->fd, &st);
707 
708 	if (error == 0)
709 		error = copyout(&st, uap->sb, sizeof(st));
710 	return (error);
711 }
712 
713 /*
714  * XXX: This is for source compatibility with NetBSD.  Probably doesn't
715  * belong here.
716  */
717 int
718 nfstat(struct nfstat_args *uap)
719 {
720 	struct stat st;
721 	struct nstat nst;
722 	int error;
723 
724 	error = kern_fstat(uap->fd, &st);
725 
726 	if (error == 0) {
727 		cvtnstat(&st, &nst);
728 		error = copyout(&nst, uap->sb, sizeof (nst));
729 	}
730 	return (error);
731 }
732 
733 /*
734  * Return pathconf information about a file descriptor.
735  */
736 /* ARGSUSED */
737 int
738 fpathconf(struct fpathconf_args *uap)
739 {
740 	struct thread *td = curthread;
741 	struct proc *p = td->td_proc;
742 	struct filedesc *fdp;
743 	struct file *fp;
744 	struct vnode *vp;
745 	int error = 0;
746 
747 	KKASSERT(p);
748 	fdp = p->p_fd;
749 	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
750 	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
751 		return (EBADF);
752 
753 	fhold(fp);
754 
755 	switch (fp->f_type) {
756 	case DTYPE_PIPE:
757 	case DTYPE_SOCKET:
758 		if (uap->name != _PC_PIPE_BUF) {
759 			error = EINVAL;
760 		} else {
761 			uap->sysmsg_result = PIPE_BUF;
762 			error = 0;
763 		}
764 		break;
765 	case DTYPE_FIFO:
766 	case DTYPE_VNODE:
767 		vp = (struct vnode *)fp->f_data;
768 		error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
769 		break;
770 	default:
771 		error = EOPNOTSUPP;
772 		break;
773 	}
774 	fdrop(fp, td);
775 	return(error);
776 }
777 
778 /*
779  * Allocate a file descriptor for the process.
780  */
781 static int fdexpand;
782 SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");
783 
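/*
 * Summary of the strategy below (illustrative): the search starts at the
 * larger of 'want' and fd_freefile; if no slot is free and the resource
 * limit allows it, the ofiles array grows (doubling once past NDEXTENT),
 * the old contents are copied with bcopy()/bzero(), and the size is
 * re-checked afterwards because the M_WAITOK malloc() may have blocked
 * and raced another expansion.  A minimal caller sketch (fsetfd() below
 * is the canonical user):
 *
 *	int fd;
 *
 *	if ((error = fdalloc(p, 0, &fd)) == 0)
 *		p->p_fd->fd_ofiles[fd] = fp;	(caller supplies the ref)
 */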
784 int
785 fdalloc(struct proc *p, int want, int *result)
786 {
787 	struct filedesc *fdp = p->p_fd;
788 	int i;
789 	int lim, last, nfiles;
790 	struct file **newofile;
791 	char *newofileflags;
792 
793 	/*
794 	 * Search for a free descriptor starting at the higher
795 	 * of want or fd_freefile.  If that fails, consider
796 	 * expanding the ofile array.
797 	 */
798 	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
799 	for (;;) {
800 		last = min(fdp->fd_nfiles, lim);
801 		if ((i = want) < fdp->fd_freefile)
802 			i = fdp->fd_freefile;
803 		for (; i < last; i++) {
804 			if (fdp->fd_ofiles[i] == NULL) {
805 				fdp->fd_ofileflags[i] = 0;
806 				if (i > fdp->fd_lastfile)
807 					fdp->fd_lastfile = i;
808 				if (want <= fdp->fd_freefile)
809 					fdp->fd_freefile = i;
810 				*result = i;
811 				return (0);
812 			}
813 		}
814 
815 		/*
816 		 * No space in current array.  Expand?
817 		 */
818 		if (fdp->fd_nfiles >= lim)
819 			return (EMFILE);
820 		if (fdp->fd_nfiles < NDEXTENT)
821 			nfiles = NDEXTENT;
822 		else
823 			nfiles = 2 * fdp->fd_nfiles;
824 		newofile = malloc(nfiles * OFILESIZE, M_FILEDESC, M_WAITOK);
825 
826 		/*
827 	 * Deal with a file-table extension race that might have occurred
828 		 * when malloc was blocked.
829 		 */
830 		if (fdp->fd_nfiles >= nfiles) {
831 			free(newofile, M_FILEDESC);
832 			continue;
833 		}
834 		newofileflags = (char *) &newofile[nfiles];
835 		/*
836 		 * Copy the existing ofile and ofileflags arrays
837 		 * and zero the new portion of each array.
838 		 */
839 		bcopy(fdp->fd_ofiles, newofile,
840 			(i = sizeof(struct file *) * fdp->fd_nfiles));
841 		bzero((char *)newofile + i, nfiles * sizeof(struct file *) - i);
842 		bcopy(fdp->fd_ofileflags, newofileflags,
843 			(i = sizeof(char) * fdp->fd_nfiles));
844 		bzero(newofileflags + i, nfiles * sizeof(char) - i);
845 		if (fdp->fd_nfiles > NDFILE)
846 			free(fdp->fd_ofiles, M_FILEDESC);
847 		fdp->fd_ofiles = newofile;
848 		fdp->fd_ofileflags = newofileflags;
849 		fdp->fd_nfiles = nfiles;
850 		fdexpand++;
851 	}
852 	return (0);
853 }
854 
855 /*
856  * Check to see whether n user file descriptors
857  * are available to the process p.
858  */
859 int
860 fdavail(struct proc *p, int n)
861 {
862 	struct filedesc *fdp = p->p_fd;
863 	struct file **fpp;
864 	int i, lim, last;
865 
866 	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
867 	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
868 		return (1);
869 
870 	last = min(fdp->fd_nfiles, lim);
871 	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
872 	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
873 		if (*fpp == NULL && --n <= 0)
874 			return (1);
875 	}
876 	return (0);
877 }
878 
879 /*
880  * falloc:
881  *	Create a new open file structure and allocate a file descriptor
882  *	for the process that refers to it.  If p is NULL, no descriptor
883  *	is allocated and the file pointer is returned unassociated with
884  *	any process.  resultfd is only used if p is not NULL and may
885  *	separately be NULL indicating that you don't need the returned fd.
886  *
887  *	A held file pointer is returned.  If a descriptor has been allocated
888  *	an additional hold on the fp will be made due to the fd_ofiles[]
889  *	reference.
890  */
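/*
 * Sketch of the usual calling pattern (illustrative; fdcheckstd() below
 * follows essentially this sequence).  falloc() returns a held fp; when a
 * descriptor is also allocated, fd_ofiles[] holds a second reference, so
 * the caller drops its own hold once the file is initialized:
 *
 *	struct file *fp;
 *	int fd, error;
 *
 *	if ((error = falloc(p, &fp, &fd)) != 0)
 *		return (error);
 *	(initialize fp->f_type, fp->f_ops, fp->f_data here)
 *	fdrop(fp, td);			releases the hold from falloc()
 */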
891 int
892 falloc(struct proc *p, struct file **resultfp, int *resultfd)
893 {
894 	static struct timeval lastfail;
895 	static int curfail;
896 	struct file *fp;
897 	int error;
898 
899 	fp = NULL;
900 
901 	/*
902 	 * Handle filetable full issues and root overfill.
903 	 */
904 	if (nfiles >= maxfiles - maxfilesrootres &&
905 	    ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) {
906 		if (ppsratecheck(&lastfail, &curfail, 1)) {
907 			printf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n",
908 				(p ? p->p_ucred->cr_ruid : -1));
909 		}
910 		error = ENFILE;
911 		goto done;
912 	}
913 
914 	/*
915 	 * Allocate a new file descriptor.
916 	 */
917 	nfiles++;
918 	fp = malloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
919 	fp->f_count = 1;
920 	fp->f_ops = &badfileops;
921 	fp->f_seqcount = 1;
922 	if (p)
923 		fp->f_cred = crhold(p->p_ucred);
924 	else
925 		fp->f_cred = crhold(proc0.p_ucred);
926 	LIST_INSERT_HEAD(&filehead, fp, f_list);
927 	if (resultfd) {
928 		if ((error = fsetfd(p, fp, resultfd)) != 0) {
929 			fdrop(fp, p->p_thread);
930 			fp = NULL;
931 		}
932 	} else {
933 		error = 0;
934 	}
935 done:
936 	*resultfp = fp;
937 	return (error);
938 }
939 
940 /*
941  * Associate a file pointer with a file descriptor.  On success the fp
942  * will have an additional ref representing the fd_ofiles[] association.
943  */
944 int
945 fsetfd(struct proc *p, struct file *fp, int *resultfd)
946 {
947 	int i;
948 	int error;
949 
950 	KKASSERT(p);
951 
952 	i = -1;
953 	if ((error = fdalloc(p, 0, &i)) == 0) {
954 		fhold(fp);
955 		p->p_fd->fd_ofiles[i] = fp;
956 	}
957 	*resultfd = i;
958 	return (error);
959 }
960 
961 void
962 fsetcred(struct file *fp, struct ucred *cr)
963 {
964 	crhold(cr);
965 	crfree(fp->f_cred);
966 	fp->f_cred = cr;
967 }
968 
969 /*
970  * Free a file descriptor.
971  */
972 void
973 ffree(struct file *fp)
974 {
975 	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
976 	LIST_REMOVE(fp, f_list);
977 	crfree(fp->f_cred);
978 	if (fp->f_ncp) {
979 	    cache_drop(fp->f_ncp);
980 	    fp->f_ncp = NULL;
981 	}
982 	nfiles--;
983 	free(fp, M_FILE);
984 }
985 
986 /*
987  * Build a new filedesc structure.
988  */
989 struct filedesc *
990 fdinit(struct proc *p)
991 {
992 	struct filedesc0 *newfdp;
993 	struct filedesc *fdp = p->p_fd;
994 
995 	newfdp = malloc(sizeof(struct filedesc0), M_FILEDESC, M_WAITOK|M_ZERO);
996 	if (fdp->fd_cdir) {
997 		newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
998 		vref(newfdp->fd_fd.fd_cdir);
999 		newfdp->fd_fd.fd_ncdir = cache_hold(fdp->fd_ncdir);
1000 	}
1001 
1002 	/*
1003 	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
1004 	 * proc0, but should unconditionally exist in other processes.
1005 	 */
1006 	if (fdp->fd_rdir) {
1007 		newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
1008 		vref(newfdp->fd_fd.fd_rdir);
1009 		newfdp->fd_fd.fd_nrdir = cache_hold(fdp->fd_nrdir);
1010 	}
1011 	if (fdp->fd_jdir) {
1012 		newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
1013 		vref(newfdp->fd_fd.fd_jdir);
1014 		newfdp->fd_fd.fd_njdir = cache_hold(fdp->fd_njdir);
1015 	}
1016 
1017 	/* Create the file descriptor table. */
1018 	newfdp->fd_fd.fd_refcnt = 1;
1019 	newfdp->fd_fd.fd_cmask = cmask;
1020 	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
1021 	newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
1022 	newfdp->fd_fd.fd_nfiles = NDFILE;
1023 	newfdp->fd_fd.fd_knlistsize = -1;
1024 
1025 	return (&newfdp->fd_fd);
1026 }
1027 
1028 /*
1029  * Share a filedesc structure.
1030  */
1031 struct filedesc *
1032 fdshare(struct proc *p)
1033 {
1034 	p->p_fd->fd_refcnt++;
1035 	return (p->p_fd);
1036 }
1037 
1038 /*
1039  * Copy a filedesc structure.
1040  */
1041 struct filedesc *
1042 fdcopy(struct proc *p)
1043 {
1044 	struct filedesc *newfdp, *fdp = p->p_fd;
1045 	struct file **fpp;
1046 	int i;
1047 
1048 	/* Certain daemons might not have file descriptors. */
1049 	if (fdp == NULL)
1050 		return (NULL);
1051 
1052 	newfdp = malloc(sizeof(struct filedesc0), M_FILEDESC, M_WAITOK);
1053 	bcopy(fdp, newfdp, sizeof(struct filedesc));
1054 	if (newfdp->fd_cdir) {
1055 		vref(newfdp->fd_cdir);
1056 		newfdp->fd_ncdir = cache_hold(newfdp->fd_ncdir);
1057 	}
1058 	/*
1059 	 * We must check for fd_rdir here, at least for now because
1060 	 * the init process is created before we have access to the
1061 	 * rootvnode to take a reference to it.
1062 	 */
1063 	if (newfdp->fd_rdir) {
1064 		vref(newfdp->fd_rdir);
1065 		newfdp->fd_nrdir = cache_hold(newfdp->fd_nrdir);
1066 	}
1067 	if (newfdp->fd_jdir) {
1068 		vref(newfdp->fd_jdir);
1069 		newfdp->fd_njdir = cache_hold(newfdp->fd_njdir);
1070 	}
1071 	newfdp->fd_refcnt = 1;
1072 
1073 	/*
1074 	 * If the number of open files fits in the internal arrays
1075 	 * of the open file structure, use them, otherwise allocate
1076 	 * additional memory for the number of descriptors currently
1077 	 * in use.
1078 	 */
1079 	if (newfdp->fd_lastfile < NDFILE) {
1080 		newfdp->fd_ofiles = ((struct filedesc0 *) newfdp)->fd_dfiles;
1081 		newfdp->fd_ofileflags =
1082 		    ((struct filedesc0 *) newfdp)->fd_dfileflags;
1083 		i = NDFILE;
1084 	} else {
1085 		/*
1086 		 * Compute the smallest multiple of NDEXTENT needed
1087 		 * for the file descriptors currently in use,
1088 		 * allowing the table to shrink.
1089 		 */
1090 		i = newfdp->fd_nfiles;
1091 		while (i > 2 * NDEXTENT && i > newfdp->fd_lastfile * 2)
1092 			i /= 2;
1093 		newfdp->fd_ofiles = malloc(i * OFILESIZE, M_FILEDESC, M_WAITOK);
1094 		newfdp->fd_ofileflags = (char *) &newfdp->fd_ofiles[i];
1095 	}
1096 	newfdp->fd_nfiles = i;
1097 	bcopy(fdp->fd_ofiles, newfdp->fd_ofiles, i * sizeof(struct file **));
1098 	bcopy(fdp->fd_ofileflags, newfdp->fd_ofileflags, i * sizeof(char));
1099 
1100 	/*
1101 	 * kq descriptors cannot be copied.
1102 	 */
1103 	if (newfdp->fd_knlistsize != -1) {
1104 		fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile];
1105 		for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--) {
1106 			if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE) {
1107 				*fpp = NULL;
1108 				if (i < newfdp->fd_freefile)
1109 					newfdp->fd_freefile = i;
1110 			}
1111 			if (*fpp == NULL && i == newfdp->fd_lastfile && i > 0)
1112 				newfdp->fd_lastfile--;
1113 		}
1114 		newfdp->fd_knlist = NULL;
1115 		newfdp->fd_knlistsize = -1;
1116 		newfdp->fd_knhash = NULL;
1117 		newfdp->fd_knhashmask = 0;
1118 	}
1119 
1120 	fpp = newfdp->fd_ofiles;
1121 	for (i = newfdp->fd_lastfile; i-- >= 0; fpp++) {
1122 		if (*fpp != NULL)
1123 			fhold(*fpp);
1124 	}
1125 	return (newfdp);
1126 }
1127 
1128 /*
1129  * Release a filedesc structure.
1130  */
1131 void
1132 fdfree(struct proc *p)
1133 {
1134 	struct thread *td = p->p_thread;
1135 	struct filedesc *fdp = p->p_fd;
1136 	struct file **fpp;
1137 	int i;
1138 	struct filedesc_to_leader *fdtol;
1139 	struct file *fp;
1140 	struct vnode *vp;
1141 	struct flock lf;
1142 
1143 	/* Certain daemons might not have file descriptors. */
1144 	if (fdp == NULL)
1145 		return;
1146 
1147 	/* Check for special need to clear POSIX style locks */
1148 	fdtol = p->p_fdtol;
1149 	if (fdtol != NULL) {
1150 		KASSERT(fdtol->fdl_refcount > 0,
1151 			("filedesc_to_refcount botch: fdl_refcount=%d",
1152 			 fdtol->fdl_refcount));
1153 		if (fdtol->fdl_refcount == 1 &&
1154 		    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1155 			i = 0;
1156 			fpp = fdp->fd_ofiles;
1157 			for (i = 0, fpp = fdp->fd_ofiles;
1158 			     i <= fdp->fd_lastfile;
1159 			     i++, fpp++) {
1160 				if (*fpp == NULL ||
1161 				    (*fpp)->f_type != DTYPE_VNODE)
1162 					continue;
1163 				fp = *fpp;
1164 				fhold(fp);
1165 				lf.l_whence = SEEK_SET;
1166 				lf.l_start = 0;
1167 				lf.l_len = 0;
1168 				lf.l_type = F_UNLCK;
1169 				vp = (struct vnode *)fp->f_data;
1170 				(void) VOP_ADVLOCK(vp,
1171 						   (caddr_t)p->p_leader,
1172 						   F_UNLCK,
1173 						   &lf,
1174 						   F_POSIX);
1175 				fdrop(fp, p->p_thread);
1176 				fpp = fdp->fd_ofiles + i;
1177 			}
1178 		}
1179 	retry:
1180 		if (fdtol->fdl_refcount == 1) {
1181 			if (fdp->fd_holdleaderscount > 0 &&
1182 			    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1183 				/*
1184 				 * close() or do_dup() has cleared a reference
1185 				 * in a shared file descriptor table.
1186 				 */
1187 				fdp->fd_holdleaderswakeup = 1;
1188 				tsleep(&fdp->fd_holdleaderscount,
1189 				       0, "fdlhold", 0);
1190 				goto retry;
1191 			}
1192 			if (fdtol->fdl_holdcount > 0) {
1193 				/*
1194 				 * Ensure that fdtol->fdl_leader
1195 				 * remains valid in closef().
1196 				 */
1197 				fdtol->fdl_wakeup = 1;
1198 				tsleep(fdtol, 0, "fdlhold", 0);
1199 				goto retry;
1200 			}
1201 		}
1202 		fdtol->fdl_refcount--;
1203 		if (fdtol->fdl_refcount == 0 &&
1204 		    fdtol->fdl_holdcount == 0) {
1205 			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
1206 			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
1207 		} else
1208 			fdtol = NULL;
1209 		p->p_fdtol = NULL;
1210 		if (fdtol != NULL)
1211 			free(fdtol, M_FILEDESC_TO_LEADER);
1212 	}
1213 	if (--fdp->fd_refcnt > 0)
1214 		return;
1215 	/*
1216 	 * We are the last reference to the structure, so we can
1217 	 * safely assume it will not change out from under us.
1218 	 */
1219 	fpp = fdp->fd_ofiles;
1220 	for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
1221 		if (*fpp)
1222 			(void) closef(*fpp, td);
1223 	}
1224 	if (fdp->fd_nfiles > NDFILE)
1225 		free(fdp->fd_ofiles, M_FILEDESC);
1226 	if (fdp->fd_cdir) {
1227 		cache_drop(fdp->fd_ncdir);
1228 		vrele(fdp->fd_cdir);
1229 	}
1230 	if (fdp->fd_rdir) {
1231 		cache_drop(fdp->fd_nrdir);
1232 		vrele(fdp->fd_rdir);
1233 	}
1234 	if (fdp->fd_jdir) {
1235 		cache_drop(fdp->fd_njdir);
1236 		vrele(fdp->fd_jdir);
1237 	}
1238 	if (fdp->fd_knlist)
1239 		free(fdp->fd_knlist, M_KQUEUE);
1240 	if (fdp->fd_knhash)
1241 		free(fdp->fd_knhash, M_KQUEUE);
1242 	free(fdp, M_FILEDESC);
1243 }
1244 
1245 /*
1246  * For setugid programs, we don't want people to use that setugidness
1247  * to generate error messages which write to a file that would
1248  * otherwise be off-limits to the process.
1249  *
1250  * This is a gross hack to plug the hole.  A better solution would involve
1251  * a special vop or other form of generalized access control mechanism.  We
1252  * go ahead and just reject all procfs filesystem accesses as dangerous.
1253  *
1254  * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
1255  * sufficient.  We also don't check for setugidness since we know we are.
1256  */
1257 static int
1258 is_unsafe(struct file *fp)
1259 {
1260 	if (fp->f_type == DTYPE_VNODE &&
1261 	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
1262 		return (1);
1263 	return (0);
1264 }
1265 
1266 /*
1267  * Make this setugid thing safe, if at all possible.
1268  */
1269 void
1270 setugidsafety(struct proc *p)
1271 {
1272 	struct thread *td = p->p_thread;
1273 	struct filedesc *fdp = p->p_fd;
1274 	int i;
1275 
1276 	/* Certain daemons might not have file descriptors. */
1277 	if (fdp == NULL)
1278 		return;
1279 
1280 	/*
1281 	 * note: fdp->fd_ofiles may be reallocated out from under us while
1282 	 * we are blocked in a close.  Be careful!
1283 	 */
1284 	for (i = 0; i <= fdp->fd_lastfile; i++) {
1285 		if (i > 2)
1286 			break;
1287 		if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
1288 			struct file *fp;
1289 
1290 #if 0
1291 			if ((fdp->fd_ofileflags[i] & UF_MAPPED) != 0)
1292 				(void) munmapfd(p, i);
1293 #endif
1294 			if (i < fdp->fd_knlistsize)
1295 				knote_fdclose(p, i);
1296 			/*
1297 			 * NULL-out descriptor prior to close to avoid
1298 			 * a race while close blocks.
1299 			 */
1300 			fp = fdp->fd_ofiles[i];
1301 			fdp->fd_ofiles[i] = NULL;
1302 			fdp->fd_ofileflags[i] = 0;
1303 			if (i < fdp->fd_freefile)
1304 				fdp->fd_freefile = i;
1305 			(void) closef(fp, td);
1306 		}
1307 	}
1308 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
1309 		fdp->fd_lastfile--;
1310 }
1311 
1312 /*
1313  * Close any files on exec?
1314  */
1315 void
1316 fdcloseexec(struct proc *p)
1317 {
1318 	struct thread *td = p->p_thread;
1319 	struct filedesc *fdp = p->p_fd;
1320 	int i;
1321 
1322 	/* Certain daemons might not have file descriptors. */
1323 	if (fdp == NULL)
1324 		return;
1325 
1326 	/*
1327 	 * We cannot cache fd_ofiles or fd_ofileflags since operations
1328 	 * may block and rip them out from under us.
1329 	 */
1330 	for (i = 0; i <= fdp->fd_lastfile; i++) {
1331 		if (fdp->fd_ofiles[i] != NULL &&
1332 		    (fdp->fd_ofileflags[i] & UF_EXCLOSE)) {
1333 			struct file *fp;
1334 
1335 #if 0
1336 			if (fdp->fd_ofileflags[i] & UF_MAPPED)
1337 				(void) munmapfd(p, i);
1338 #endif
1339 			if (i < fdp->fd_knlistsize)
1340 				knote_fdclose(p, i);
1341 			/*
1342 			 * NULL-out descriptor prior to close to avoid
1343 			 * a race while close blocks.
1344 			 */
1345 			fp = fdp->fd_ofiles[i];
1346 			fdp->fd_ofiles[i] = NULL;
1347 			fdp->fd_ofileflags[i] = 0;
1348 			if (i < fdp->fd_freefile)
1349 				fdp->fd_freefile = i;
1350 			(void) closef(fp, td);
1351 		}
1352 	}
1353 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
1354 		fdp->fd_lastfile--;
1355 }
1356 
1357 /*
1358  * It is unsafe for set[ug]id processes to be started with file
1359  * descriptors 0..2 closed, as these descriptors are given implicit
1360  * significance in the Standard C library.  fdcheckstd() will create a
1361  * descriptor referencing /dev/null for each of stdin, stdout, and
1362  * stderr that is not already open.
1363  */
1364 int
1365 fdcheckstd(struct proc *p)
1366 {
1367 	struct thread *td = p->p_thread;
1368 	struct nlookupdata nd;
1369 	struct filedesc *fdp;
1370 	struct file *fp;
1371 	register_t retval;
1372 	int fd, i, error, flags, devnull;
1373 
1374 	fdp = p->p_fd;
1375 	if (fdp == NULL)
1376 		return (0);
1377 	devnull = -1;
1378 	error = 0;
1379 	for (i = 0; i < 3; i++) {
1380 		if (fdp->fd_ofiles[i] != NULL)
1381 			continue;
1382 		if (devnull < 0) {
1383 			if ((error = falloc(p, &fp, NULL)) != 0)
1384 				break;
1385 
1386 			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
1387 						NLC_FOLLOW|NLC_LOCKVP);
1388 			flags = FREAD | FWRITE;
1389 			if (error == 0)
1390 				error = vn_open(&nd, fp, flags, 0);
1391 			if (error == 0)
1392 				error = fsetfd(p, fp, &fd);
1393 			fdrop(fp, td);
1394 			nlookup_done(&nd);
1395 			if (error)
1396 				break;
1397 			KKASSERT(i == fd);
1398 			devnull = fd;
1399 		} else {
1400 			error = kern_dup(DUP_FIXED, devnull, i, &retval);
1401 			if (error != 0)
1402 				break;
1403 		}
1404 	}
1405 	return (error);
1406 }
1407 
1408 /*
1409  * Internal form of close.
1410  * Decrement reference count on file structure.
1411  * Note: td and/or p may be NULL when closing a file
1412  * that was being passed in a message.
1413  */
1414 int
1415 closef(struct file *fp, struct thread *td)
1416 {
1417 	struct vnode *vp;
1418 	struct flock lf;
1419 	struct filedesc_to_leader *fdtol;
1420 	struct proc *p;
1421 
1422 	if (fp == NULL)
1423 		return (0);
1424 	if (td == NULL) {
1425 		td = curthread;
1426 		p = NULL;		/* allow no proc association */
1427 	} else {
1428 		p = td->td_proc;	/* can also be NULL */
1429 	}
1430 	/*
1431 	 * POSIX record locking dictates that any close releases ALL
1432 	 * locks owned by this process.  This is handled by setting
1433 	 * a flag in the unlock to free ONLY locks obeying POSIX
1434 	 * semantics, and not to free BSD-style file locks.
1435 	 * If the descriptor was in a message, POSIX-style locks
1436 	 * aren't passed with the descriptor.
1437 	 */
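	/*
	 * Illustrative consequence of the POSIX semantics above, assuming
	 * both descriptors refer to the same file:
	 *
	 *	fd1 = open(path, O_RDWR);
	 *	fd2 = open(path, O_RDWR);
	 *	fcntl(fd1, F_SETLK, &wrlock);
	 *	close(fd2);		releases the lock taken via fd1
	 *
	 * BSD-style flock() locks are keyed on the file structure instead
	 * and are released in fdrop() via FHASLOCK, not here.
	 */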
1438 	if (p != NULL &&
1439 	    fp->f_type == DTYPE_VNODE) {
1440 		if ((p->p_leader->p_flag & P_ADVLOCK) != 0) {
1441 			lf.l_whence = SEEK_SET;
1442 			lf.l_start = 0;
1443 			lf.l_len = 0;
1444 			lf.l_type = F_UNLCK;
1445 			vp = (struct vnode *)fp->f_data;
1446 			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
1447 					   &lf, F_POSIX);
1448 		}
1449 		fdtol = p->p_fdtol;
1450 		if (fdtol != NULL) {
1451 			/*
1452 			 * Handle special case where file descriptor table
1453 			 * is shared between multiple process leaders.
1454 			 */
1455 			for (fdtol = fdtol->fdl_next;
1456 			     fdtol != p->p_fdtol;
1457 			     fdtol = fdtol->fdl_next) {
1458 				if ((fdtol->fdl_leader->p_flag &
1459 				     P_ADVLOCK) == 0)
1460 					continue;
1461 				fdtol->fdl_holdcount++;
1462 				lf.l_whence = SEEK_SET;
1463 				lf.l_start = 0;
1464 				lf.l_len = 0;
1465 				lf.l_type = F_UNLCK;
1466 				vp = (struct vnode *)fp->f_data;
1467 				(void) VOP_ADVLOCK(vp,
1468 						   (caddr_t)p->p_leader,
1469 						   F_UNLCK, &lf, F_POSIX);
1470 				fdtol->fdl_holdcount--;
1471 				if (fdtol->fdl_holdcount == 0 &&
1472 				    fdtol->fdl_wakeup != 0) {
1473 					fdtol->fdl_wakeup = 0;
1474 					wakeup(fdtol);
1475 				}
1476 			}
1477 		}
1478 	}
1479 	return (fdrop(fp, td));
1480 }
1481 
1482 int
1483 fdrop(struct file *fp, struct thread *td)
1484 {
1485 	struct flock lf;
1486 	struct vnode *vp;
1487 	int error;
1488 
1489 	if (--fp->f_count > 0)
1490 		return (0);
1491 	if (fp->f_count < 0)
1492 		panic("fdrop: count < 0");
1493 	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
1494 		lf.l_whence = SEEK_SET;
1495 		lf.l_start = 0;
1496 		lf.l_len = 0;
1497 		lf.l_type = F_UNLCK;
1498 		vp = (struct vnode *)fp->f_data;
1499 		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
1500 	}
1501 	if (fp->f_ops != &badfileops)
1502 		error = fo_close(fp, td);
1503 	else
1504 		error = 0;
1505 	ffree(fp);
1506 	return (error);
1507 }
1508 
1509 /*
1510  * Apply an advisory lock on a file descriptor.
1511  *
1512  * Just attempt to get a record lock of the requested type on
1513  * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
1514  */
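/*
 * Example (illustrative only): flock(fd, LOCK_EX | LOCK_NB) arrives with
 * uap->how = LOCK_EX | LOCK_NB and becomes a non-blocking F_WRLCK
 * VOP_ADVLOCK() over the whole file with F_FLOCK semantics; LOCK_SH maps
 * to F_RDLCK and LOCK_UN clears FHASLOCK and unlocks.  Unlike the fcntl()
 * locks above, these locks are keyed on the file structure, not the
 * process:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) < 0 && errno == EWOULDBLOCK)
 *		(somebody else holds the lock)
 */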
1515 /* ARGSUSED */
1516 int
1517 flock(struct flock_args *uap)
1518 {
1519 	struct proc *p = curproc;
1520 	struct filedesc *fdp = p->p_fd;
1521 	struct file *fp;
1522 	struct vnode *vp;
1523 	struct flock lf;
1524 
1525 	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
1526 	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
1527 		return (EBADF);
1528 	if (fp->f_type != DTYPE_VNODE)
1529 		return (EOPNOTSUPP);
1530 	vp = (struct vnode *)fp->f_data;
1531 	lf.l_whence = SEEK_SET;
1532 	lf.l_start = 0;
1533 	lf.l_len = 0;
1534 	if (uap->how & LOCK_UN) {
1535 		lf.l_type = F_UNLCK;
1536 		fp->f_flag &= ~FHASLOCK;
1537 		return (VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK));
1538 	}
1539 	if (uap->how & LOCK_EX)
1540 		lf.l_type = F_WRLCK;
1541 	else if (uap->how & LOCK_SH)
1542 		lf.l_type = F_RDLCK;
1543 	else
1544 		return (EBADF);
1545 	fp->f_flag |= FHASLOCK;
1546 	if (uap->how & LOCK_NB)
1547 		return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK));
1548 	return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK|F_WAIT));
1549 }
1550 
1551 /*
1552  * File Descriptor pseudo-device driver (/dev/fd/).
1553  *
1554  * Opening minor device N dup()s the file (if any) connected to file
1555  * descriptor N belonging to the calling process.  Note that this driver
1556  * consists of only the ``open()'' routine, because all subsequent
1557  * references to this file will be direct to the other driver.
1558  */
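/*
 * Example (illustrative only): open("/dev/fd/3", O_RDONLY) reaches
 * fdopen() below, which records 3 in p_dupfd and fails with ENODEV;
 * the open path then calls dupfdopen(), which dups descriptor 3 into the
 * newly allocated slot as long as the requested mode is a subset of
 * descriptor 3's existing FREAD/FWRITE flags.  The net effect is the
 * same as dup(3), reached through the filesystem namespace.
 */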
1559 /* ARGSUSED */
1560 static int
1561 fdopen(dev_t dev, int mode, int type, struct thread *td)
1562 {
1563 	KKASSERT(td->td_proc != NULL);
1564 
1565 	/*
1566 	 * XXX Kludge: set curproc->p_dupfd to contain the value of the
1567 	 * file descriptor being sought for duplication. The error
1568 	 * return ensures that the vnode for this device will be released
1569 	 * by vn_open. Open will detect this special error and take the
1570 	 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
1571 	 * will simply report the error.
1572 	 */
1573 	td->td_proc->p_dupfd = minor(dev);
1574 	return (ENODEV);
1575 }
1576 
1577 /*
1578  * Duplicate the specified descriptor to a free descriptor.
1579  */
1580 int
1581 dupfdopen(struct filedesc *fdp, int indx, int dfd, int mode, int error)
1582 {
1583 	struct file *wfp;
1584 	struct file *fp;
1585 
1586 	/*
1587 	 * If the to-be-dup'd fd number is greater than the allowed number
1588 	 * of file descriptors, or the fd to be dup'd has already been
1589 	 * closed, then reject.
1590 	 */
1591 	if ((u_int)dfd >= fdp->fd_nfiles ||
1592 	    (wfp = fdp->fd_ofiles[dfd]) == NULL) {
1593 		return (EBADF);
1594 	}
1595 
1596 	/*
1597 	 * There are two cases of interest here.
1598 	 *
1599 	 * For ENODEV simply dup (dfd) to file descriptor
1600 	 * (indx) and return.
1601 	 *
1602 	 * For ENXIO steal away the file structure from (dfd) and
1603 	 * store it in (indx).  (dfd) is effectively closed by
1604 	 * this operation.
1605 	 *
1606 	 * Any other error code is just returned.
1607 	 */
1608 	switch (error) {
1609 	case ENODEV:
1610 		/*
1611 		 * Check that the mode the file is being opened for is a
1612 		 * subset of the mode of the existing descriptor.
1613 		 */
1614 		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag)
1615 			return (EACCES);
1616 		fp = fdp->fd_ofiles[indx];
1617 #if 0
1618 		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
1619 			(void) munmapfd(p, indx);
1620 #endif
1621 		fdp->fd_ofiles[indx] = wfp;
1622 		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1623 		fhold(wfp);
1624 		if (indx > fdp->fd_lastfile)
1625 			fdp->fd_lastfile = indx;
1626 		/*
1627 		 * we now own the reference to fp that the ofiles[] array
1628 		 * used to own.  Release it.
1629 		 */
1630 		if (fp)
1631 			fdrop(fp, curthread);
1632 		return (0);
1633 
1634 	case ENXIO:
1635 		/*
1636 		 * Steal away the file pointer from dfd, and stuff it into indx.
1637 		 */
1638 		fp = fdp->fd_ofiles[indx];
1639 #if 0
1640 		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
1641 			(void) munmapfd(p, indx);
1642 #endif
1643 		fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
1644 		fdp->fd_ofiles[dfd] = NULL;
1645 		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1646 		fdp->fd_ofileflags[dfd] = 0;
1647 
1648 		/*
1649 		 * we now own the reference to fp that the ofiles[] array
1650 		 * used to own.  Release it.
1651 		 */
1652 		if (fp)
1653 			fdrop(fp, curthread);
1654 		/*
1655 		 * Complete the clean up of the filedesc structure by
1656 		 * recomputing the various hints.
1657 		 */
1658 		if (indx > fdp->fd_lastfile) {
1659 			fdp->fd_lastfile = indx;
1660 		} else {
1661 			while (fdp->fd_lastfile > 0 &&
1662 			   fdp->fd_ofiles[fdp->fd_lastfile] == NULL) {
1663 				fdp->fd_lastfile--;
1664 			}
1665 			if (dfd < fdp->fd_freefile)
1666 				fdp->fd_freefile = dfd;
1667 		}
1668 		return (0);
1669 
1670 	default:
1671 		return (error);
1672 	}
1673 	/* NOTREACHED */
1674 }
1675 
1676 
1677 struct filedesc_to_leader *
1678 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
1679 			 struct proc *leader)
1680 {
1681 	struct filedesc_to_leader *fdtol;
1682 
1683 	fdtol = malloc(sizeof(struct filedesc_to_leader),
1684 			M_FILEDESC_TO_LEADER, M_WAITOK);
1685 	fdtol->fdl_refcount = 1;
1686 	fdtol->fdl_holdcount = 0;
1687 	fdtol->fdl_wakeup = 0;
1688 	fdtol->fdl_leader = leader;
1689 	if (old != NULL) {
1690 		fdtol->fdl_next = old->fdl_next;
1691 		fdtol->fdl_prev = old;
1692 		old->fdl_next = fdtol;
1693 		fdtol->fdl_next->fdl_prev = fdtol;
1694 	} else {
1695 		fdtol->fdl_next = fdtol;
1696 		fdtol->fdl_prev = fdtol;
1697 	}
1698 	return fdtol;
1699 }
1700 
1701 /*
1702  * Get file structures.
1703  */
1704 static int
1705 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
1706 {
1707 	struct kinfo_file kf;
1708 	struct filedesc *fdp;
1709 	struct file *fp;
1710 	struct proc *p;
1711 	int count;
1712 	int error;
1713 	int n;
1714 
1715 	/*
1716 	 * Note: because the number of file descriptors is calculated
1717 	 * in different ways for sizing vs returning the data,
1718 	 * there is information leakage from the first loop.  However,
1719 	 * it is of a similar order of magnitude to the leakage from
1720 	 * global system statistics such as kern.openfiles.
1721 	 *
1722 	 * When just doing a count, note that we cannot just count
1723 	 * the elements and add f_count via the filehead list because
1724 	 * threaded processes share their descriptor table and f_count might
1725 	 * still be '1' in that case.
1726 	 */
1727 	count = 0;
1728 	error = 0;
1729 	LIST_FOREACH(p, &allproc, p_list) {
1730 		if (p->p_stat == SIDL)
1731 			continue;
1732 		if (!PRISON_CHECK(req->td->td_proc->p_ucred, p->p_ucred))
1733 			continue;
1734 		if ((fdp = p->p_fd) == NULL)
1735 			continue;
1736 		for (n = 0; n < fdp->fd_nfiles; ++n) {
1737 			if ((fp = fdp->fd_ofiles[n]) == NULL)
1738 				continue;
1739 			if (req->oldptr == NULL) {
1740 				++count;
1741 			} else {
1742 				kcore_make_file(&kf, fp, p->p_pid,
1743 						p->p_ucred->cr_uid, n);
1744 				error = SYSCTL_OUT(req, &kf, sizeof(kf));
1745 				if (error)
1746 					break;
1747 			}
1748 		}
1749 		if (error)
1750 			break;
1751 	}
1752 
1753 	/*
1754 	 * When just calculating the size, overestimate a bit to try to
1755 	 * prevent system activity from causing the buffer-fill call
1756 	 * to fail later on.
1757 	 */
1758 	if (req->oldptr == NULL) {
1759 		count = (count + 16) + (count / 10);
1760 		error = SYSCTL_OUT(req, NULL, count * sizeof(kf));
1761 	}
1762 	return (error);
1763 }
1764 
1765 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
1766     0, 0, sysctl_kern_file, "S,file", "Entire file table");
1767 
1768 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
1769     &maxfilesperproc, 0, "Maximum files allowed open per process");
1770 
1771 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
1772     &maxfiles, 0, "Maximum number of files");
1773 
1774 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
1775     &maxfilesrootres, 0, "Descriptors reserved for root use");
1776 
1777 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
1778 	&nfiles, 0, "System-wide number of open files");
1779 
1780 static void
1781 fildesc_drvinit(void *unused)
1782 {
1783 	int fd;
1784 
1785 	cdevsw_add(&fildesc_cdevsw, 0, 0);
1786 	for (fd = 0; fd < NUMFDESC; fd++) {
1787 		make_dev(&fildesc_cdevsw, fd,
1788 		    UID_BIN, GID_BIN, 0666, "fd/%d", fd);
1789 	}
1790 	make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
1791 	make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
1792 	make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
1793 }
1794 
1795 struct fileops badfileops = {
1796 	NULL,	/* port */
1797 	NULL,	/* clone */
1798 	badfo_readwrite,
1799 	badfo_readwrite,
1800 	badfo_ioctl,
1801 	badfo_poll,
1802 	badfo_kqfilter,
1803 	badfo_stat,
1804 	badfo_close
1805 };
1806 
1807 static int
1808 badfo_readwrite(
1809 	struct file *fp,
1810 	struct uio *uio,
1811 	struct ucred *cred,
1812 	int flags,
1813 	struct thread *td
1814 ) {
1815 	return (EBADF);
1816 }
1817 
1818 static int
1819 badfo_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
1820 {
1821 	return (EBADF);
1822 }
1823 
1824 static int
1825 badfo_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
1826 {
1827 	return (0);
1828 }
1829 
1830 static int
1831 badfo_kqfilter(struct file *fp, struct knote *kn)
1832 {
1833 	return (0);
1834 }
1835 
1836 static int
1837 badfo_stat(struct file *fp, struct stat *sb, struct thread *td)
1838 {
1839 	return (EBADF);
1840 }
1841 
1842 static int
1843 badfo_close(struct file *fp, struct thread *td)
1844 {
1845 	return (EBADF);
1846 }
1847 
1848 SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
1849 					fildesc_drvinit,NULL)
1850