xref: /dragonfly/sys/kern/kern_descrip.c (revision 86fe9e07)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
39  * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
40  * $DragonFly: src/sys/kern/kern_descrip.c,v 1.27 2004/07/29 20:32:24 dillon Exp $
41  */
42 
43 #include "opt_compat.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/sysproto.h>
48 #include <sys/conf.h>
49 #include <sys/filedesc.h>
50 #include <sys/kernel.h>
51 #include <sys/sysctl.h>
52 #include <sys/vnode.h>
53 #include <sys/proc.h>
54 #include <sys/namei.h>
55 #include <sys/file.h>
56 #include <sys/stat.h>
57 #include <sys/filio.h>
58 #include <sys/fcntl.h>
59 #include <sys/unistd.h>
60 #include <sys/resourcevar.h>
61 #include <sys/event.h>
62 #include <sys/kern_syscall.h>
63 
64 #include <vm/vm.h>
65 #include <vm/vm_extern.h>
66 
67 #include <sys/file2.h>
68 
69 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
70 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
71 		     "file desc to leader structures");
72 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
73 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
74 
75 static	 d_open_t  fdopen;
76 #define NUMFDESC 64
77 
78 #define CDEV_MAJOR 22
79 static struct cdevsw fildesc_cdevsw = {
80 	/* name */	"FD",
81 	/* maj */	CDEV_MAJOR,
82 	/* flags */	0,
83 	/* port */      NULL,
84 	/* clone */	NULL,
85 
86 	/* open */	fdopen,
87 	/* close */	noclose,
88 	/* read */	noread,
89 	/* write */	nowrite,
90 	/* ioctl */	noioctl,
91 	/* poll */	nopoll,
92 	/* mmap */	nommap,
93 	/* strategy */	nostrategy,
94 	/* dump */	nodump,
95 	/* psize */	nopsize
96 };
97 
98 static int badfo_readwrite (struct file *fp, struct uio *uio,
99     struct ucred *cred, int flags, struct thread *td);
100 static int badfo_ioctl (struct file *fp, u_long com, caddr_t data,
101     struct thread *td);
102 static int badfo_poll (struct file *fp, int events,
103     struct ucred *cred, struct thread *td);
104 static int badfo_kqfilter (struct file *fp, struct knote *kn);
105 static int badfo_stat (struct file *fp, struct stat *sb, struct thread *td);
106 static int badfo_close (struct file *fp, struct thread *td);
107 
108 /*
109  * Descriptor management.
110  */
111 struct filelist filehead;	/* head of list of open files */
112 int nfiles;			/* actual number of open files */
113 extern int cmask;
114 
115 /*
116  * System calls on descriptors.
117  */
118 /* ARGSUSED */
119 int
120 getdtablesize(struct getdtablesize_args *uap)
121 {
122 	struct proc *p = curproc;
123 
124 	uap->sysmsg_result =
125 	    min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
126 	return (0);
127 }
128 
129 /*
130  * Duplicate a file descriptor to a particular value.
131  *
132  * Note that a potential race condition exists when closing descriptors
133  * from a shared descriptor table (via rfork).
134  */
135 /* ARGSUSED */
136 int
137 dup2(struct dup2_args *uap)
138 {
139 	int error;
140 
141 	error = kern_dup(DUP_FIXED, uap->from, uap->to, uap->sysmsg_fds);
142 
143 	return (error);
144 }
145 
146 /*
147  * Duplicate a file descriptor.
148  */
149 /* ARGSUSED */
150 int
151 dup(struct dup_args *uap)
152 {
153 	int error;
154 
155 	error = kern_dup(DUP_VARIABLE, uap->fd, 0, uap->sysmsg_fds);
156 
157 	return (error);
158 }
159 
160 int
161 kern_fcntl(int fd, int cmd, union fcntl_dat *dat)
162 {
163 	struct thread *td = curthread;
164 	struct proc *p = td->td_proc;
165 	struct filedesc *fdp = p->p_fd;
166 	struct file *fp;
167 	char *pop;
168 	struct vnode *vp;
169 	u_int newmin;
170 	int tmp, error, flg = F_POSIX;
171 
172 	KKASSERT(p);
173 
174 	if ((unsigned)fd >= fdp->fd_nfiles ||
175 	    (fp = fdp->fd_ofiles[fd]) == NULL)
176 		return (EBADF);
177 	pop = &fdp->fd_ofileflags[fd];
178 
179 	switch (cmd) {
180 	case F_DUPFD:
181 		newmin = dat->fc_fd;
182 		if (newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
183 		    newmin > maxfilesperproc)
184 			return (EINVAL);
185 		error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
186 		return (error);
187 
188 	case F_GETFD:
189 		dat->fc_cloexec = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0;
190 		return (0);
191 
192 	case F_SETFD:
193 		*pop = (*pop &~ UF_EXCLOSE) |
194 		    (dat->fc_cloexec & FD_CLOEXEC ? UF_EXCLOSE : 0);
195 		return (0);
196 
197 	case F_GETFL:
198 		dat->fc_flags = OFLAGS(fp->f_flag);
199 		return (0);
200 
201 	case F_SETFL:
202 		fhold(fp);
203 		fp->f_flag &= ~FCNTLFLAGS;
204 		fp->f_flag |= FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
205 		tmp = fp->f_flag & FNONBLOCK;
206 		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
207 		if (error) {
208 			fdrop(fp, td);
209 			return (error);
210 		}
211 		tmp = fp->f_flag & FASYNC;
212 		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, td);
213 		if (!error) {
214 			fdrop(fp, td);
215 			return (0);
216 		}
217 		fp->f_flag &= ~FNONBLOCK;
218 		tmp = 0;
219 		fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
220 		fdrop(fp, td);
221 		return (error);
222 
223 	case F_GETOWN:
224 		fhold(fp);
225 		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, td);
226 		fdrop(fp, td);
227 		return(error);
228 
229 	case F_SETOWN:
230 		fhold(fp);
231 		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, td);
232 		fdrop(fp, td);
233 		return(error);
234 
235 	case F_SETLKW:
236 		flg |= F_WAIT;
237 		/* Fall into F_SETLK */
238 
239 	case F_SETLK:
240 		if (fp->f_type != DTYPE_VNODE)
241 			return (EBADF);
242 		vp = (struct vnode *)fp->f_data;
243 
244 		/*
245 		 * copyin/lockop may block
246 		 */
247 		fhold(fp);
248 		if (dat->fc_flock.l_whence == SEEK_CUR)
249 			dat->fc_flock.l_start += fp->f_offset;
250 
251 		switch (dat->fc_flock.l_type) {
252 		case F_RDLCK:
253 			if ((fp->f_flag & FREAD) == 0) {
254 				error = EBADF;
255 				break;
256 			}
257 			p->p_leader->p_flag |= P_ADVLOCK;
258 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
259 			    &dat->fc_flock, flg);
260 			break;
261 		case F_WRLCK:
262 			if ((fp->f_flag & FWRITE) == 0) {
263 				error = EBADF;
264 				break;
265 			}
266 			p->p_leader->p_flag |= P_ADVLOCK;
267 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
268 			    &dat->fc_flock, flg);
269 			break;
270 		case F_UNLCK:
271 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
272 				&dat->fc_flock, F_POSIX);
273 			break;
274 		default:
275 			error = EINVAL;
276 			break;
277 		}
278 		/* Check for race with close */
279 		if ((unsigned) fd >= fdp->fd_nfiles ||
280 		    fp != fdp->fd_ofiles[fd]) {
281 			dat->fc_flock.l_whence = SEEK_SET;
282 			dat->fc_flock.l_start = 0;
283 			dat->fc_flock.l_len = 0;
284 			dat->fc_flock.l_type = F_UNLCK;
285 			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
286 					   F_UNLCK, &dat->fc_flock, F_POSIX);
287 		}
288 		fdrop(fp, td);
289 		return(error);
290 
291 	case F_GETLK:
292 		if (fp->f_type != DTYPE_VNODE)
293 			return (EBADF);
294 		vp = (struct vnode *)fp->f_data;
295 		/*
296 		 * copyin/lockop may block
297 		 */
298 		fhold(fp);
299 		if (dat->fc_flock.l_type != F_RDLCK &&
300 		    dat->fc_flock.l_type != F_WRLCK &&
301 		    dat->fc_flock.l_type != F_UNLCK) {
302 			fdrop(fp, td);
303 			return (EINVAL);
304 		}
305 		if (dat->fc_flock.l_whence == SEEK_CUR)
306 			dat->fc_flock.l_start += fp->f_offset;
307 		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
308 			    &dat->fc_flock, F_POSIX);
309 		fdrop(fp, td);
310 		return(error);
311 	default:
312 		return (EINVAL);
313 	}
314 	/* NOTREACHED */
315 }
316 
317 /*
318  * The file control system call.
319  */
320 int
321 fcntl(struct fcntl_args *uap)
322 {
323 	union fcntl_dat dat;
324 	int error;
325 
326 	switch (uap->cmd) {
327 	case F_DUPFD:
328 		dat.fc_fd = uap->arg;
329 		break;
330 	case F_SETFD:
331 		dat.fc_cloexec = uap->arg;
332 		break;
333 	case F_SETFL:
334 		dat.fc_flags = uap->arg;
335 		break;
336 	case F_SETOWN:
337 		dat.fc_owner = uap->arg;
338 		break;
339 	case F_SETLKW:
340 	case F_SETLK:
341 	case F_GETLK:
342 		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
343 		    sizeof(struct flock));
344 		if (error)
345 			return (error);
346 		break;
347 	}
348 
349 	error = kern_fcntl(uap->fd, uap->cmd, &dat);
350 
351 	if (error == 0) {
352 		switch (uap->cmd) {
353 		case F_DUPFD:
354 			uap->sysmsg_result = dat.fc_fd;
355 			break;
356 		case F_GETFD:
357 			uap->sysmsg_result = dat.fc_cloexec;
358 			break;
359 		case F_GETFL:
360 			uap->sysmsg_result = dat.fc_flags;
361 			break;
362 		case F_GETOWN:
363 			uap->sysmsg_result = dat.fc_owner;
			break;
364 		case F_GETLK:
365 			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
366 			    sizeof(struct flock));
367 			break;
368 		}
369 	}
370 
371 	return (error);
372 }
373 
374 /*
375  * Common code for dup, dup2, and fcntl(F_DUPFD).
376  *
377  * The type flag can be either DUP_FIXED or DUP_VARIABLE.  DUP_FIXED tells
378  * kern_dup() to destructively dup over an existing file descriptor if new
379  * is already open.  DUP_VARIABLE tells kern_dup() to find the lowest
380  * unused file descriptor that is greater than or equal to new.
381  */
382 int
383 kern_dup(enum dup_type type, int old, int new, int *res)
384 {
385 	struct thread *td = curthread;
386 	struct proc *p = td->td_proc;
387 	struct filedesc *fdp = p->p_fd;
388 	struct file *fp;
389 	struct file *delfp;
390 	int holdleaders;
391 	int error, newfd;
392 
393 	/*
394 	 * Verify that we have a valid descriptor to dup from and
395 	 * possibly to dup to.
396 	 */
397 	if (old < 0 || new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
398 	    new >= maxfilesperproc)
399 		return (EBADF);
400 	if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL)
401 		return (EBADF);
402 	if (type == DUP_FIXED && old == new) {
403 		*res = new;
404 		return (0);
405 	}
406 	fp = fdp->fd_ofiles[old];
407 	fhold(fp);
408 
409 	/*
410 	 * Expand the table for the new descriptor if needed.  This may
411 	 * block, dropping and reacquiring the filedesc lock.
412 	 */
413 	if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
414 		error = fdalloc(p, new, &newfd);
415 		if (error) {
416 			fdrop(fp, td);
417 			return (error);
418 		}
419 	}
420 	if (type == DUP_VARIABLE)
421 		new = newfd;
422 
423 	/*
424 	 * If the old file changed out from under us then treat it as a
425 	 * bad file descriptor.  Userland should do its own locking to
426 	 * avoid this case.
427 	 */
428 	if (fdp->fd_ofiles[old] != fp) {
429 		if (fdp->fd_ofiles[new] == NULL) {
430 			if (new < fdp->fd_freefile)
431 				fdp->fd_freefile = new;
432 			while (fdp->fd_lastfile > 0 &&
433 			    fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
434 				fdp->fd_lastfile--;
435 		}
436 		fdrop(fp, td);
437 		return (EBADF);
438 	}
439 	KASSERT(old != new, ("new fd is same as old"));
440 
441 	/*
442 	 * Save info on the descriptor being overwritten.  We have
443 	 * to do the unmap now, but we cannot close it without
444 	 * introducing an ownership race for the slot.
445 	 */
446 	delfp = fdp->fd_ofiles[new];
447 	if (delfp != NULL && p->p_fdtol != NULL) {
448 		/*
449 		 * Ask fdfree() to sleep to ensure that all relevant
450 		 * process leaders can be traversed in closef().
451 		 */
452 		fdp->fd_holdleaderscount++;
453 		holdleaders = 1;
454 	} else
455 		holdleaders = 0;
456 	KASSERT(delfp == NULL || type == DUP_FIXED,
457 	    ("dup() picked an open file"));
458 #if 0
459 	if (delfp && (fdp->fd_ofileflags[new] & UF_MAPPED))
460 		(void) munmapfd(p, new);
461 #endif
462 
463 	/*
464 	 * Duplicate the source descriptor, update lastfile
465 	 */
466 	fdp->fd_ofiles[new] = fp;
467 	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
468 	if (new > fdp->fd_lastfile)
469 		fdp->fd_lastfile = new;
470 	*res = new;
471 
472 	/*
473 	 * If we dup'd over a valid file, we now own the reference to it
474 	 * and must dispose of it using closef() semantics (as if a
475 	 * close() were performed on it).
476 	 */
477 	if (delfp) {
478 		(void) closef(delfp, td);
479 		if (holdleaders) {
480 			fdp->fd_holdleaderscount--;
481 			if (fdp->fd_holdleaderscount == 0 &&
482 			    fdp->fd_holdleaderswakeup != 0) {
483 				fdp->fd_holdleaderswakeup = 0;
484 				wakeup(&fdp->fd_holdleaderscount);
485 			}
486 		}
487 	}
488 	return (0);
489 }
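
/*
 * Illustrative sketch (not part of the build; the helper name is
 * hypothetical): how the callers above map onto kern_dup().
 */
#if 0
static int
example_redirect_fd(int old_fd, int new_fd)
{
	int res;
	int error;

	/*
	 * DUP_FIXED duplicates old_fd onto exactly new_fd, implicitly
	 * closing new_fd if it is already open (dup2() semantics).
	 */
	error = kern_dup(DUP_FIXED, old_fd, new_fd, &res);
	if (error == 0)
		KKASSERT(res == new_fd);

	/*
	 * DUP_VARIABLE with a non-zero 'new' gives fcntl(F_DUPFD)
	 * semantics: the lowest unused descriptor >= new_fd is chosen
	 * and returned in res.  dup() simply passes 0 for 'new'.
	 */
	if (error == 0)
		error = kern_dup(DUP_VARIABLE, old_fd, new_fd, &res);
	return (error);
}
#endif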
490 
491 /*
492  * If sigio is on the list associated with a process or process group,
493  * disable signalling from the device, remove sigio from the list and
494  * free sigio.
495  */
496 void
497 funsetown(struct sigio *sigio)
498 {
499 	int s;
500 
501 	if (sigio == NULL)
502 		return;
503 	s = splhigh();
504 	*(sigio->sio_myref) = NULL;
505 	splx(s);
506 	if (sigio->sio_pgid < 0) {
507 		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
508 			     sigio, sio_pgsigio);
509 	} else /* if ((*sigiop)->sio_pgid > 0) */ {
510 		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
511 			     sigio, sio_pgsigio);
512 	}
513 	crfree(sigio->sio_ucred);
514 	free(sigio, M_SIGIO);
515 }
516 
517 /* Free a list of sigio structures. */
518 void
519 funsetownlst(struct sigiolst *sigiolst)
520 {
521 	struct sigio *sigio;
522 
523 	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
524 		funsetown(sigio);
525 }
526 
527 /*
528  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
529  *
530  * After permission checking, add a sigio structure to the sigio list for
531  * the process or process group.
532  */
533 int
534 fsetown(pid_t pgid, struct sigio **sigiop)
535 {
536 	struct proc *proc;
537 	struct pgrp *pgrp;
538 	struct sigio *sigio;
539 	int s;
540 
541 	if (pgid == 0) {
542 		funsetown(*sigiop);
543 		return (0);
544 	}
545 	if (pgid > 0) {
546 		proc = pfind(pgid);
547 		if (proc == NULL)
548 			return (ESRCH);
549 
550 		/*
551 		 * Policy - Don't allow a process to FSETOWN a process
552 		 * in another session.
553 		 *
554 		 * Remove this test to allow maximum flexibility or
555 		 * restrict FSETOWN to the current process or process
556 		 * group for maximum safety.
557 		 */
558 		if (proc->p_session != curproc->p_session)
559 			return (EPERM);
560 
561 		pgrp = NULL;
562 	} else /* if (pgid < 0) */ {
563 		pgrp = pgfind(-pgid);
564 		if (pgrp == NULL)
565 			return (ESRCH);
566 
567 		/*
568 		 * Policy - Don't allow a process to FSETOWN a process
569 		 * in another session.
570 		 *
571 		 * Remove this test to allow maximum flexibility or
572 		 * restrict FSETOWN to the current process or process
573 		 * group for maximum safety.
574 		 */
575 		if (pgrp->pg_session != curproc->p_session)
576 			return (EPERM);
577 
578 		proc = NULL;
579 	}
580 	funsetown(*sigiop);
581 	sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
582 	if (pgid > 0) {
583 		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
584 		sigio->sio_proc = proc;
585 	} else {
586 		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
587 		sigio->sio_pgrp = pgrp;
588 	}
589 	sigio->sio_pgid = pgid;
590 	sigio->sio_ucred = crhold(curproc->p_ucred);
591 	/* It would be convenient if p_ruid was in ucred. */
592 	sigio->sio_ruid = curproc->p_ucred->cr_ruid;
593 	sigio->sio_myref = sigiop;
594 	s = splhigh();
595 	*sigiop = sigio;
596 	splx(s);
597 	return (0);
598 }
599 
600 /*
601  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
602  */
603 pid_t
604 fgetown(struct sigio *sigio)
605 {
606 	return (sigio != NULL ? sigio->sio_pgid : 0);
607 }
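
/*
 * Illustrative sketch (not part of the build; the function and its sigio
 * pointer argument are hypothetical): the typical way a driver's ioctl
 * handler wires FIOSETOWN/FIOGETOWN to fsetown()/fgetown().
 */
#if 0
static int
example_sigio_ioctl(struct sigio **siop, u_long cmd, caddr_t data)
{
	switch (cmd) {
	case FIOSETOWN:
		/* a positive arg selects a pid, a negative arg selects -pgid */
		return (fsetown(*(int *)data, siop));
	case FIOGETOWN:
		*(int *)data = fgetown(*siop);
		return (0);
	default:
		return (ENOTTY);
	}
}
#endif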
608 
609 /*
610  * Close a file descriptor.
611  */
612 /* ARGSUSED */
613 int
614 close(struct close_args *uap)
615 {
616 	struct thread *td = curthread;
617 	struct proc *p = td->td_proc;
618 	struct filedesc *fdp;
619 	struct file *fp;
620 	int fd = uap->fd;
621 	int error;
622 	int holdleaders;
623 
624 	KKASSERT(p);
625 	fdp = p->p_fd;
626 
627 	if ((unsigned)fd >= fdp->fd_nfiles ||
628 	    (fp = fdp->fd_ofiles[fd]) == NULL)
629 		return (EBADF);
630 #if 0
631 	if (fdp->fd_ofileflags[fd] & UF_MAPPED)
632 		(void) munmapfd(p, fd);
633 #endif
634 	fdp->fd_ofiles[fd] = NULL;
635 	fdp->fd_ofileflags[fd] = 0;
636 	holdleaders = 0;
637 	if (p->p_fdtol != NULL) {
638 		/*
639 		 * Ask fdfree() to sleep to ensure that all relevant
640 		 * process leaders can be traversed in closef().
641 		 */
642 		fdp->fd_holdleaderscount++;
643 		holdleaders = 1;
644 	}
645 
646 	/*
647 	 * we now hold the fp reference that used to be owned by the descriptor
648 	 * array.
649 	 */
650 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
651 		fdp->fd_lastfile--;
652 	if (fd < fdp->fd_freefile)
653 		fdp->fd_freefile = fd;
654 	if (fd < fdp->fd_knlistsize)
655 		knote_fdclose(p, fd);
656 	error = closef(fp, td);
657 	if (holdleaders) {
658 		fdp->fd_holdleaderscount--;
659 		if (fdp->fd_holdleaderscount == 0 &&
660 		    fdp->fd_holdleaderswakeup != 0) {
661 			fdp->fd_holdleaderswakeup = 0;
662 			wakeup(&fdp->fd_holdleaderscount);
663 		}
664 	}
665 	return (error);
666 }
667 
668 int
669 kern_fstat(int fd, struct stat *ub)
670 {
671 	struct thread *td = curthread;
672 	struct proc *p = td->td_proc;
673 	struct filedesc *fdp;
674 	struct file *fp;
675 	int error;
676 
677 	KKASSERT(p);
678 
679 	fdp = p->p_fd;
680 	if ((unsigned)fd >= fdp->fd_nfiles ||
681 	    (fp = fdp->fd_ofiles[fd]) == NULL)
682 		return (EBADF);
683 	fhold(fp);
684 	error = fo_stat(fp, ub, td);
685 	fdrop(fp, td);
686 
687 	return (error);
688 }
689 
690 /*
691  * Return status information about a file descriptor.
692  */
693 int
694 fstat(struct fstat_args *uap)
695 {
696 	struct stat st;
697 	int error;
698 
699 	error = kern_fstat(uap->fd, &st);
700 
701 	if (error == 0)
702 		error = copyout(&st, uap->sb, sizeof(st));
703 	return (error);
704 }
705 
706 /*
707  * XXX: This is for source compatibility with NetBSD.  Probably doesn't
708  * belong here.
709  */
710 int
711 nfstat(struct nfstat_args *uap)
712 {
713 	struct stat st;
714 	struct nstat nst;
715 	int error;
716 
717 	error = kern_fstat(uap->fd, &st);
718 
719 	if (error == 0) {
720 		cvtnstat(&st, &nst);
721 		error = copyout(&nst, uap->sb, sizeof (nst));
722 	}
723 	return (error);
724 }
725 
726 /*
727  * Return pathconf information about a file descriptor.
728  */
729 /* ARGSUSED */
730 int
731 fpathconf(struct fpathconf_args *uap)
732 {
733 	struct thread *td = curthread;
734 	struct proc *p = td->td_proc;
735 	struct filedesc *fdp;
736 	struct file *fp;
737 	struct vnode *vp;
738 	int error = 0;
739 
740 	KKASSERT(p);
741 	fdp = p->p_fd;
742 	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
743 	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
744 		return (EBADF);
745 
746 	fhold(fp);
747 
748 	switch (fp->f_type) {
749 	case DTYPE_PIPE:
750 	case DTYPE_SOCKET:
751 		if (uap->name != _PC_PIPE_BUF) {
752 			error = EINVAL;
753 		} else {
754 			uap->sysmsg_result = PIPE_BUF;
755 			error = 0;
756 		}
757 		break;
758 	case DTYPE_FIFO:
759 	case DTYPE_VNODE:
760 		vp = (struct vnode *)fp->f_data;
761 		error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
762 		break;
763 	default:
764 		error = EOPNOTSUPP;
765 		break;
766 	}
767 	fdrop(fp, td);
768 	return(error);
769 }
770 
771 /*
772  * Allocate a file descriptor for the process.
773  */
774 static int fdexpand;
775 SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");
776 
777 int
778 fdalloc(struct proc *p, int want, int *result)
779 {
780 	struct filedesc *fdp = p->p_fd;
781 	int i;
782 	int lim, last, nfiles;
783 	struct file **newofile;
784 	char *newofileflags;
785 
786 	/*
787 	 * Search for a free descriptor starting at the higher
788 	 * of want or fd_freefile.  If that fails, consider
789 	 * expanding the ofile array.
790 	 */
791 	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
792 	for (;;) {
793 		last = min(fdp->fd_nfiles, lim);
794 		if ((i = want) < fdp->fd_freefile)
795 			i = fdp->fd_freefile;
796 		for (; i < last; i++) {
797 			if (fdp->fd_ofiles[i] == NULL) {
798 				fdp->fd_ofileflags[i] = 0;
799 				if (i > fdp->fd_lastfile)
800 					fdp->fd_lastfile = i;
801 				if (want <= fdp->fd_freefile)
802 					fdp->fd_freefile = i;
803 				*result = i;
804 				return (0);
805 			}
806 		}
807 
808 		/*
809 		 * No space in current array.  Expand?
810 		 */
811 		if (fdp->fd_nfiles >= lim)
812 			return (EMFILE);
813 		if (fdp->fd_nfiles < NDEXTENT)
814 			nfiles = NDEXTENT;
815 		else
816 			nfiles = 2 * fdp->fd_nfiles;
817 		newofile = malloc(nfiles * OFILESIZE, M_FILEDESC, M_WAITOK);
818 
819 		/*
820 		 * Deal with a file-table extension race that might have
821 		 * occurred while malloc was blocked.
822 		 */
823 		if (fdp->fd_nfiles >= nfiles) {
824 			free(newofile, M_FILEDESC);
825 			continue;
826 		}
827 		newofileflags = (char *) &newofile[nfiles];
828 		/*
829 		 * Copy the existing ofile and ofileflags arrays
830 		 * and zero the new portion of each array.
831 		 */
832 		bcopy(fdp->fd_ofiles, newofile,
833 			(i = sizeof(struct file *) * fdp->fd_nfiles));
834 		bzero((char *)newofile + i, nfiles * sizeof(struct file *) - i);
835 		bcopy(fdp->fd_ofileflags, newofileflags,
836 			(i = sizeof(char) * fdp->fd_nfiles));
837 		bzero(newofileflags + i, nfiles * sizeof(char) - i);
838 		if (fdp->fd_nfiles > NDFILE)
839 			free(fdp->fd_ofiles, M_FILEDESC);
840 		fdp->fd_ofiles = newofile;
841 		fdp->fd_ofileflags = newofileflags;
842 		fdp->fd_nfiles = nfiles;
843 		fdexpand++;
844 	}
845 	return (0);
846 }
847 
848 /*
849  * Check to see whether n user file descriptors
850  * are available to the process p.
851  */
852 int
853 fdavail(struct proc *p, int n)
854 {
855 	struct filedesc *fdp = p->p_fd;
856 	struct file **fpp;
857 	int i, lim, last;
858 
859 	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
860 	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
861 		return (1);
862 
863 	last = min(fdp->fd_nfiles, lim);
864 	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
865 	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
866 		if (*fpp == NULL && --n <= 0)
867 			return (1);
868 	}
869 	return (0);
870 }
871 
872 /*
873  * falloc:
874  *	Create a new open file structure and allocate a file descriptor
875  *	for the process that refers to it.  If p is NULL, no descriptor
876  *	is allocated and the file pointer is returned unassociated with
877  *	any process.  resultfd is only used if p is not NULL and may
878  *	separately be NULL indicating that you don't need the returned fd.
879  */
880 int
881 falloc(struct proc *p, struct file **resultfp, int *resultfd)
882 {
883 	struct file *fp, *fq;
884 	int error, i;
885 	static struct timeval lastfail;
886 	static int curfail;
887 
888 	if (nfiles >= maxfiles - maxfilesrootres &&
889 	    ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) {
890 		if (ppsratecheck(&lastfail, &curfail, 1)) {
891 			printf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n",
892 				(p ? p->p_ucred->cr_ruid : -1));
893 		}
894 		return (ENFILE);
895 	}
896 	/*
897 	 * Allocate a new file descriptor.
898 	 * If the process has file descriptor zero open, add to the list
899 	 * of open files at that point, otherwise put it at the front of
900 	 * the list of open files.
901 	 */
902 	nfiles++;
903 	fp = malloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
904 
905 	/*
906 	 * wait until after malloc (which may have blocked) returns before
907 	 * allocating the slot, else a race might have shrunk it if we had
908 	 * allocated it before the malloc.
909 	 */
910 	i = -1;
911 	if (p && (error = fdalloc(p, 0, &i))) {
912 		nfiles--;
913 		free(fp, M_FILE);
914 		return (error);
915 	}
916 	fp->f_count = 1;
917 	fp->f_ops = &badfileops;
918 	fp->f_seqcount = 1;
919 	if (p) {
920 		fp->f_cred = crhold(p->p_ucred);
921 		if ((fq = p->p_fd->fd_ofiles[0]) != NULL) {
922 			LIST_INSERT_AFTER(fq, fp, f_list);
923 		} else {
924 			LIST_INSERT_HEAD(&filehead, fp, f_list);
925 		}
926 		p->p_fd->fd_ofiles[i] = fp;
927 	} else {
928 		fp->f_cred = crhold(proc0.p_ucred);
929 		LIST_INSERT_HEAD(&filehead, fp, f_list);
930 	}
931 	if (resultfp)
932 		*resultfp = fp;
933 	if (resultfd)
934 		*resultfd = i;
935 	return (0);
936 }
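
/*
 * Illustrative sketch (not part of the build; the helper name is
 * hypothetical): the usual falloc() pattern, modeled on fdcheckstd()
 * below.  The caller fills in the file structure after allocation and
 * must fdrop() the file (and clear the descriptor slot) if a later
 * step fails.
 */
#if 0
static int
example_install_vnode(struct proc *p, struct vnode *vp, int flags, int *fdres)
{
	struct file *fp;
	int fd;
	int error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_data = (caddr_t)vp;
	fp->f_flag = flags;
	fp->f_ops = &vnops;
	fp->f_type = DTYPE_VNODE;
	*fdres = fd;
	return (0);
}
#endif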
937 
938 void
939 fsetcred(struct file *fp, struct ucred *cr)
940 {
941 	crhold(cr);
942 	crfree(fp->f_cred);
943 	fp->f_cred = cr;
944 }
945 
946 /*
947  * Free a file descriptor.
948  */
949 void
950 ffree(struct file *fp)
951 {
952 	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
953 	LIST_REMOVE(fp, f_list);
954 	crfree(fp->f_cred);
955 	nfiles--;
956 	free(fp, M_FILE);
957 }
958 
959 /*
960  * Build a new filedesc structure.
961  */
962 struct filedesc *
963 fdinit(struct proc *p)
964 {
965 	struct filedesc0 *newfdp;
966 	struct filedesc *fdp = p->p_fd;
967 
968 	newfdp = malloc(sizeof(struct filedesc0), M_FILEDESC, M_WAITOK|M_ZERO);
969 	newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
970 	if (newfdp->fd_fd.fd_cdir)
971 		vref(newfdp->fd_fd.fd_cdir);
972 	newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
973 	vref(newfdp->fd_fd.fd_rdir);
974 	newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
975 	if (newfdp->fd_fd.fd_jdir)
976 		vref(newfdp->fd_fd.fd_jdir);
977 
978 	/* Create the file descriptor table. */
979 	newfdp->fd_fd.fd_refcnt = 1;
980 	newfdp->fd_fd.fd_cmask = cmask;
981 	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
982 	newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
983 	newfdp->fd_fd.fd_nfiles = NDFILE;
984 	newfdp->fd_fd.fd_knlistsize = -1;
985 
986 	return (&newfdp->fd_fd);
987 }
988 
989 /*
990  * Share a filedesc structure.
991  */
992 struct filedesc *
993 fdshare(struct proc *p)
994 {
995 	p->p_fd->fd_refcnt++;
996 	return (p->p_fd);
997 }
998 
999 /*
1000  * Copy a filedesc structure.
1001  */
1002 struct filedesc *
1003 fdcopy(struct proc *p)
1004 {
1005 	struct filedesc *newfdp, *fdp = p->p_fd;
1006 	struct file **fpp;
1007 	int i;
1008 
1009 	/* Certain daemons might not have file descriptors. */
1010 	if (fdp == NULL)
1011 		return (NULL);
1012 
1013 	newfdp = malloc(sizeof(struct filedesc0), M_FILEDESC, M_WAITOK);
1014 	bcopy(fdp, newfdp, sizeof(struct filedesc));
1015 	if (newfdp->fd_cdir)
1016 		vref(newfdp->fd_cdir);
1017 	/*
1018 	 * We must check for fd_rdir here, at least for now, because
1019 	 * the init process is created before we have access to the
1020 	 * root vnode to take a reference to it.
1021 	 */
1022 	if (newfdp->fd_rdir)
1023 		vref(newfdp->fd_rdir);
1024 	if (newfdp->fd_jdir)
1025 		vref(newfdp->fd_jdir);
1026 	newfdp->fd_refcnt = 1;
1027 
1028 	/*
1029 	 * If the number of open files fits in the internal arrays
1030 	 * of the open file structure, use them, otherwise allocate
1031 	 * additional memory for the number of descriptors currently
1032 	 * in use.
1033 	 */
1034 	if (newfdp->fd_lastfile < NDFILE) {
1035 		newfdp->fd_ofiles = ((struct filedesc0 *) newfdp)->fd_dfiles;
1036 		newfdp->fd_ofileflags =
1037 		    ((struct filedesc0 *) newfdp)->fd_dfileflags;
1038 		i = NDFILE;
1039 	} else {
1040 		/*
1041 		 * Compute the smallest multiple of NDEXTENT needed
1042 		 * for the file descriptors currently in use,
1043 		 * allowing the table to shrink.
1044 		 */
1045 		i = newfdp->fd_nfiles;
1046 		while (i > 2 * NDEXTENT && i > newfdp->fd_lastfile * 2)
1047 			i /= 2;
1048 		newfdp->fd_ofiles = malloc(i * OFILESIZE, M_FILEDESC, M_WAITOK);
1049 		newfdp->fd_ofileflags = (char *) &newfdp->fd_ofiles[i];
1050 	}
1051 	newfdp->fd_nfiles = i;
1052 	bcopy(fdp->fd_ofiles, newfdp->fd_ofiles, i * sizeof(struct file **));
1053 	bcopy(fdp->fd_ofileflags, newfdp->fd_ofileflags, i * sizeof(char));
1054 
1055 	/*
1056 	 * kq descriptors cannot be copied.
1057 	 */
1058 	if (newfdp->fd_knlistsize != -1) {
1059 		fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile];
1060 		for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--) {
1061 			if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE) {
1062 				*fpp = NULL;
1063 				if (i < newfdp->fd_freefile)
1064 					newfdp->fd_freefile = i;
1065 			}
1066 			if (*fpp == NULL && i == newfdp->fd_lastfile && i > 0)
1067 				newfdp->fd_lastfile--;
1068 		}
1069 		newfdp->fd_knlist = NULL;
1070 		newfdp->fd_knlistsize = -1;
1071 		newfdp->fd_knhash = NULL;
1072 		newfdp->fd_knhashmask = 0;
1073 	}
1074 
1075 	fpp = newfdp->fd_ofiles;
1076 	for (i = newfdp->fd_lastfile; i-- >= 0; fpp++) {
1077 		if (*fpp != NULL)
1078 			fhold(*fpp);
1079 	}
1080 	return (newfdp);
1081 }
1082 
1083 /*
1084  * Release a filedesc structure.
1085  */
1086 void
1087 fdfree(struct proc *p)
1088 {
1089 	struct thread *td = p->p_thread;
1090 	struct filedesc *fdp = p->p_fd;
1091 	struct file **fpp;
1092 	int i;
1093 	struct filedesc_to_leader *fdtol;
1094 	struct file *fp;
1095 	struct vnode *vp;
1096 	struct flock lf;
1097 
1098 	/* Certain daemons might not have file descriptors. */
1099 	if (fdp == NULL)
1100 		return;
1101 
1102 	/* Check for special need to clear POSIX style locks */
1103 	fdtol = p->p_fdtol;
1104 	if (fdtol != NULL) {
1105 		KASSERT(fdtol->fdl_refcount > 0,
1106 			("filedesc_to_refcount botch: fdl_refcount=%d",
1107 			 fdtol->fdl_refcount));
1108 		if (fdtol->fdl_refcount == 1 &&
1109 		    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1112 			for (i = 0, fpp = fdp->fd_ofiles;
1113 			     i <= fdp->fd_lastfile;
1114 			     i++, fpp++) {
1115 				if (*fpp == NULL ||
1116 				    (*fpp)->f_type != DTYPE_VNODE)
1117 					continue;
1118 				fp = *fpp;
1119 				fhold(fp);
1120 				lf.l_whence = SEEK_SET;
1121 				lf.l_start = 0;
1122 				lf.l_len = 0;
1123 				lf.l_type = F_UNLCK;
1124 				vp = (struct vnode *)fp->f_data;
1125 				(void) VOP_ADVLOCK(vp,
1126 						   (caddr_t)p->p_leader,
1127 						   F_UNLCK,
1128 						   &lf,
1129 						   F_POSIX);
1130 				fdrop(fp, p->p_thread);
1131 				fpp = fdp->fd_ofiles + i;
1132 			}
1133 		}
1134 	retry:
1135 		if (fdtol->fdl_refcount == 1) {
1136 			if (fdp->fd_holdleaderscount > 0 &&
1137 			    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1138 				/*
1139 				 * close() or kern_dup() has cleared a reference
1140 				 * in a shared file descriptor table.
1141 				 */
1142 				fdp->fd_holdleaderswakeup = 1;
1143 				tsleep(&fdp->fd_holdleaderscount,
1144 				       0, "fdlhold", 0);
1145 				goto retry;
1146 			}
1147 			if (fdtol->fdl_holdcount > 0) {
1148 				/*
1149 				 * Ensure that fdtol->fdl_leader
1150 				 * remains valid in closef().
1151 				 */
1152 				fdtol->fdl_wakeup = 1;
1153 				tsleep(fdtol, 0, "fdlhold", 0);
1154 				goto retry;
1155 			}
1156 		}
1157 		fdtol->fdl_refcount--;
1158 		if (fdtol->fdl_refcount == 0 &&
1159 		    fdtol->fdl_holdcount == 0) {
1160 			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
1161 			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
1162 		} else
1163 			fdtol = NULL;
1164 		p->p_fdtol = NULL;
1165 		if (fdtol != NULL)
1166 			free(fdtol, M_FILEDESC_TO_LEADER);
1167 	}
1168 	if (--fdp->fd_refcnt > 0)
1169 		return;
1170 	/*
1171 	 * We hold the last reference to the structure, so we can
1172 	 * safely assume it will not change out from under us.
1173 	 */
1174 	fpp = fdp->fd_ofiles;
1175 	for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
1176 		if (*fpp)
1177 			(void) closef(*fpp, td);
1178 	}
1179 	if (fdp->fd_nfiles > NDFILE)
1180 		free(fdp->fd_ofiles, M_FILEDESC);
1181 	if (fdp->fd_cdir)
1182 		vrele(fdp->fd_cdir);
1183 	vrele(fdp->fd_rdir);
1184 	if (fdp->fd_jdir)
1185 		vrele(fdp->fd_jdir);
1186 	if (fdp->fd_knlist)
1187 		free(fdp->fd_knlist, M_KQUEUE);
1188 	if (fdp->fd_knhash)
1189 		free(fdp->fd_knhash, M_KQUEUE);
1190 	free(fdp, M_FILEDESC);
1191 }
1192 
1193 /*
1194  * For setugid programs, we don't want people to use that setugidness
1195  * to generate error messages which write to a file that would otherwise
1196  * be off-limits to the process.
1197  *
1198  * This is a gross hack to plug the hole.  A better solution would involve
1199  * a special vop or other form of generalized access control mechanism.  We
1200  * go ahead and just reject all procfs file system accesses as dangerous.
1201  *
1202  * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
1203  * sufficient.  We also don't check for setugidness since we know we are.
1204  */
1205 static int
1206 is_unsafe(struct file *fp)
1207 {
1208 	if (fp->f_type == DTYPE_VNODE &&
1209 	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
1210 		return (1);
1211 	return (0);
1212 }
1213 
1214 /*
1215  * Make this setugid thing safe, if at all possible.
1216  */
1217 void
1218 setugidsafety(struct proc *p)
1219 {
1220 	struct thread *td = p->p_thread;
1221 	struct filedesc *fdp = p->p_fd;
1222 	int i;
1223 
1224 	/* Certain daemons might not have file descriptors. */
1225 	if (fdp == NULL)
1226 		return;
1227 
1228 	/*
1229 	 * note: fdp->fd_ofiles may be reallocated out from under us while
1230 	 * we are blocked in a close.  Be careful!
1231 	 */
1232 	for (i = 0; i <= fdp->fd_lastfile; i++) {
1233 		if (i > 2)
1234 			break;
1235 		if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
1236 			struct file *fp;
1237 
1238 #if 0
1239 			if ((fdp->fd_ofileflags[i] & UF_MAPPED) != 0)
1240 				(void) munmapfd(p, i);
1241 #endif
1242 			if (i < fdp->fd_knlistsize)
1243 				knote_fdclose(p, i);
1244 			/*
1245 			 * NULL-out descriptor prior to close to avoid
1246 			 * a race while close blocks.
1247 			 */
1248 			fp = fdp->fd_ofiles[i];
1249 			fdp->fd_ofiles[i] = NULL;
1250 			fdp->fd_ofileflags[i] = 0;
1251 			if (i < fdp->fd_freefile)
1252 				fdp->fd_freefile = i;
1253 			(void) closef(fp, td);
1254 		}
1255 	}
1256 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
1257 		fdp->fd_lastfile--;
1258 }
1259 
1260 /*
1261  * Close any descriptors that are marked close-on-exec (UF_EXCLOSE).
1262  */
1263 void
1264 fdcloseexec(struct proc *p)
1265 {
1266 	struct thread *td = p->p_thread;
1267 	struct filedesc *fdp = p->p_fd;
1268 	int i;
1269 
1270 	/* Certain daemons might not have file descriptors. */
1271 	if (fdp == NULL)
1272 		return;
1273 
1274 	/*
1275 	 * We cannot cache fd_ofiles or fd_ofileflags since operations
1276 	 * may block and rip them out from under us.
1277 	 */
1278 	for (i = 0; i <= fdp->fd_lastfile; i++) {
1279 		if (fdp->fd_ofiles[i] != NULL &&
1280 		    (fdp->fd_ofileflags[i] & UF_EXCLOSE)) {
1281 			struct file *fp;
1282 
1283 #if 0
1284 			if (fdp->fd_ofileflags[i] & UF_MAPPED)
1285 				(void) munmapfd(p, i);
1286 #endif
1287 			if (i < fdp->fd_knlistsize)
1288 				knote_fdclose(p, i);
1289 			/*
1290 			 * NULL-out descriptor prior to close to avoid
1291 			 * a race while close blocks.
1292 			 */
1293 			fp = fdp->fd_ofiles[i];
1294 			fdp->fd_ofiles[i] = NULL;
1295 			fdp->fd_ofileflags[i] = 0;
1296 			if (i < fdp->fd_freefile)
1297 				fdp->fd_freefile = i;
1298 			(void) closef(fp, td);
1299 		}
1300 	}
1301 	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
1302 		fdp->fd_lastfile--;
1303 }
1304 
1305 /*
1306  * It is unsafe for set[ug]id processes to be started with file
1307  * descriptors 0..2 closed, as these descriptors are given implicit
1308  * significance in the Standard C library.  fdcheckstd() will create a
1309  * descriptor referencing /dev/null for each of stdin, stdout, and
1310  * stderr that is not already open.
1311  */
1312 int
1313 fdcheckstd(struct proc *p)
1314 {
1315 	struct thread *td = p->p_thread;
1316 	struct nameidata nd;
1317 	struct filedesc *fdp;
1318 	struct file *fp;
1319 	register_t retval;
1320 	int fd, i, error, flags, devnull;
1321 
1322 	fdp = p->p_fd;
1323 	if (fdp == NULL)
1324 		return (0);
1325 	devnull = -1;
1326 	error = 0;
1327 	for (i = 0; i < 3; i++) {
1328 		if (fdp->fd_ofiles[i] != NULL)
1329 			continue;
1330 		if (devnull < 0) {
1331 			error = falloc(p, &fp, &fd);
1332 			if (error != 0)
1333 				break;
1334 			NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_SYSSPACE,
1335 			    "/dev/null", td);
1336 			flags = FREAD | FWRITE;
1337 			error = vn_open(&nd, flags, 0);
1338 			if (error != 0) {
1339 				fdp->fd_ofiles[i] = NULL;
1340 				fdrop(fp, td);
1341 				break;
1342 			}
1343 			NDFREE(&nd, NDF_ONLY_PNBUF);
1344 			fp->f_data = (caddr_t)nd.ni_vp;
1345 			fp->f_flag = flags;
1346 			fp->f_ops = &vnops;
1347 			fp->f_type = DTYPE_VNODE;
1348 			VOP_UNLOCK(nd.ni_vp, NULL, 0, td);
1349 			devnull = fd;
1350 		} else {
1351 			error = kern_dup(DUP_FIXED, devnull, i, &retval);
1352 			if (error != 0)
1353 				break;
1354 		}
1355 	}
1356 	return (error);
1357 }
1358 
1359 /*
1360  * Internal form of close.
1361  * Decrement reference count on file structure.
1362  * Note: td and/or p may be NULL when closing a file
1363  * that was being passed in a message.
1364  */
1365 int
1366 closef(struct file *fp, struct thread *td)
1367 {
1368 	struct vnode *vp;
1369 	struct flock lf;
1370 	struct filedesc_to_leader *fdtol;
1371 	struct proc *p;
1372 
1373 	if (fp == NULL)
1374 		return (0);
1375 	if (td == NULL) {
1376 		td = curthread;
1377 		p = NULL;		/* allow no proc association */
1378 	} else {
1379 		p = td->td_proc;	/* can also be NULL */
1380 	}
1381 	/*
1382 	 * POSIX record locking dictates that any close releases ALL
1383 	 * locks owned by this process.  This is handled by setting
1384 	 * a flag in the unlock to free ONLY locks obeying POSIX
1385 	 * semantics, and not to free BSD-style file locks.
1386 	 * If the descriptor was in a message, POSIX-style locks
1387 	 * aren't passed with the descriptor.
1388 	 */
1389 	if (p != NULL &&
1390 	    fp->f_type == DTYPE_VNODE) {
1391 		if ((p->p_leader->p_flag & P_ADVLOCK) != 0) {
1392 			lf.l_whence = SEEK_SET;
1393 			lf.l_start = 0;
1394 			lf.l_len = 0;
1395 			lf.l_type = F_UNLCK;
1396 			vp = (struct vnode *)fp->f_data;
1397 			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
1398 					   &lf, F_POSIX);
1399 		}
1400 		fdtol = p->p_fdtol;
1401 		if (fdtol != NULL) {
1402 			/*
1403 			 * Handle special case where file descriptor table
1404 			 * is shared between multiple process leaders.
1405 			 */
1406 			for (fdtol = fdtol->fdl_next;
1407 			     fdtol != p->p_fdtol;
1408 			     fdtol = fdtol->fdl_next) {
1409 				if ((fdtol->fdl_leader->p_flag &
1410 				     P_ADVLOCK) == 0)
1411 					continue;
1412 				fdtol->fdl_holdcount++;
1413 				lf.l_whence = SEEK_SET;
1414 				lf.l_start = 0;
1415 				lf.l_len = 0;
1416 				lf.l_type = F_UNLCK;
1417 				vp = (struct vnode *)fp->f_data;
1418 				(void) VOP_ADVLOCK(vp,
1419 						   (caddr_t)p->p_leader,
1420 						   F_UNLCK, &lf, F_POSIX);
1421 				fdtol->fdl_holdcount--;
1422 				if (fdtol->fdl_holdcount == 0 &&
1423 				    fdtol->fdl_wakeup != 0) {
1424 					fdtol->fdl_wakeup = 0;
1425 					wakeup(fdtol);
1426 				}
1427 			}
1428 		}
1429 	}
1430 	return (fdrop(fp, td));
1431 }
1432 
1433 int
1434 fdrop(struct file *fp, struct thread *td)
1435 {
1436 	struct flock lf;
1437 	struct vnode *vp;
1438 	int error;
1439 
1440 	if (--fp->f_count > 0)
1441 		return (0);
1442 	if (fp->f_count < 0)
1443 		panic("fdrop: count < 0");
1444 	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
1445 		lf.l_whence = SEEK_SET;
1446 		lf.l_start = 0;
1447 		lf.l_len = 0;
1448 		lf.l_type = F_UNLCK;
1449 		vp = (struct vnode *)fp->f_data;
1450 		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
1451 	}
1452 	if (fp->f_ops != &badfileops)
1453 		error = fo_close(fp, td);
1454 	else
1455 		error = 0;
1456 	ffree(fp);
1457 	return (error);
1458 }
1459 
1460 /*
1461  * Apply an advisory lock on a file descriptor.
1462  *
1463  * Just attempt to get a record lock of the requested type on
1464  * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
1465  */
1466 /* ARGSUSED */
1467 int
1468 flock(struct flock_args *uap)
1469 {
1470 	struct proc *p = curproc;
1471 	struct filedesc *fdp = p->p_fd;
1472 	struct file *fp;
1473 	struct vnode *vp;
1474 	struct flock lf;
1475 
1476 	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
1477 	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
1478 		return (EBADF);
1479 	if (fp->f_type != DTYPE_VNODE)
1480 		return (EOPNOTSUPP);
1481 	vp = (struct vnode *)fp->f_data;
1482 	lf.l_whence = SEEK_SET;
1483 	lf.l_start = 0;
1484 	lf.l_len = 0;
1485 	if (uap->how & LOCK_UN) {
1486 		lf.l_type = F_UNLCK;
1487 		fp->f_flag &= ~FHASLOCK;
1488 		return (VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK));
1489 	}
1490 	if (uap->how & LOCK_EX)
1491 		lf.l_type = F_WRLCK;
1492 	else if (uap->how & LOCK_SH)
1493 		lf.l_type = F_RDLCK;
1494 	else
1495 		return (EBADF);
1496 	fp->f_flag |= FHASLOCK;
1497 	if (uap->how & LOCK_NB)
1498 		return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK));
1499 	return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK|F_WAIT));
1500 }
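
/*
 * Illustrative note: from userland the mapping implemented above is
 *
 *	flock(fd, LOCK_EX)		whole-file F_WRLCK, F_SETLK, F_FLOCK|F_WAIT
 *	flock(fd, LOCK_SH | LOCK_NB)	whole-file F_RDLCK, F_SETLK, F_FLOCK
 *	flock(fd, LOCK_UN)		F_UNLCK, and FHASLOCK is cleared
 *
 * i.e. advisory locks owned by the struct file rather than by the process.
 */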
1501 
1502 /*
1503  * File Descriptor pseudo-device driver (/dev/fd/).
1504  *
1505  * Opening minor device N dup()s the file (if any) connected to file
1506  * descriptor N belonging to the calling process.  Note that this driver
1507  * consists of only the ``open()'' routine, because all subsequent
1508  * references to this file will be direct to the other driver.
1509  */
1510 /* ARGSUSED */
1511 static int
1512 fdopen(dev_t dev, int mode, int type, struct thread *td)
1513 {
1514 	KKASSERT(td->td_proc != NULL);
1515 
1516 	/*
1517 	 * XXX Kludge: set curproc->p_dupfd to contain the value of the
1518 	 * file descriptor being sought for duplication.  The error
1519 	 * return ensures that the vnode for this device will be released
1520 	 * by vn_open. Open will detect this special error and take the
1521 	 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
1522 	 * will simply report the error.
1523 	 */
1524 	td->td_proc->p_dupfd = minor(dev);
1525 	return (ENODEV);
1526 }
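
/*
 * Illustrative note: the userland-visible effect is that
 *
 *	fd = open("/dev/fd/3", O_RDONLY);
 *
 * behaves like dup(3) when the requested mode is a subset of descriptor
 * 3's open mode: fdopen() records the minor number in p_dupfd and fails
 * with ENODEV, and the open path then calls dupfdopen() below to perform
 * the actual duplication.
 */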
1527 
1528 /*
1529  * Duplicate the specified descriptor to a free descriptor.
1530  */
1531 int
1532 dupfdopen(struct filedesc *fdp, int indx, int dfd, int mode, int error)
1533 {
1534 	struct file *wfp;
1535 	struct file *fp;
1536 
1537 	/*
1538 	 * If the to-be-dup'd fd number is greater than the allowed number
1539 	 * of file descriptors, or the fd to be dup'd has already been
1540 	 * closed, then reject.
1541 	 */
1542 	if ((u_int)dfd >= fdp->fd_nfiles ||
1543 	    (wfp = fdp->fd_ofiles[dfd]) == NULL) {
1544 		return (EBADF);
1545 	}
1546 
1547 	/*
1548 	 * There are two cases of interest here.
1549 	 *
1550 	 * For ENODEV simply dup (dfd) to file descriptor
1551 	 * (indx) and return.
1552 	 *
1553 	 * For ENXIO steal away the file structure from (dfd) and
1554 	 * store it in (indx).  (dfd) is effectively closed by
1555 	 * this operation.
1556 	 *
1557 	 * Any other error code is just returned.
1558 	 */
1559 	switch (error) {
1560 	case ENODEV:
1561 		/*
1562 		 * Check that the mode the file is being opened for is a
1563 		 * subset of the mode of the existing descriptor.
1564 		 */
1565 		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag)
1566 			return (EACCES);
1567 		fp = fdp->fd_ofiles[indx];
1568 #if 0
1569 		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
1570 			(void) munmapfd(p, indx);
1571 #endif
1572 		fdp->fd_ofiles[indx] = wfp;
1573 		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1574 		fhold(wfp);
1575 		if (indx > fdp->fd_lastfile)
1576 			fdp->fd_lastfile = indx;
1577 		/*
1578 		 * we now own the reference to fp that the ofiles[] array
1579 		 * used to own.  Release it.
1580 		 */
1581 		if (fp)
1582 			fdrop(fp, curthread);
1583 		return (0);
1584 
1585 	case ENXIO:
1586 		/*
1587 		 * Steal away the file pointer from dfd, and stuff it into indx.
1588 		 */
1589 		fp = fdp->fd_ofiles[indx];
1590 #if 0
1591 		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
1592 			(void) munmapfd(p, indx);
1593 #endif
1594 		fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
1595 		fdp->fd_ofiles[dfd] = NULL;
1596 		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
1597 		fdp->fd_ofileflags[dfd] = 0;
1598 
1599 		/*
1600 		 * we now own the reference to fp that the ofiles[] array
1601 		 * used to own.  Release it.
1602 		 */
1603 		if (fp)
1604 			fdrop(fp, curthread);
1605 		/*
1606 		 * Complete the clean up of the filedesc structure by
1607 		 * recomputing the various hints.
1608 		 */
1609 		if (indx > fdp->fd_lastfile) {
1610 			fdp->fd_lastfile = indx;
1611 		} else {
1612 			while (fdp->fd_lastfile > 0 &&
1613 			   fdp->fd_ofiles[fdp->fd_lastfile] == NULL) {
1614 				fdp->fd_lastfile--;
1615 			}
1616 			if (dfd < fdp->fd_freefile)
1617 				fdp->fd_freefile = dfd;
1618 		}
1619 		return (0);
1620 
1621 	default:
1622 		return (error);
1623 	}
1624 	/* NOTREACHED */
1625 }
1626 
1627 
1628 struct filedesc_to_leader *
1629 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
1630 			 struct proc *leader)
1631 {
1632 	struct filedesc_to_leader *fdtol;
1633 
1634 	fdtol = malloc(sizeof(struct filedesc_to_leader),
1635 			M_FILEDESC_TO_LEADER, M_WAITOK);
1636 	fdtol->fdl_refcount = 1;
1637 	fdtol->fdl_holdcount = 0;
1638 	fdtol->fdl_wakeup = 0;
1639 	fdtol->fdl_leader = leader;
1640 	if (old != NULL) {
1641 		fdtol->fdl_next = old->fdl_next;
1642 		fdtol->fdl_prev = old;
1643 		old->fdl_next = fdtol;
1644 		fdtol->fdl_next->fdl_prev = fdtol;
1645 	} else {
1646 		fdtol->fdl_next = fdtol;
1647 		fdtol->fdl_prev = fdtol;
1648 	}
1649 	return fdtol;
1650 }
1651 
1652 /*
1653  * Get file structures.
1654  */
1655 static int
1656 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
1657 {
1658 	int error;
1659 	struct file *fp;
1660 
1661 	if (!req->oldptr) {
1662 		/*
1663 		 * overestimate by 10 files
1664 		 */
1665 		return (SYSCTL_OUT(req, 0, sizeof(filehead) +
1666 				(nfiles + 10) * sizeof(struct file)));
1667 	}
1668 
1669 	error = SYSCTL_OUT(req, (caddr_t)&filehead, sizeof(filehead));
1670 	if (error)
1671 		return (error);
1672 
1673 	/*
1674 	 * followed by an array of file structures
1675 	 */
1676 	LIST_FOREACH(fp, &filehead, f_list) {
1677 		error = SYSCTL_OUT(req, (caddr_t)fp, sizeof (struct file));
1678 		if (error)
1679 			return (error);
1680 	}
1681 	return (0);
1682 }
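
/*
 * Illustrative note: userland consumers read this as a struct filehead
 * followed by an array of struct file via the kern.file sysctl, e.g.
 *
 *	sysctlbyname("kern.file", buf, &len, NULL, 0);
 */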
1683 
1684 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
1685     0, 0, sysctl_kern_file, "S,file", "Entire file table");
1686 
1687 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
1688     &maxfilesperproc, 0, "Maximum files allowed open per process");
1689 
1690 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
1691     &maxfiles, 0, "Maximum number of files");
1692 
1693 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
1694     &maxfilesrootres, 0, "Descriptors reserved for root use");
1695 
1696 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
1697 	&nfiles, 0, "System-wide number of open files");
1698 
1699 static void
1700 fildesc_drvinit(void *unused)
1701 {
1702 	int fd;
1703 
1704 	cdevsw_add(&fildesc_cdevsw, 0, 0);
1705 	for (fd = 0; fd < NUMFDESC; fd++) {
1706 		make_dev(&fildesc_cdevsw, fd,
1707 		    UID_BIN, GID_BIN, 0666, "fd/%d", fd);
1708 	}
1709 	make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
1710 	make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
1711 	make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
1712 }
1713 
1714 struct fileops badfileops = {
1715 	NULL,	/* port */
1716 	NULL,	/* clone */
1717 	badfo_readwrite,
1718 	badfo_readwrite,
1719 	badfo_ioctl,
1720 	badfo_poll,
1721 	badfo_kqfilter,
1722 	badfo_stat,
1723 	badfo_close
1724 };
1725 
1726 static int
1727 badfo_readwrite(
1728 	struct file *fp,
1729 	struct uio *uio,
1730 	struct ucred *cred,
1731 	int flags,
1732 	struct thread *td
1733 ) {
1734 	return (EBADF);
1735 }
1736 
1737 static int
1738 badfo_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
1739 {
1740 	return (EBADF);
1741 }
1742 
1743 static int
1744 badfo_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
1745 {
1746 	return (0);
1747 }
1748 
1749 static int
1750 badfo_kqfilter(struct file *fp, struct knote *kn)
1751 {
1752 	return (0);
1753 }
1754 
1755 static int
1756 badfo_stat(struct file *fp, struct stat *sb, struct thread *td)
1757 {
1758 	return (EBADF);
1759 }
1760 
1761 static int
1762 badfo_close(struct file *fp, struct thread *td)
1763 {
1764 	return (EBADF);
1765 }
1766 
1767 SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
1768 					fildesc_drvinit,NULL)
1769