xref: /dragonfly/sys/kern/kern_descrip.c (revision c022ba7f)
1 /*
2  * Copyright (c) 2005-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Jeffrey Hsu and Matthew Dillon.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *
35  * Copyright (c) 1982, 1986, 1989, 1991, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  * (c) UNIX System Laboratories, Inc.
38  * All or some portions of this file are derived from material licensed
39  * to the University of California by American Telephone and Telegraph
40  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41  * the permission of UNIX System Laboratories, Inc.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. Neither the name of the University nor the names of its contributors
52  *    may be used to endorse or promote products derived from this software
53  *    without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  *
67  *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
68  * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/sysmsg.h>
75 #include <sys/conf.h>
76 #include <sys/device.h>
77 #include <sys/file.h>
78 #include <sys/filedesc.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81 #include <sys/vnode.h>
82 #include <sys/proc.h>
83 #include <sys/nlookup.h>
84 #include <sys/stat.h>
85 #include <sys/filio.h>
86 #include <sys/fcntl.h>
87 #include <sys/unistd.h>
88 #include <sys/resourcevar.h>
89 #include <sys/event.h>
90 #include <sys/kern_syscall.h>
91 #include <sys/kcore.h>
92 #include <sys/kinfo.h>
93 #include <sys/un.h>
94 #include <sys/objcache.h>
95 
96 #include <vm/vm.h>
97 #include <vm/vm_extern.h>
98 
99 #include <sys/file2.h>
100 #include <sys/spinlock2.h>
101 
102 static int fdalloc_locked(struct proc *p, struct filedesc *fdp,
103 			int want, int *result);
104 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
105 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr);
106 static struct file *funsetfd_locked (struct filedesc *fdp, int fd);
107 static void ffree(struct file *fp);
108 
109 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
110 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
111 			"file desc to leader structures");
112 static MALLOC_DEFINE_OBJ(M_FILE, sizeof(struct file),
113 			"file", "Open file structure");
114 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
115 
116 static struct krate krate_uidinfo = { .freq = 1 };
117 
118 static	 d_open_t  fdopen;
119 #define NUMFDESC 64
120 
121 #define CDEV_MAJOR 22
122 static struct dev_ops fildesc_ops = {
123 	{ "FD", 0, 0 },
124 	.d_open =	fdopen,
125 };
126 
127 /*
128  * Descriptor management.
129  */
130 #ifndef NFILELIST_HEADS
131 #define NFILELIST_HEADS		257	/* primary number */
132 #endif
133 
134 struct filelist_head {
135 	struct spinlock		spin;
136 	struct filelist		list;
137 } __cachealign;
138 
139 static struct filelist_head	filelist_heads[NFILELIST_HEADS];
140 
141 static int nfiles;		/* actual number of open files */
142 extern int cmask;
143 
144 struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token);
145 
146 /*
147  * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
148  *
149  * must be called with fdp->fd_spin exclusively held
150  */
151 static __inline
152 void
fdfixup_locked(struct filedesc * fdp,int fd)153 fdfixup_locked(struct filedesc *fdp, int fd)
154 {
155 	if (fd < fdp->fd_freefile) {
156 	       fdp->fd_freefile = fd;
157 	}
158 	while (fdp->fd_lastfile >= 0 &&
159 	       fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
160 	       fdp->fd_files[fdp->fd_lastfile].reserved == 0
161 	) {
162 		--fdp->fd_lastfile;
163 	}
164 }
165 
166 /*
167  * Clear the fd thread caches for this fdnode.
168  *
169  * If match_fdc is NULL, all thread caches of fdn will be cleared.
170  * The caller must hold fdp->fd_spin exclusively.  The threads caching
171  * the descriptor do not have to be the current thread.  The (status)
172  * argument is ignored.
173  *
174  * If match_fdc is not NULL, only the match_fdc's cache will be cleared.
175  * The caller must hold fdp->fd_spin shared and match_fdc must match a
176  * fdcache entry in curthread.  match_fdc has been locked by the caller
177  * and had the specified (status).
178  *
179  * Since we are matching against a fp in the fdp (which must still be present
180  * at this time), fp will have at least two refs on any match and we can
181  * decrement the count trivially.
182  */
183 static
184 void
fclearcache(struct fdnode * fdn,struct fdcache * match_fdc,int status)185 fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status)
186 {
187 	struct fdcache *fdc;
188 	struct file *fp;
189 	int i;
190 
191 	/*
192 	 * match_fdc == NULL	We are cleaning out all tdcache entries
193 	 *			for the fdn and hold fdp->fd_spin exclusively.
194 	 *			This can race against the target threads
195 	 *			cleaning out specific entries.
196 	 *
197 	 * match_fdc != NULL	We are cleaning out a specific tdcache
198 	 *			entry on behalf of the owning thread
199 	 *			and hold fdp->fd_spin shared.  The thread
200 	 *			has already locked the entry.  This cannot
201 	 *			race.
202 	 */
203 	fp = fdn->fp;
204 	for (i = 0; i < NTDCACHEFD; ++i) {
205 		if ((fdc = fdn->tdcache[i]) == NULL)
206 			continue;
207 
208 		/*
209 		 * If match_fdc is non-NULL we are being asked to
210 		 * clear a specific fdc owned by curthread.  There must
211 		 * be exactly one match.  The caller has already locked
212 		 * the cache entry and will dispose of the lock after
213 		 * we return.
214 		 *
215 		 * Since we also have a shared lock on fdp, we
216 		 * can do this without atomic ops.
217 		 */
218 		if (match_fdc) {
219 			if (fdc != match_fdc)
220 				continue;
221 			fdn->tdcache[i] = NULL;
222 			KASSERT(fp == fdc->fp,
223 				("fclearcache(1): fp mismatch %p/%p\n",
224 				fp, fdc->fp));
225 			fdc->fp = NULL;
226 			fdc->fd = -1;
227 
228 			/*
229 			 * status can be 0 or 2.  If 2 the ref is borrowed,
230 			 * if 0 the ref is not borrowed and we have to drop
231 			 * it.
232 			 */
233 			if (status == 0)
234 				atomic_add_int(&fp->f_count, -1);
235 			fdn->isfull = 0;	/* heuristic */
236 			return;
237 		}
238 
239 		/*
240 		 * Otherwise we hold an exclusive spin-lock and can only
241 		 * race thread consumers borrowing cache entries.
242 		 *
243 		 * Acquire the lock and dispose of the entry.  We have to
244 		 * spin until we get the lock.
245 		 */
246 		for (;;) {
247 			status = atomic_swap_int(&fdc->locked, 1);
248 			if (status == 1) {	/* foreign lock, retry */
249 				cpu_pause();
250 				continue;
251 			}
252 			fdn->tdcache[i] = NULL;
253 			KASSERT(fp == fdc->fp,
254 				("fclearcache(2): fp mismatch %p/%p\n",
255 				fp, fdc->fp));
256 			fdc->fp = NULL;
257 			fdc->fd = -1;
258 			if (status == 0)
259 				atomic_add_int(&fp->f_count, -1);
260 			fdn->isfull = 0;	/* heuristic */
261 			atomic_swap_int(&fdc->locked, 0);
262 			break;
263 		}
264 	}
265 	KKASSERT(match_fdc == NULL);
266 }
267 
268 /*
269  * Retrieve the fp for the specified fd given the specified file descriptor
270  * table.  The fdp does not have to be owned by the current process.
271  * If flags != -1, fp->f_flag must contain at least one of the flags.
272  *
273  * This function is not able to cache the fp.
274  */
275 struct file *
holdfp_fdp(struct filedesc * fdp,int fd,int flag)276 holdfp_fdp(struct filedesc *fdp, int fd, int flag)
277 {
278 	struct file *fp;
279 
280 	spin_lock_shared(&fdp->fd_spin);
281 	if (((u_int)fd) < fdp->fd_nfiles) {
282 		fp = fdp->fd_files[fd].fp;	/* can be NULL */
283 		if (fp) {
284 			if ((fp->f_flag & flag) == 0 && flag != -1) {
285 				fp = NULL;
286 			} else {
287 				fhold(fp);
288 			}
289 		}
290 	} else {
291 		fp = NULL;
292 	}
293 	spin_unlock_shared(&fdp->fd_spin);
294 
295 	return fp;
296 }
297 
298 struct file *
holdfp_fdp_locked(struct filedesc * fdp,int fd,int flag)299 holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag)
300 {
301 	struct file *fp;
302 
303 	if (((u_int)fd) < fdp->fd_nfiles) {
304 		fp = fdp->fd_files[fd].fp;	/* can be NULL */
305 		if (fp) {
306 			if ((fp->f_flag & flag) == 0 && flag != -1) {
307 				fp = NULL;
308 			} else {
309 				fhold(fp);
310 			}
311 		}
312 	} else {
313 		fp = NULL;
314 	}
315 	return fp;
316 }
317 
318 /*
319  * Acquire the fp for the specified file descriptor, using the thread
320  * cache if possible and caching it if possible.
321  *
322  * td must be the curren thread.
323  */
324 static
325 struct file *
_holdfp_cache(thread_t td,int fd)326 _holdfp_cache(thread_t td, int fd)
327 {
328 	struct filedesc *fdp;
329 	struct fdcache *fdc;
330 	struct fdcache *best;
331 	struct fdnode *fdn;
332 	struct file *fp;
333 	int status;
334 	int delta;
335 	int i;
336 
337 	/*
338 	 * Fast
339 	 */
340 	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
341 		if (fdc->fd != fd || fdc->fp == NULL)
342 			continue;
343 		status = atomic_swap_int(&fdc->locked, 1);
344 
345 		/*
346 		 * If someone else has locked our cache entry they are in
347 		 * the middle of clearing it, skip the entry.
348 		 */
349 		if (status == 1)
350 			continue;
351 
352 		/*
353 		 * We have locked the entry, but if it no longer matches
354 		 * restore the previous state (0 or 2) and skip the entry.
355 		 */
356 		if (fdc->fd != fd || fdc->fp == NULL) {
357 			atomic_swap_int(&fdc->locked, status);
358 			continue;
359 		}
360 
361 		/*
362 		 * We have locked a valid entry.  We can borrow the ref
363 		 * for a mode 0 entry.  We can get a valid fp for a mode
364 		 * 2 entry but not borrow the ref.
365 		 */
366 		if (status == 0) {
367 			fp = fdc->fp;
368 			fdc->lru = ++td->td_fdcache_lru;
369 			atomic_swap_int(&fdc->locked, 2);
370 
371 			return fp;
372 		}
373 		if (status == 2) {
374 			fp = fdc->fp;
375 			fhold(fp);
376 			fdc->lru = ++td->td_fdcache_lru;
377 			atomic_swap_int(&fdc->locked, 2);
378 
379 			return fp;
380 		}
381 		KKASSERT(0);
382 	}
383 
384 	/*
385 	 * Lookup the descriptor the slow way.  This can contend against
386 	 * modifying operations in a multi-threaded environment and cause
387 	 * cache line ping ponging otherwise.
388 	 */
389 	fdp = td->td_proc->p_fd;
390 	spin_lock_shared(&fdp->fd_spin);
391 
392 	if (((u_int)fd) < fdp->fd_nfiles) {
393 		fp = fdp->fd_files[fd].fp;	/* can be NULL */
394 		if (fp) {
395 			fhold(fp);
396 			if (fdp->fd_files[fd].isfull == 0)
397 				goto enter;
398 		}
399 	} else {
400 		fp = NULL;
401 	}
402 	spin_unlock_shared(&fdp->fd_spin);
403 
404 	return fp;
405 
406 	/*
407 	 * We found a valid fp and held it, fdp is still shared locked.
408 	 * Enter the fp into the per-thread cache.  Find the oldest entry
409 	 * via lru, or an empty entry.
410 	 *
411 	 * Because fdp's spinlock is held (shared is fine), no other
412 	 * thread should be in the middle of clearing our selected entry.
413 	 */
414 enter:
415 	best = &td->td_fdcache[0];
416 	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
417 		if (fdc->fp == NULL) {
418 			best = fdc;
419 			break;
420 		}
421 		delta = fdc->lru - best->lru;
422 		if (delta < 0)
423 			best = fdc;
424 	}
425 
426 	/*
427 	 * Replace best
428 	 *
429 	 * Don't enter into the cache if we cannot get the lock.
430 	 */
431 	status = atomic_swap_int(&best->locked, 1);
432 	if (status == 1)
433 		goto done;
434 
435 	/*
436 	 * Clear the previous cache entry if present
437 	 */
438 	if (best->fp) {
439 		KKASSERT(best->fd >= 0);
440 		fclearcache(&fdp->fd_files[best->fd], best, status);
441 	}
442 
443 	/*
444 	 * Create our new cache entry.  This entry is 'safe' until we tie
445 	 * into the fdnode.  If we cannot tie in, we will clear the entry.
446 	 */
447 	best->fd = fd;
448 	best->fp = fp;
449 	best->lru = ++td->td_fdcache_lru;
450 	best->locked = 2;			/* borrowed ref */
451 
452 	fdn = &fdp->fd_files[fd];
453 	for (i = 0; i < NTDCACHEFD; ++i) {
454 		if (fdn->tdcache[i] == NULL &&
455 		    atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) {
456 			goto done;
457 		}
458 	}
459 	fdn->isfull = 1;			/* no space */
460 	best->fd = -1;
461 	best->fp = NULL;
462 	best->locked = 0;
463 done:
464 	spin_unlock_shared(&fdp->fd_spin);
465 
466 	return fp;
467 }
468 
469 /*
470  * holdfp(), bypassing the cache in order to also be able to return
471  * the descriptor flags.  A bit of a hack.
472  */
473 static
474 struct file *
_holdfp2(thread_t td,int fd,char * fflagsp)475 _holdfp2(thread_t td, int fd, char *fflagsp)
476 {
477 	struct filedesc *fdp;
478 	struct file *fp;
479 
480 	/*
481 	 * Lookup the descriptor the slow way.  This can contend against
482 	 * modifying operations in a multi-threaded environment and cause
483 	 * cache line ping ponging otherwise.
484 	 */
485 	fdp = td->td_proc->p_fd;
486 	spin_lock_shared(&fdp->fd_spin);
487 
488 	if (((u_int)fd) < fdp->fd_nfiles) {
489 		fp = fdp->fd_files[fd].fp;	/* can be NULL */
490 		if (fp) {
491 			*fflagsp = fdp->fd_files[fd].fileflags;
492 			fhold(fp);
493 		}
494 	} else {
495 		fp = NULL;
496 	}
497 	spin_unlock_shared(&fdp->fd_spin);
498 
499 	return fp;
500 }
501 
502 
503 /*
504  * Drop the file pointer and return to the thread cache if possible.
505  *
506  * Caller must not hold fdp's spin lock.
507  * td must be the current thread.
508  */
509 void
dropfp(thread_t td,int fd,struct file * fp)510 dropfp(thread_t td, int fd, struct file *fp)
511 {
512 	struct filedesc *fdp;
513 	struct fdcache *fdc;
514 	int status;
515 
516 	fdp = td->td_proc->p_fd;
517 
518 	/*
519 	 * If our placeholder is still present we can re-cache the ref.
520 	 *
521 	 * Note that we can race an fclearcache().
522 	 */
523 	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
524 		if (fdc->fp != fp || fdc->fd != fd)
525 			continue;
526 		status = atomic_swap_int(&fdc->locked, 1);
527 		switch(status) {
528 		case 0:
529 			/*
530 			 * Not in mode 2, fdrop fp without caching.
531 			 */
532 			atomic_swap_int(&fdc->locked, 0);
533 			break;
534 		case 1:
535 			/*
536 			 * Not in mode 2, locked by someone else.
537 			 * fdrop fp without caching.
538 			 */
539 			break;
540 		case 2:
541 			/*
542 			 * Intact borrowed ref, return to mode 0
543 			 * indicating that we have returned the ref.
544 			 *
545 			 * Return the borrowed ref (2->1->0)
546 			 */
547 			if (fdc->fp == fp && fdc->fd == fd) {
548 				atomic_swap_int(&fdc->locked, 0);
549 				return;
550 			}
551 			atomic_swap_int(&fdc->locked, 2);
552 			break;
553 		}
554 	}
555 
556 	/*
557 	 * Failed to re-cache, drop the fp without caching.
558 	 */
559 	fdrop(fp);
560 }
561 
562 /*
563  * Clear all descriptors cached in the per-thread fd cache for
564  * the specified thread.
565  *
566  * Caller must not hold p_fd->spin.  This function will temporarily
567  * obtain a shared spin lock.
568  */
569 void
fexitcache(thread_t td)570 fexitcache(thread_t td)
571 {
572 	struct filedesc *fdp;
573 	struct fdcache *fdc;
574 	int status;
575 	int i;
576 
577 	if (td->td_proc == NULL)
578 		return;
579 	fdp = td->td_proc->p_fd;
580 	if (fdp == NULL)
581 		return;
582 
583 	/*
584 	 * A shared lock is sufficient as the caller controls td and we
585 	 * are only clearing td's cache.
586 	 */
587 	spin_lock_shared(&fdp->fd_spin);
588 	for (i = 0; i < NFDCACHE; ++i) {
589 		fdc = &td->td_fdcache[i];
590 		if (fdc->fp) {
591 			status = atomic_swap_int(&fdc->locked, 1);
592 			if (status == 1) {
593 				cpu_pause();
594 				--i;
595 				continue;
596 			}
597 			if (fdc->fp) {
598 				KKASSERT(fdc->fd >= 0);
599 				fclearcache(&fdp->fd_files[fdc->fd], fdc,
600 					    status);
601 			}
602 			atomic_swap_int(&fdc->locked, 0);
603 		}
604 	}
605 	spin_unlock_shared(&fdp->fd_spin);
606 }
607 
608 static __inline struct filelist_head *
fp2filelist(const struct file * fp)609 fp2filelist(const struct file *fp)
610 {
611 	u_int i;
612 
613 	i = (u_int)(uintptr_t)fp % NFILELIST_HEADS;
614 	return &filelist_heads[i];
615 }
616 
617 static __inline
618 struct plimit *
readplimits(struct proc * p)619 readplimits(struct proc *p)
620 {
621 	thread_t td = curthread;
622 	struct plimit *limit;
623 
624 	limit = td->td_limit;
625 	if (limit != p->p_limit) {
626 		spin_lock_shared(&p->p_spin);
627 		limit = p->p_limit;
628 		atomic_add_int(&limit->p_refcnt, 1);
629 		spin_unlock_shared(&p->p_spin);
630 		if (td->td_limit)
631 			plimit_free(td->td_limit);
632 		td->td_limit = limit;
633 	}
634 	return limit;
635 }
636 
637 /*
638  * System calls on descriptors.
639  */
640 int
sys_getdtablesize(struct sysmsg * sysmsg,const struct getdtablesize_args * uap)641 sys_getdtablesize(struct sysmsg *sysmsg, const struct getdtablesize_args *uap)
642 {
643 	struct proc *p = curproc;
644 	struct plimit *limit = readplimits(p);
645 	int dtsize;
646 
647 	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
648 		dtsize = INT_MAX;
649 	else
650 		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
651 
652 	if (dtsize > maxfilesperproc)
653 		dtsize = maxfilesperproc;
654 	if (dtsize < minfilesperproc)
655 		dtsize = minfilesperproc;
656 	if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
657 		dtsize = maxfilesperuser;
658 	sysmsg->sysmsg_result = dtsize;
659 	return (0);
660 }
661 
662 /*
663  * Duplicate a file descriptor to a particular value.
664  *
665  * note: keep in mind that a potential race condition exists when closing
666  * descriptors from a shared descriptor table (via rfork).
667  */
668 int
sys_dup2(struct sysmsg * sysmsg,const struct dup2_args * uap)669 sys_dup2(struct sysmsg *sysmsg, const struct dup2_args *uap)
670 {
671 	int error;
672 	int fd = 0;
673 
674 	error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
675 	sysmsg->sysmsg_fds[0] = fd;
676 
677 	return (error);
678 }
679 
680 /*
681  * Duplicate a file descriptor.
682  */
683 int
sys_dup(struct sysmsg * sysmsg,const struct dup_args * uap)684 sys_dup(struct sysmsg *sysmsg, const struct dup_args *uap)
685 {
686 	int error;
687 	int fd = 0;
688 
689 	error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
690 	sysmsg->sysmsg_fds[0] = fd;
691 
692 	return (error);
693 }
694 
695 /*
696  * MPALMOSTSAFE - acquires mplock for fp operations
697  */
698 int
kern_fcntl(int fd,int cmd,union fcntl_dat * dat,struct ucred * cred)699 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
700 {
701 	struct thread *td = curthread;
702 	struct proc *p = td->td_proc;
703 	struct file *fp;
704 	struct vnode *vp;
705 	u_int newmin;
706 	u_int oflags;
707 	u_int nflags;
708 	int closedcounter;
709 	int tmp, error, flg = F_POSIX;
710 
711 	KKASSERT(p);
712 
713 	/*
714 	 * Operations on file descriptors that do not require a file pointer.
715 	 */
716 	switch (cmd) {
717 	case F_GETFD:
718 		error = fgetfdflags(p->p_fd, fd, &tmp);
719 		if (error == 0)
720 			dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
721 		return (error);
722 
723 	case F_SETFD:
724 		if (dat->fc_cloexec & FD_CLOEXEC)
725 			error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
726 		else
727 			error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
728 		return (error);
729 	case F_DUPFD:
730 		newmin = dat->fc_fd;
731 		error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin,
732 		    &dat->fc_fd);
733 		return (error);
734 	case F_DUPFD_CLOEXEC:
735 		newmin = dat->fc_fd;
736 		error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL,
737 		    fd, newmin, &dat->fc_fd);
738 		return (error);
739 	case F_DUP2FD:
740 		newmin = dat->fc_fd;
741 		error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd);
742 		return (error);
743 	case F_DUP2FD_CLOEXEC:
744 		newmin = dat->fc_fd;
745 		error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin,
746 				 &dat->fc_fd);
747 		return (error);
748 	default:
749 		break;
750 	}
751 
752 	/*
753 	 * Operations on file pointers
754 	 */
755 	closedcounter = p->p_fd->fd_closedcounter;
756 	if ((fp = holdfp(td, fd, -1)) == NULL)
757 		return (EBADF);
758 
759 	switch (cmd) {
760 	case F_GETFL:
761 		dat->fc_flags = OFLAGS(fp->f_flag);
762 		error = 0;
763 		break;
764 
765 	case F_SETFL:
766 		oflags = fp->f_flag;
767 		nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
768 		nflags |= oflags & ~FCNTLFLAGS;
769 
770 		error = 0;
771 		if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
772 			error = EINVAL;
773 		if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
774 			tmp = nflags & FASYNC;
775 			error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
776 					 cred, NULL);
777 		}
778 
779 		/*
780 		 * If no error, must be atomically set.
781 		 */
782 		while (error == 0) {
783 			oflags = fp->f_flag;
784 			cpu_ccfence();
785 			nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS);
786 			if (atomic_cmpset_int(&fp->f_flag, oflags, nflags))
787 				break;
788 			cpu_pause();
789 		}
790 		break;
791 
792 	case F_GETOWN:
793 		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
794 				 cred, NULL);
795 		break;
796 
797 	case F_SETOWN:
798 		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
799 				 cred, NULL);
800 		break;
801 
802 	case F_SETLKW:
803 		flg |= F_WAIT;
804 		/* Fall into F_SETLK */
805 
806 	case F_SETLK:
807 		if (fp->f_type != DTYPE_VNODE) {
808 			error = EBADF;
809 			break;
810 		}
811 		vp = (struct vnode *)fp->f_data;
812 
813 		/*
814 		 * copyin/lockop may block
815 		 */
816 		if (dat->fc_flock.l_whence == SEEK_CUR)
817 			dat->fc_flock.l_start += fp->f_offset;
818 
819 		switch (dat->fc_flock.l_type) {
820 		case F_RDLCK:
821 			if ((fp->f_flag & FREAD) == 0) {
822 				error = EBADF;
823 				break;
824 			}
825 			if (p->p_leader->p_advlock_flag == 0)
826 				p->p_leader->p_advlock_flag = 1;
827 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
828 					    &dat->fc_flock, flg);
829 			break;
830 		case F_WRLCK:
831 			if ((fp->f_flag & FWRITE) == 0) {
832 				error = EBADF;
833 				break;
834 			}
835 			if (p->p_leader->p_advlock_flag == 0)
836 				p->p_leader->p_advlock_flag = 1;
837 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
838 					    &dat->fc_flock, flg);
839 			break;
840 		case F_UNLCK:
841 			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
842 					    &dat->fc_flock, F_POSIX);
843 			break;
844 		default:
845 			error = EINVAL;
846 			break;
847 		}
848 
849 		/*
850 		 * It is possible to race a close() on the descriptor while
851 		 * we were blocked getting the lock.  If this occurs the
852 		 * close might not have caught the lock.
853 		 */
854 		if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) {
855 			dat->fc_flock.l_whence = SEEK_SET;
856 			dat->fc_flock.l_start = 0;
857 			dat->fc_flock.l_len = 0;
858 			dat->fc_flock.l_type = F_UNLCK;
859 			VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
860 				    F_UNLCK, &dat->fc_flock, F_POSIX);
861 		}
862 		break;
863 
864 	case F_GETLK:
865 		if (fp->f_type != DTYPE_VNODE) {
866 			error = EBADF;
867 			break;
868 		}
869 		vp = (struct vnode *)fp->f_data;
870 		/*
871 		 * copyin/lockop may block
872 		 */
873 		if (dat->fc_flock.l_type != F_RDLCK &&
874 		    dat->fc_flock.l_type != F_WRLCK &&
875 		    dat->fc_flock.l_type != F_UNLCK) {
876 			error = EINVAL;
877 			break;
878 		}
879 		if (dat->fc_flock.l_whence == SEEK_CUR)
880 			dat->fc_flock.l_start += fp->f_offset;
881 		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
882 				    &dat->fc_flock, F_POSIX);
883 		break;
884 
885 	case F_GETPATH:
886 		if (fp->f_type != DTYPE_VNODE) {
887 			error = EBADF;
888 			break;
889 		}
890 
891 		/*
892 		 * cache_fullpath() itself is limited to MAXPATHLEN so we
893 		 * do not need an explicit length check, but we do have
894 		 * to munge the error to ERANGE as per fcntl.2
895 		 */
896 		error = cache_fullpath(p, &fp->f_nchandle, NULL,
897 				       &dat->fc_path.ptr, &dat->fc_path.buf, 1);
898 		if (error == ENOMEM)
899 			error = ERANGE;
900 		break;
901 
902 	default:
903 		error = EINVAL;
904 		break;
905 	}
906 
907 	fdrop(fp);
908 	return (error);
909 }
910 
911 /*
912  * The file control system call.
913  */
914 int
sys_fcntl(struct sysmsg * sysmsg,const struct fcntl_args * uap)915 sys_fcntl(struct sysmsg *sysmsg, const struct fcntl_args *uap)
916 {
917 	union fcntl_dat dat;
918 	int error;
919 
920 	switch (uap->cmd) {
921 	case F_DUPFD:
922 	case F_DUP2FD:
923 	case F_DUPFD_CLOEXEC:
924 	case F_DUP2FD_CLOEXEC:
925 		dat.fc_fd = uap->arg;
926 		break;
927 	case F_SETFD:
928 		dat.fc_cloexec = uap->arg;
929 		break;
930 	case F_SETFL:
931 		dat.fc_flags = uap->arg;
932 		break;
933 	case F_SETOWN:
934 		dat.fc_owner = uap->arg;
935 		break;
936 	case F_SETLKW:
937 	case F_SETLK:
938 	case F_GETLK:
939 		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
940 			       sizeof(struct flock));
941 		if (error)
942 			return (error);
943 		break;
944 	}
945 
946 	error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);
947 
948 	if (error == 0) {
949 		switch (uap->cmd) {
950 		case F_DUPFD:
951 		case F_DUP2FD:
952 		case F_DUPFD_CLOEXEC:
953 		case F_DUP2FD_CLOEXEC:
954 			sysmsg->sysmsg_result = dat.fc_fd;
955 			break;
956 		case F_GETFD:
957 			sysmsg->sysmsg_result = dat.fc_cloexec;
958 			break;
959 		case F_GETFL:
960 			sysmsg->sysmsg_result = dat.fc_flags;
961 			break;
962 		case F_GETOWN:
963 			sysmsg->sysmsg_result = dat.fc_owner;
964 			break;
965 		case F_GETLK:
966 			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
967 			    sizeof(struct flock));
968 			break;
969 		case F_GETPATH:
970 			error = copyout(dat.fc_path.ptr, (caddr_t)uap->arg,
971 					strlen(dat.fc_path.ptr) + 1);
972 			kfree(dat.fc_path.buf, M_TEMP);
973 			break;
974 		}
975 	}
976 
977 	return (error);
978 }
979 
980 /*
981  * Common code for dup, dup2, and fcntl(F_DUPFD).
982  *
983  * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and
984  * DUP_CLOEXEC.
985  *
986  * DUP_FCNTL is for handling EINVAL vs. EBADF differences between
987  * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX).
988  * The next two flags are mutually exclusive, and the fourth is optional.
989  * DUP_FIXED tells kern_dup() to destructively dup over an existing file
990  * descriptor if "new" is already open.  DUP_VARIABLE tells kern_dup()
991  * to find the lowest unused file descriptor that is greater than or
992  * equal to "new".  DUP_CLOEXEC, which works with either of the first
993  * two flags, sets the close-on-exec flag on the "new" file descriptor.
994  */
995 int
kern_dup(int flags,int old,int new,int * res)996 kern_dup(int flags, int old, int new, int *res)
997 {
998 	struct thread *td = curthread;
999 	struct proc *p = td->td_proc;
1000 	struct plimit *limit = readplimits(p);
1001 	struct filedesc *fdp = p->p_fd;
1002 	struct file *fp;
1003 	struct file *delfp;
1004 	int oldflags;
1005 	int holdleaders;
1006 	int dtsize;
1007 	int error, newfd;
1008 
1009 	/*
1010 	 * Verify that we have a valid descriptor to dup from and
1011 	 * possibly to dup to. When the new descriptor is out of
1012 	 * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must
1013 	 * return EINVAL, while dup2() returns EBADF in
1014 	 * this case.
1015 	 *
1016 	 * NOTE: maxfilesperuser is not applicable to dup()
1017 	 */
1018 retry:
1019 	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
1020 		dtsize = INT_MAX;
1021 	else
1022 		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
1023 	if (dtsize > maxfilesperproc)
1024 		dtsize = maxfilesperproc;
1025 	if (dtsize < minfilesperproc)
1026 		dtsize = minfilesperproc;
1027 
1028 	if (new < 0 || new >= dtsize)
1029 		return (flags & DUP_FCNTL ? EINVAL : EBADF);
1030 
1031 	spin_lock(&fdp->fd_spin);
1032 	if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
1033 		spin_unlock(&fdp->fd_spin);
1034 		return (EBADF);
1035 	}
1036 	if ((flags & DUP_FIXED) && old == new) {
1037 		*res = new;
1038 		if (flags & DUP_CLOEXEC)
1039 			fdp->fd_files[new].fileflags |= UF_EXCLOSE;
1040 		spin_unlock(&fdp->fd_spin);
1041 		return (0);
1042 	}
1043 	fp = fdp->fd_files[old].fp;
1044 	oldflags = fdp->fd_files[old].fileflags;
1045 	fhold(fp);
1046 
1047 	/*
1048 	 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
1049 	 * if the requested descriptor is beyond the current table size.
1050 	 *
1051 	 * This can block.  Retry if the source descriptor no longer matches
1052 	 * or if our expectation in the expansion case races.
1053 	 *
1054 	 * If we are not expanding or allocating a new decriptor, then reset
1055 	 * the target descriptor to a reserved state so we have a uniform
1056 	 * setup for the next code block.
1057 	 */
1058 	if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) {
1059 		error = fdalloc_locked(p, fdp, new, &newfd);
1060 		if (error) {
1061 			spin_unlock(&fdp->fd_spin);
1062 			fdrop(fp);
1063 			return (error);
1064 		}
1065 		/*
1066 		 * Check for ripout
1067 		 */
1068 		if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
1069 			fsetfd_locked(fdp, NULL, newfd);
1070 			spin_unlock(&fdp->fd_spin);
1071 			fdrop(fp);
1072 			goto retry;
1073 		}
1074 		/*
1075 		 * Check for expansion race
1076 		 */
1077 		if ((flags & DUP_VARIABLE) == 0 && new != newfd) {
1078 			fsetfd_locked(fdp, NULL, newfd);
1079 			spin_unlock(&fdp->fd_spin);
1080 			fdrop(fp);
1081 			goto retry;
1082 		}
1083 		/*
1084 		 * Check for ripout, newfd reused old (this case probably
1085 		 * can't occur).
1086 		 */
1087 		if (old == newfd) {
1088 			fsetfd_locked(fdp, NULL, newfd);
1089 			spin_unlock(&fdp->fd_spin);
1090 			fdrop(fp);
1091 			goto retry;
1092 		}
1093 		new = newfd;
1094 		delfp = NULL;
1095 	} else {
1096 		if (fdp->fd_files[new].reserved) {
1097 			spin_unlock(&fdp->fd_spin);
1098 			fdrop(fp);
1099 			kprintf("Warning: dup(): target descriptor %d is "
1100 				"reserved, waiting for it to be resolved\n",
1101 				new);
1102 			tsleep(fdp, 0, "fdres", hz);
1103 			goto retry;
1104 		}
1105 
1106 		/*
1107 		 * If the target descriptor was never allocated we have
1108 		 * to allocate it.  If it was we have to clean out the
1109 		 * old descriptor.  delfp inherits the ref from the
1110 		 * descriptor table.
1111 		 */
1112 		++fdp->fd_closedcounter;
1113 		fclearcache(&fdp->fd_files[new], NULL, 0);
1114 		++fdp->fd_closedcounter;
1115 		delfp = fdp->fd_files[new].fp;
1116 		fdp->fd_files[new].fp = NULL;
1117 		fdp->fd_files[new].reserved = 1;
1118 		if (delfp == NULL) {
1119 			fdreserve_locked(fdp, new, 1);
1120 			if (new > fdp->fd_lastfile)
1121 				fdp->fd_lastfile = new;
1122 		}
1123 
1124 	}
1125 
1126 	/*
1127 	 * NOTE: still holding an exclusive spinlock
1128 	 */
1129 
1130 	/*
1131 	 * If a descriptor is being overwritten we may hve to tell
1132 	 * fdfree() to sleep to ensure that all relevant process
1133 	 * leaders can be traversed in closef().
1134 	 */
1135 	if (delfp != NULL && p->p_fdtol != NULL) {
1136 		fdp->fd_holdleaderscount++;
1137 		holdleaders = 1;
1138 	} else {
1139 		holdleaders = 0;
1140 	}
1141 	KASSERT(delfp == NULL || (flags & DUP_FIXED),
1142 		("dup() picked an open file"));
1143 
1144 	/*
1145 	 * Duplicate the source descriptor, update lastfile.  If the new
1146 	 * descriptor was not allocated and we aren't replacing an existing
1147 	 * descriptor we have to mark the descriptor as being in use.
1148 	 *
1149 	 * The fd_files[] array inherits fp's hold reference.
1150 	 */
1151 	fsetfd_locked(fdp, fp, new);
1152 	if ((flags & DUP_CLOEXEC) != 0)
1153 		fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE;
1154 	else
1155 		fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
1156 	spin_unlock(&fdp->fd_spin);
1157 	fdrop(fp);
1158 	*res = new;
1159 
1160 	/*
1161 	 * If we dup'd over a valid file, we now own the reference to it
1162 	 * and must dispose of it using closef() semantics (as if a
1163 	 * close() were performed on it).
1164 	 */
1165 	if (delfp) {
1166 		if (SLIST_FIRST(&delfp->f_klist))
1167 			knote_fdclose(delfp, fdp, new);
1168 		closef(delfp, p);
1169 		if (holdleaders) {
1170 			spin_lock(&fdp->fd_spin);
1171 			fdp->fd_holdleaderscount--;
1172 			if (fdp->fd_holdleaderscount == 0 &&
1173 			    fdp->fd_holdleaderswakeup != 0) {
1174 				fdp->fd_holdleaderswakeup = 0;
1175 				spin_unlock(&fdp->fd_spin);
1176 				wakeup(&fdp->fd_holdleaderscount);
1177 			} else {
1178 				spin_unlock(&fdp->fd_spin);
1179 			}
1180 		}
1181 	}
1182 	return (0);
1183 }
1184 
1185 /*
1186  * If sigio is on the list associated with a process or process group,
1187  * disable signalling from the device, remove sigio from the list and
1188  * free sigio.
1189  */
1190 void
funsetown(struct sigio ** sigiop)1191 funsetown(struct sigio **sigiop)
1192 {
1193 	struct pgrp *pgrp;
1194 	struct proc *p;
1195 	struct sigio *sigio;
1196 
1197 	if ((sigio = *sigiop) != NULL) {
1198 		lwkt_gettoken(&sigio_token);	/* protect sigio */
1199 		KKASSERT(sigiop == sigio->sio_myref);
1200 		sigio = *sigiop;
1201 		*sigiop = NULL;
1202 		lwkt_reltoken(&sigio_token);
1203 	}
1204 	if (sigio == NULL)
1205 		return;
1206 
1207 	if (sigio->sio_pgid < 0) {
1208 		pgrp = sigio->sio_pgrp;
1209 		sigio->sio_pgrp = NULL;
1210 		lwkt_gettoken(&pgrp->pg_token);
1211 		SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
1212 		lwkt_reltoken(&pgrp->pg_token);
1213 		pgrel(pgrp);
1214 	} else /* if ((*sigiop)->sio_pgid > 0) */ {
1215 		p = sigio->sio_proc;
1216 		sigio->sio_proc = NULL;
1217 		PHOLD(p);
1218 		lwkt_gettoken(&p->p_token);
1219 		SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
1220 		lwkt_reltoken(&p->p_token);
1221 		PRELE(p);
1222 	}
1223 	crfree(sigio->sio_ucred);
1224 	sigio->sio_ucred = NULL;
1225 	kfree(sigio, M_SIGIO);
1226 }
1227 
1228 /*
1229  * Free a list of sigio structures.  Caller is responsible for ensuring
1230  * that the list is MPSAFE.
1231  */
1232 void
funsetownlst(struct sigiolst * sigiolst)1233 funsetownlst(struct sigiolst *sigiolst)
1234 {
1235 	struct sigio *sigio;
1236 
1237 	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
1238 		funsetown(sigio->sio_myref);
1239 }
1240 
1241 /*
1242  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
1243  *
1244  * After permission checking, add a sigio structure to the sigio list for
1245  * the process or process group.
1246  */
1247 int
fsetown(pid_t pgid,struct sigio ** sigiop)1248 fsetown(pid_t pgid, struct sigio **sigiop)
1249 {
1250 	struct proc *proc = NULL;
1251 	struct pgrp *pgrp = NULL;
1252 	struct sigio *sigio;
1253 	int error;
1254 
1255 	if (pgid == 0) {
1256 		funsetown(sigiop);
1257 		return (0);
1258 	}
1259 
1260 	if (pgid > 0) {
1261 		proc = pfind(pgid);
1262 		if (proc == NULL) {
1263 			error = ESRCH;
1264 			goto done;
1265 		}
1266 
1267 		/*
1268 		 * Policy - Don't allow a process to FSETOWN a process
1269 		 * in another session.
1270 		 *
1271 		 * Remove this test to allow maximum flexibility or
1272 		 * restrict FSETOWN to the current process or process
1273 		 * group for maximum safety.
1274 		 */
1275 		if (proc->p_session != curproc->p_session) {
1276 			error = EPERM;
1277 			goto done;
1278 		}
1279 	} else /* if (pgid < 0) */ {
1280 		pgrp = pgfind(-pgid);
1281 		if (pgrp == NULL) {
1282 			error = ESRCH;
1283 			goto done;
1284 		}
1285 
1286 		/*
1287 		 * Policy - Don't allow a process to FSETOWN a process
1288 		 * in another session.
1289 		 *
1290 		 * Remove this test to allow maximum flexibility or
1291 		 * restrict FSETOWN to the current process or process
1292 		 * group for maximum safety.
1293 		 */
1294 		if (pgrp->pg_session != curproc->p_session) {
1295 			error = EPERM;
1296 			goto done;
1297 		}
1298 	}
1299 	sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
1300 	if (pgid > 0) {
1301 		KKASSERT(pgrp == NULL);
1302 		lwkt_gettoken(&proc->p_token);
1303 		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
1304 		sigio->sio_proc = proc;
1305 		lwkt_reltoken(&proc->p_token);
1306 	} else {
1307 		KKASSERT(proc == NULL);
1308 		lwkt_gettoken(&pgrp->pg_token);
1309 		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
1310 		sigio->sio_pgrp = pgrp;
1311 		lwkt_reltoken(&pgrp->pg_token);
1312 		pgrp = NULL;
1313 	}
1314 	sigio->sio_pgid = pgid;
1315 	sigio->sio_ucred = crhold(curthread->td_ucred);
1316 	/* It would be convenient if p_ruid was in ucred. */
1317 	sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
1318 	sigio->sio_myref = sigiop;
1319 
1320 	lwkt_gettoken(&sigio_token);
1321 	while (*sigiop)
1322 		funsetown(sigiop);
1323 	*sigiop = sigio;
1324 	lwkt_reltoken(&sigio_token);
1325 	error = 0;
1326 done:
1327 	if (pgrp)
1328 		pgrel(pgrp);
1329 	if (proc)
1330 		PRELE(proc);
1331 	return (error);
1332 }
1333 
1334 /*
1335  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
1336  */
1337 pid_t
fgetown(struct sigio ** sigiop)1338 fgetown(struct sigio **sigiop)
1339 {
1340 	struct sigio *sigio;
1341 	pid_t own;
1342 
1343 	lwkt_gettoken_shared(&sigio_token);
1344 	sigio = *sigiop;
1345 	own = (sigio != NULL ? sigio->sio_pgid : 0);
1346 	lwkt_reltoken(&sigio_token);
1347 
1348 	return (own);
1349 }
1350 
1351 /*
1352  * Close many file descriptors.
1353  */
1354 int
sys_closefrom(struct sysmsg * sysmsg,const struct closefrom_args * uap)1355 sys_closefrom(struct sysmsg *sysmsg, const struct closefrom_args *uap)
1356 {
1357 	return(kern_closefrom(uap->fd));
1358 }
1359 
1360 /*
1361  * Close all file descriptors greater then or equal to fd
1362  */
1363 int
kern_closefrom(int fd)1364 kern_closefrom(int fd)
1365 {
1366 	struct thread *td = curthread;
1367 	struct proc *p = td->td_proc;
1368 	struct filedesc *fdp;
1369 	int error;
1370 	int e2;
1371 
1372 	KKASSERT(p);
1373 	fdp = p->p_fd;
1374 
1375 	if (fd < 0)
1376 		return (EINVAL);
1377 
1378 	/*
1379 	 * NOTE: This function will skip unassociated descriptors and
1380 	 *	 reserved descriptors that have not yet been assigned.
1381 	 *	 fd_lastfile can change as a side effect of kern_close().
1382 	 *
1383 	 * NOTE: We accumulate EINTR errors and return EINTR if any
1384 	 *	 close() returned EINTR.  However, the descriptor is
1385 	 *	 still closed and we do not break out of the loop.
1386 	 */
1387 	error = 0;
1388 	spin_lock(&fdp->fd_spin);
1389 	while (fd <= fdp->fd_lastfile) {
1390 		if (fdp->fd_files[fd].fp != NULL) {
1391 			spin_unlock(&fdp->fd_spin);
1392 			/* ok if this races another close */
1393 			e2 = kern_close(fd);
1394 			if (e2 == EINTR)
1395 				error = EINTR;
1396 			spin_lock(&fdp->fd_spin);
1397 		}
1398 		++fd;
1399 	}
1400 	spin_unlock(&fdp->fd_spin);
1401 
1402 	return error;
1403 }
1404 
1405 /*
1406  * Close a file descriptor.
1407  */
1408 int
sys_close(struct sysmsg * sysmsg,const struct close_args * uap)1409 sys_close(struct sysmsg *sysmsg, const struct close_args *uap)
1410 {
1411 	return(kern_close(uap->fd));
1412 }
1413 
1414 /*
1415  * close() helper
1416  */
1417 int
kern_close(int fd)1418 kern_close(int fd)
1419 {
1420 	struct thread *td = curthread;
1421 	struct proc *p = td->td_proc;
1422 	struct filedesc *fdp;
1423 	struct file *fp;
1424 	int error;
1425 	int holdleaders;
1426 
1427 	KKASSERT(p);
1428 	fdp = p->p_fd;
1429 
1430 	/*
1431 	 * funsetfd*() also clears the fd cache
1432 	 */
1433 	spin_lock(&fdp->fd_spin);
1434 	if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
1435 		spin_unlock(&fdp->fd_spin);
1436 		return (EBADF);
1437 	}
1438 	holdleaders = 0;
1439 	if (p->p_fdtol != NULL) {
1440 		/*
1441 		 * Ask fdfree() to sleep to ensure that all relevant
1442 		 * process leaders can be traversed in closef().
1443 		 */
1444 		fdp->fd_holdleaderscount++;
1445 		holdleaders = 1;
1446 	}
1447 
1448 	/*
1449 	 * we now hold the fp reference that used to be owned by the descriptor
1450 	 * array.
1451 	 */
1452 	spin_unlock(&fdp->fd_spin);
1453 	if (SLIST_FIRST(&fp->f_klist))
1454 		knote_fdclose(fp, fdp, fd);
1455 	error = closef(fp, p);
1456 	if (holdleaders) {
1457 		spin_lock(&fdp->fd_spin);
1458 		fdp->fd_holdleaderscount--;
1459 		if (fdp->fd_holdleaderscount == 0 &&
1460 		    fdp->fd_holdleaderswakeup != 0) {
1461 			fdp->fd_holdleaderswakeup = 0;
1462 			spin_unlock(&fdp->fd_spin);
1463 			wakeup(&fdp->fd_holdleaderscount);
1464 		} else {
1465 			spin_unlock(&fdp->fd_spin);
1466 		}
1467 	}
1468 	return (error);
1469 }
1470 
1471 /*
1472  * shutdown_args(int fd, int how)
1473  */
1474 int
kern_shutdown(int fd,int how)1475 kern_shutdown(int fd, int how)
1476 {
1477 	struct thread *td = curthread;
1478 	struct file *fp;
1479 	int error;
1480 
1481 	if ((fp = holdfp(td, fd, -1)) == NULL)
1482 		return (EBADF);
1483 	error = fo_shutdown(fp, how);
1484 	fdrop(fp);
1485 
1486 	return (error);
1487 }
1488 
1489 /*
1490  * MPALMOSTSAFE
1491  */
1492 int
sys_shutdown(struct sysmsg * sysmsg,const struct shutdown_args * uap)1493 sys_shutdown(struct sysmsg *sysmsg, const struct shutdown_args *uap)
1494 {
1495 	int error;
1496 
1497 	error = kern_shutdown(uap->s, uap->how);
1498 
1499 	return (error);
1500 }
1501 
1502 /*
1503  * fstat() helper
1504  */
1505 int
kern_fstat(int fd,struct stat * ub)1506 kern_fstat(int fd, struct stat *ub)
1507 {
1508 	struct thread *td = curthread;
1509 	struct file *fp;
1510 	int error;
1511 
1512 	if ((fp = holdfp(td, fd, -1)) == NULL)
1513 		return (EBADF);
1514 	error = fo_stat(fp, ub, td->td_ucred);
1515 	fdrop(fp);
1516 
1517 	return (error);
1518 }
1519 
1520 /*
1521  * Return status information about a file descriptor.
1522  */
1523 int
sys_fstat(struct sysmsg * sysmsg,const struct fstat_args * uap)1524 sys_fstat(struct sysmsg *sysmsg, const struct fstat_args *uap)
1525 {
1526 	struct stat st;
1527 	int error;
1528 
1529 	error = kern_fstat(uap->fd, &st);
1530 
1531 	if (error == 0)
1532 		error = copyout(&st, uap->sb, sizeof(st));
1533 	return (error);
1534 }
1535 
1536 /*
1537  * Return pathconf information about a file descriptor.
1538  *
1539  * MPALMOSTSAFE
1540  */
1541 int
sys_fpathconf(struct sysmsg * sysmsg,const struct fpathconf_args * uap)1542 sys_fpathconf(struct sysmsg *sysmsg, const struct fpathconf_args *uap)
1543 {
1544 	struct thread *td = curthread;
1545 	struct file *fp;
1546 	struct vnode *vp;
1547 	int error = 0;
1548 
1549 	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
1550 		return (EBADF);
1551 
1552 	switch (fp->f_type) {
1553 	case DTYPE_PIPE:
1554 	case DTYPE_SOCKET:
1555 		if (uap->name != _PC_PIPE_BUF) {
1556 			error = EINVAL;
1557 		} else {
1558 			sysmsg->sysmsg_result = PIPE_BUF;
1559 			error = 0;
1560 		}
1561 		break;
1562 	case DTYPE_FIFO:
1563 	case DTYPE_VNODE:
1564 		vp = (struct vnode *)fp->f_data;
1565 		error = VOP_PATHCONF(vp, uap->name, &sysmsg->sysmsg_reg);
1566 		break;
1567 	default:
1568 		error = EOPNOTSUPP;
1569 		break;
1570 	}
1571 	fdrop(fp);
1572 	return(error);
1573 }
1574 
1575 /*
1576  * Grow the file table so it can hold through descriptor (want).
1577  *
1578  * The fdp's spinlock must be held exclusively on entry and may be held
1579  * exclusively on return.  The spinlock may be cycled by the routine.
1580  */
1581 static void
fdgrow_locked(struct filedesc * fdp,int want)1582 fdgrow_locked(struct filedesc *fdp, int want)
1583 {
1584 	struct fdnode *newfiles;
1585 	struct fdnode *oldfiles;
1586 	int nf, extra;
1587 
1588 	nf = fdp->fd_nfiles;
1589 	do {
1590 		/* nf has to be of the form 2^n - 1 */
1591 		nf = 2 * nf + 1;
1592 	} while (nf <= want);
1593 
1594 	spin_unlock(&fdp->fd_spin);
1595 	newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
1596 	spin_lock(&fdp->fd_spin);
1597 
1598 	/*
1599 	 * We could have raced another extend while we were not holding
1600 	 * the spinlock.
1601 	 */
1602 	if (fdp->fd_nfiles >= nf) {
1603 		spin_unlock(&fdp->fd_spin);
1604 		kfree(newfiles, M_FILEDESC);
1605 		spin_lock(&fdp->fd_spin);
1606 		return;
1607 	}
1608 	/*
1609 	 * Copy the existing ofile and ofileflags arrays
1610 	 * and zero the new portion of each array.
1611 	 */
1612 	extra = nf - fdp->fd_nfiles;
1613 	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
1614 	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));
1615 
1616 	oldfiles = fdp->fd_files;
1617 	fdp->fd_files = newfiles;
1618 	fdp->fd_nfiles = nf;
1619 
1620 	if (oldfiles != fdp->fd_builtin_files) {
1621 		spin_unlock(&fdp->fd_spin);
1622 		kfree(oldfiles, M_FILEDESC);
1623 		spin_lock(&fdp->fd_spin);
1624 	}
1625 }
1626 
1627 /*
1628  * Number of nodes in right subtree, including the root.
1629  */
1630 static __inline int
right_subtree_size(int n)1631 right_subtree_size(int n)
1632 {
1633 	return (n ^ (n | (n + 1)));
1634 }
1635 
1636 /*
1637  * Bigger ancestor.
1638  */
1639 static __inline int
right_ancestor(int n)1640 right_ancestor(int n)
1641 {
1642 	return (n | (n + 1));
1643 }
1644 
1645 /*
1646  * Smaller ancestor.
1647  */
1648 static __inline int
left_ancestor(int n)1649 left_ancestor(int n)
1650 {
1651 	return ((n & (n + 1)) - 1);
1652 }
1653 
1654 /*
1655  * Traverse the in-place binary tree buttom-up adjusting the allocation
1656  * count so scans can determine where free descriptors are located.
1657  *
1658  * caller must be holding an exclusive spinlock on fdp
1659  */
1660 static
1661 void
fdreserve_locked(struct filedesc * fdp,int fd,int incr)1662 fdreserve_locked(struct filedesc *fdp, int fd, int incr)
1663 {
1664 	while (fd >= 0) {
1665 		fdp->fd_files[fd].allocated += incr;
1666 		KKASSERT(fdp->fd_files[fd].allocated >= 0);
1667 		fd = left_ancestor(fd);
1668 	}
1669 }
1670 
1671 /*
1672  * Reserve a file descriptor for the process.  If no error occurs, the
1673  * caller MUST at some point call fsetfd() or assign a file pointer
1674  * or dispose of the reservation.
1675  */
1676 static
1677 int
fdalloc_locked(struct proc * p,struct filedesc * fdp,int want,int * result)1678 fdalloc_locked(struct proc *p, struct filedesc *fdp, int want, int *result)
1679 {
1680 	struct plimit *limit = readplimits(p);
1681 	struct uidinfo *uip;
1682 	int fd, rsize, rsum, node, lim;
1683 
1684 	/*
1685 	 * Check dtable size limit
1686 	 */
1687 	*result = -1;	/* avoid gcc warnings */
1688 	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
1689 		lim = INT_MAX;
1690 	else
1691 		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
1692 
1693 	if (lim > maxfilesperproc)
1694 		lim = maxfilesperproc;
1695 	if (lim < minfilesperproc)
1696 		lim = minfilesperproc;
1697 	if (want >= lim)
1698 		return (EINVAL);
1699 
1700 	/*
1701 	 * Check that the user has not run out of descriptors (non-root only).
1702 	 * As a safety measure the dtable is allowed to have at least
1703 	 * minfilesperproc open fds regardless of the maxfilesperuser limit.
1704 	 *
1705 	 * This isn't as loose a spec as ui_posixlocks, so we use atomic
1706 	 * ops to force synchronize and recheck if we would otherwise
1707 	 * error.
1708 	 */
1709 	if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
1710 		uip = p->p_ucred->cr_uidinfo;
1711 		if (uip->ui_openfiles > maxfilesperuser) {
1712 			int n;
1713 			int count;
1714 
1715 			count = 0;
1716 			for (n = 0; n < ncpus; ++n) {
1717 				count += atomic_swap_int(
1718 					    &uip->ui_pcpu[n].pu_openfiles, 0);
1719 			}
1720 			atomic_add_int(&uip->ui_openfiles, count);
1721 			if (uip->ui_openfiles > maxfilesperuser) {
1722 				krateprintf(&krate_uidinfo,
1723 					    "Warning: user %d pid %d (%s) "
1724 					    "ran out of file descriptors "
1725 					    "(%d/%d)\n",
1726 					    p->p_ucred->cr_uid, (int)p->p_pid,
1727 					    p->p_comm,
1728 					    uip->ui_openfiles, maxfilesperuser);
1729 				return(ENFILE);
1730 			}
1731 		}
1732 	}
1733 
1734 	/*
1735 	 * Grow the dtable if necessary
1736 	 */
1737 	if (want >= fdp->fd_nfiles)
1738 		fdgrow_locked(fdp, want);
1739 
1740 	/*
1741 	 * Search for a free descriptor starting at the higher
1742 	 * of want or fd_freefile.  If that fails, consider
1743 	 * expanding the ofile array.
1744 	 *
1745 	 * NOTE! the 'allocated' field is a cumulative recursive allocation
1746 	 * count.  If we happen to see a value of 0 then we can shortcut
1747 	 * our search.  Otherwise we run through through the tree going
1748 	 * down branches we know have free descriptor(s) until we hit a
1749 	 * leaf node.  The leaf node will be free but will not necessarily
1750 	 * have an allocated field of 0.
1751 	 */
1752 retry:
1753 	/* move up the tree looking for a subtree with a free node */
1754 	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
1755 	     fd = right_ancestor(fd)) {
1756 		if (fdp->fd_files[fd].allocated == 0)
1757 			goto found;
1758 
1759 		rsize = right_subtree_size(fd);
1760 		if (fdp->fd_files[fd].allocated == rsize)
1761 			continue;	/* right subtree full */
1762 
1763 		/*
1764 		 * Free fd is in the right subtree of the tree rooted at fd.
1765 		 * Call that subtree R.  Look for the smallest (leftmost)
1766 		 * subtree of R with an unallocated fd: continue moving
1767 		 * down the left branch until encountering a full left
1768 		 * subtree, then move to the right.
1769 		 */
1770 		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
1771 			node = fd + rsize;
1772 			rsum += fdp->fd_files[node].allocated;
1773 			if (fdp->fd_files[fd].allocated == rsum + rsize) {
1774 				fd = node;	/* move to the right */
1775 				if (fdp->fd_files[node].allocated == 0)
1776 					goto found;
1777 				rsum = 0;
1778 			}
1779 		}
1780 		goto found;
1781 	}
1782 
1783 	/*
1784 	 * No space in current array.  Expand?
1785 	 */
1786 	if (fdp->fd_nfiles >= lim) {
1787 		return (EMFILE);
1788 	}
1789 	fdgrow_locked(fdp, want);
1790 	goto retry;
1791 
1792 found:
1793 	KKASSERT(fd < fdp->fd_nfiles);
1794 	if (fd > fdp->fd_lastfile)
1795 		fdp->fd_lastfile = fd;
1796 	if (want <= fdp->fd_freefile)
1797 		fdp->fd_freefile = fd;
1798 	*result = fd;
1799 	KKASSERT(fdp->fd_files[fd].fp == NULL);
1800 	KKASSERT(fdp->fd_files[fd].reserved == 0);
1801 	fdp->fd_files[fd].fileflags = 0;
1802 	fdp->fd_files[fd].reserved = 1;
1803 	fdreserve_locked(fdp, fd, 1);
1804 
1805 	return (0);
1806 }
1807 
1808 int
fdalloc(struct proc * p,int want,int * result)1809 fdalloc(struct proc *p, int want, int *result)
1810 {
1811 	struct filedesc *fdp = p->p_fd;
1812 	int error;
1813 
1814 	spin_lock(&fdp->fd_spin);
1815 	error = fdalloc_locked(p, fdp, want, result);
1816 	spin_unlock(&fdp->fd_spin);
1817 
1818 	return error;
1819 }
1820 
1821 /*
1822  * Check to see whether n user file descriptors
1823  * are available to the process p.
1824  */
1825 int
fdavail(struct proc * p,int n)1826 fdavail(struct proc *p, int n)
1827 {
1828 	struct plimit *limit = readplimits(p);
1829 	struct filedesc *fdp = p->p_fd;
1830 	struct fdnode *fdnode;
1831 	int i, lim, last;
1832 
1833 	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
1834 		lim = INT_MAX;
1835 	else
1836 		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
1837 
1838 	if (lim > maxfilesperproc)
1839 		lim = maxfilesperproc;
1840 	if (lim < minfilesperproc)
1841 		lim = minfilesperproc;
1842 
1843 	spin_lock(&fdp->fd_spin);
1844 	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
1845 		spin_unlock(&fdp->fd_spin);
1846 		return (1);
1847 	}
1848 	last = min(fdp->fd_nfiles, lim);
1849 	fdnode = &fdp->fd_files[fdp->fd_freefile];
1850 	for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
1851 		if (fdnode->fp == NULL && --n <= 0) {
1852 			spin_unlock(&fdp->fd_spin);
1853 			return (1);
1854 		}
1855 	}
1856 	spin_unlock(&fdp->fd_spin);
1857 	return (0);
1858 }
1859 
1860 /*
1861  * Revoke open descriptors referencing (f_data, f_type)
1862  *
1863  * Any revoke executed within a prison is only able to
1864  * revoke descriptors for processes within that prison.
1865  *
1866  * Returns 0 on success or an error code.
1867  */
1868 struct fdrevoke_info {
1869 	void *data;
1870 	short type;
1871 	short unused;
1872 	int found;
1873 	struct ucred *cred;
1874 	struct file *nfp;
1875 };
1876 
1877 static int fdrevoke_check_callback(struct file *fp, void *vinfo);
1878 static int fdrevoke_proc_callback(struct proc *p, void *vinfo);
1879 
1880 int
fdrevoke(void * f_data,short f_type,struct ucred * cred)1881 fdrevoke(void *f_data, short f_type, struct ucred *cred)
1882 {
1883 	struct fdrevoke_info info;
1884 	int error;
1885 
1886 	bzero(&info, sizeof(info));
1887 	info.data = f_data;
1888 	info.type = f_type;
1889 	info.cred = cred;
1890 	error = falloc(NULL, &info.nfp, NULL);
1891 	if (error)
1892 		return (error);
1893 
1894 	/*
1895 	 * Scan the file pointer table once.  dups do not dup file pointers,
1896 	 * only descriptors, so there is no leak.  Set FREVOKED on the fps
1897 	 * being revoked.
1898 	 *
1899 	 * Any fps sent over unix-domain sockets will be revoked by the
1900 	 * socket code checking for FREVOKED when the fps are externialized.
1901 	 * revoke_token is used to make sure that fps marked FREVOKED and
1902 	 * externalized will be picked up by the following allproc_scan().
1903 	 */
1904 	lwkt_gettoken(&revoke_token);
1905 	allfiles_scan_exclusive(fdrevoke_check_callback, &info);
1906 	lwkt_reltoken(&revoke_token);
1907 
1908 	/*
1909 	 * If any fps were marked track down the related descriptors
1910 	 * and close them.  Any dup()s at this point will notice
1911 	 * the FREVOKED already set in the fp and do the right thing.
1912 	 */
1913 	if (info.found)
1914 		allproc_scan(fdrevoke_proc_callback, &info, 0);
1915 	fdrop(info.nfp);
1916 	return(0);
1917 }
1918 
1919 /*
1920  * Locate matching file pointers directly.
1921  *
1922  * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
1923  */
1924 static int
fdrevoke_check_callback(struct file * fp,void * vinfo)1925 fdrevoke_check_callback(struct file *fp, void *vinfo)
1926 {
1927 	struct fdrevoke_info *info = vinfo;
1928 
1929 	/*
1930 	 * File pointers already flagged for revokation are skipped.
1931 	 */
1932 	if (fp->f_flag & FREVOKED)
1933 		return(0);
1934 
1935 	/*
1936 	 * If revoking from a prison file pointers created outside of
1937 	 * that prison, or file pointers without creds, cannot be revoked.
1938 	 */
1939 	if (info->cred->cr_prison &&
1940 	    (fp->f_cred == NULL ||
1941 	     info->cred->cr_prison != fp->f_cred->cr_prison)) {
1942 		return(0);
1943 	}
1944 
1945 	/*
1946 	 * If the file pointer matches then mark it for revocation.  The
1947 	 * flag is currently only used by unp_revoke_gc().
1948 	 *
1949 	 * info->found is a heuristic and can race in a SMP environment.
1950 	 */
1951 	if (info->data == fp->f_data && info->type == fp->f_type) {
1952 		atomic_set_int(&fp->f_flag, FREVOKED);
1953 		info->found = 1;
1954 	}
1955 	return(0);
1956 }
1957 
1958 /*
1959  * Locate matching file pointers via process descriptor tables.
1960  */
1961 static int
fdrevoke_proc_callback(struct proc * p,void * vinfo)1962 fdrevoke_proc_callback(struct proc *p, void *vinfo)
1963 {
1964 	struct fdrevoke_info *info = vinfo;
1965 	struct filedesc *fdp;
1966 	struct file *fp;
1967 	int n;
1968 
1969 	if (p->p_stat == SIDL || p->p_stat == SZOMB)
1970 		return(0);
1971 	if (info->cred->cr_prison &&
1972 	    info->cred->cr_prison != p->p_ucred->cr_prison) {
1973 		return(0);
1974 	}
1975 
1976 	/*
1977 	 * If the controlling terminal of the process matches the
1978 	 * vnode being revoked we clear the controlling terminal.
1979 	 *
1980 	 * The normal spec_close() may not catch this because it
1981 	 * uses curproc instead of p.
1982 	 */
1983 	if (p->p_session && info->type == DTYPE_VNODE &&
1984 	    info->data == p->p_session->s_ttyvp) {
1985 		p->p_session->s_ttyvp = NULL;
1986 		vrele(info->data);
1987 	}
1988 
1989 	/*
1990 	 * Softref the fdp to prevent it from being destroyed
1991 	 */
1992 	spin_lock(&p->p_spin);
1993 	if ((fdp = p->p_fd) == NULL) {
1994 		spin_unlock(&p->p_spin);
1995 		return(0);
1996 	}
1997 	atomic_add_int(&fdp->fd_softrefs, 1);
1998 	spin_unlock(&p->p_spin);
1999 
2000 	/*
2001 	 * Locate and close any matching file descriptors, replacing
2002 	 * them with info->nfp.
2003 	 */
2004 	spin_lock(&fdp->fd_spin);
2005 	for (n = 0; n < fdp->fd_nfiles; ++n) {
2006 		if ((fp = fdp->fd_files[n].fp) == NULL)
2007 			continue;
2008 		if (fp->f_flag & FREVOKED) {
2009 			++fdp->fd_closedcounter;
2010 			fclearcache(&fdp->fd_files[n], NULL, 0);
2011 			++fdp->fd_closedcounter;
2012 			fhold(info->nfp);
2013 			fdp->fd_files[n].fp = info->nfp;
2014 			spin_unlock(&fdp->fd_spin);
2015 			knote_fdclose(fp, fdp, n);	/* XXX */
2016 			closef(fp, p);
2017 			spin_lock(&fdp->fd_spin);
2018 		}
2019 	}
2020 	spin_unlock(&fdp->fd_spin);
2021 	atomic_subtract_int(&fdp->fd_softrefs, 1);
2022 	return(0);
2023 }
2024 
2025 /*
2026  * falloc:
2027  *	Create a new open file structure and reserve a file decriptor
2028  *	for the process that refers to it.
2029  *
2030  *	Root creds are checked using lp, or assumed if lp is NULL.  If
2031  *	resultfd is non-NULL then lp must also be non-NULL.  No file
2032  *	descriptor is reserved (and no process context is needed) if
2033  *	resultfd is NULL.
2034  *
2035  *	A file pointer with a refcount of 1 is returned.  Note that the
2036  *	file pointer is NOT associated with the descriptor.  If falloc
2037  *	returns success, fsetfd() MUST be called to either associate the
2038  *	file pointer or clear the reservation.
2039  */
2040 int
falloc(struct lwp * lp,struct file ** resultfp,int * resultfd)2041 falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
2042 {
2043 	static struct timeval lastfail;
2044 	static int curfail;
2045 	struct filelist_head *head;
2046 	struct file *fp;
2047 	struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
2048 	int error;
2049 
2050 	fp = NULL;
2051 
2052 	/*
2053 	 * Handle filetable full issues and root overfill.
2054 	 */
2055 	if (nfiles >= maxfiles - maxfilesrootres &&
2056 	    (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
2057 		if (ppsratecheck(&lastfail, &curfail, 1)) {
2058 			kprintf("kern.maxfiles limit exceeded by uid %d, "
2059 				"please see tuning(7).\n",
2060 				cred->cr_ruid);
2061 		}
2062 		error = ENFILE;
2063 		goto done;
2064 	}
2065 
2066 	/*
2067 	 * Allocate a new file descriptor.
2068 	 */
2069 	fp = kmalloc_obj(sizeof(*fp), M_FILE, M_WAITOK|M_ZERO);
2070 	spin_init(&fp->f_spin, "falloc");
2071 	SLIST_INIT(&fp->f_klist);
2072 	fp->f_count = 1;
2073 	fp->f_ops = &badfileops;
2074 	fp->f_seqcount = 1;
2075 	fsetcred(fp, cred);
2076 	atomic_add_int(&nfiles, 1);
2077 
2078 	head = fp2filelist(fp);
2079 	spin_lock(&head->spin);
2080 	LIST_INSERT_HEAD(&head->list, fp, f_list);
2081 	spin_unlock(&head->spin);
2082 
2083 	if (resultfd) {
2084 		if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
2085 			fdrop(fp);
2086 			fp = NULL;
2087 		}
2088 	} else {
2089 		error = 0;
2090 	}
2091 done:
2092 	*resultfp = fp;
2093 	return (error);
2094 }
2095 
2096 /*
2097  * Check for races against a file descriptor by determining that the
2098  * file pointer is still associated with the specified file descriptor,
2099  * and a close is not currently in progress.
2100  */
2101 int
checkfdclosed(thread_t td,struct filedesc * fdp,int fd,struct file * fp,int closedcounter)2102 checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp,
2103 	      int closedcounter)
2104 {
2105 	struct fdcache *fdc;
2106 	int error;
2107 
2108 	cpu_lfence();
2109 	if (fdp->fd_closedcounter == closedcounter)
2110 		return 0;
2111 
2112 	if (td->td_proc && td->td_proc->p_fd == fdp) {
2113 		for (fdc = &td->td_fdcache[0];
2114 		     fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
2115 			if (fdc->fd == fd && fdc->fp == fp)
2116 				return 0;
2117 		}
2118 	}
2119 
2120 	spin_lock_shared(&fdp->fd_spin);
2121 	if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
2122 		error = EBADF;
2123 	else
2124 		error = 0;
2125 	spin_unlock_shared(&fdp->fd_spin);
2126 	return (error);
2127 }
2128 
2129 /*
2130  * Associate a file pointer with a previously reserved file descriptor.
2131  * This function always succeeds.
2132  *
2133  * If fp is NULL, the file descriptor is returned to the pool.
2134  *
2135  * Caller must hold an exclusive spinlock on fdp->fd_spin.
2136  */
2137 static void
fsetfd_locked(struct filedesc * fdp,struct file * fp,int fd)2138 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
2139 {
2140 	KKASSERT((unsigned)fd < fdp->fd_nfiles);
2141 	KKASSERT(fdp->fd_files[fd].reserved != 0);
2142 	if (fp) {
2143 		fhold(fp);
2144 		/* fclearcache(&fdp->fd_files[fd], NULL, 0); */
2145 		fdp->fd_files[fd].fp = fp;
2146 		fdp->fd_files[fd].reserved = 0;
2147 	} else {
2148 		fdp->fd_files[fd].reserved = 0;
2149 		fdreserve_locked(fdp, fd, -1);
2150 		fdfixup_locked(fdp, fd);
2151 	}
2152 }
2153 
2154 /*
2155  * Caller must hold an exclusive spinlock on fdp->fd_spin.
2156  */
2157 void
fsetfd(struct filedesc * fdp,struct file * fp,int fd)2158 fsetfd(struct filedesc *fdp, struct file *fp, int fd)
2159 {
2160 	spin_lock(&fdp->fd_spin);
2161 	fsetfd_locked(fdp, fp, fd);
2162 	spin_unlock(&fdp->fd_spin);
2163 }
2164 
2165 /*
2166  * Caller must hold an exclusive spinlock on fdp->fd_spin.
2167  */
2168 static
2169 struct file *
funsetfd_locked(struct filedesc * fdp,int fd)2170 funsetfd_locked(struct filedesc *fdp, int fd)
2171 {
2172 	struct file *fp;
2173 
2174 	if ((unsigned)fd >= fdp->fd_nfiles)
2175 		return (NULL);
2176 	if ((fp = fdp->fd_files[fd].fp) == NULL)
2177 		return (NULL);
2178 	++fdp->fd_closedcounter;
2179 	fclearcache(&fdp->fd_files[fd], NULL, 0);
2180 	fdp->fd_files[fd].fp = NULL;
2181 	fdp->fd_files[fd].fileflags = 0;
2182 	++fdp->fd_closedcounter;
2183 
2184 	fdreserve_locked(fdp, fd, -1);
2185 	fdfixup_locked(fdp, fd);
2186 
2187 	return(fp);
2188 }
2189 
2190 /*
2191  * WARNING: May not be called before initial fsetfd().
2192  */
2193 int
fgetfdflags(struct filedesc * fdp,int fd,int * flagsp)2194 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
2195 {
2196 	int error;
2197 
2198 	spin_lock_shared(&fdp->fd_spin);
2199 	if (((u_int)fd) >= fdp->fd_nfiles) {
2200 		error = EBADF;
2201 	} else if (fdp->fd_files[fd].fp == NULL) {
2202 		error = EBADF;
2203 	} else {
2204 		*flagsp = fdp->fd_files[fd].fileflags;
2205 		error = 0;
2206 	}
2207 	spin_unlock_shared(&fdp->fd_spin);
2208 
2209 	return (error);
2210 }
2211 
2212 /*
2213  * WARNING: May not be called before initial fsetfd().
2214  */
2215 int
fsetfdflags(struct filedesc * fdp,int fd,int add_flags)2216 fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
2217 {
2218 	int error;
2219 
2220 	spin_lock(&fdp->fd_spin);
2221 	if (((u_int)fd) >= fdp->fd_nfiles) {
2222 		error = EBADF;
2223 	} else if (fdp->fd_files[fd].fp == NULL) {
2224 		error = EBADF;
2225 	} else {
2226 		fdp->fd_files[fd].fileflags |= add_flags;
2227 		error = 0;
2228 	}
2229 	spin_unlock(&fdp->fd_spin);
2230 
2231 	return (error);
2232 }
2233 
2234 /*
2235  * WARNING: May not be called before initial fsetfd().
2236  */
2237 int
fclrfdflags(struct filedesc * fdp,int fd,int rem_flags)2238 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
2239 {
2240 	int error;
2241 
2242 	spin_lock(&fdp->fd_spin);
2243 	if (((u_int)fd) >= fdp->fd_nfiles) {
2244 		error = EBADF;
2245 	} else if (fdp->fd_files[fd].fp == NULL) {
2246 		error = EBADF;
2247 	} else {
2248 		fdp->fd_files[fd].fileflags &= ~rem_flags;
2249 		error = 0;
2250 	}
2251 	spin_unlock(&fdp->fd_spin);
2252 
2253 	return (error);
2254 }
2255 
2256 /*
2257  * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
2258  */
2259 void
fsetcred(struct file * fp,struct ucred * ncr)2260 fsetcred(struct file *fp, struct ucred *ncr)
2261 {
2262 	struct ucred *ocr;
2263 	struct uidinfo *uip;
2264 	struct uidcount *pup;
2265 	int cpu = mycpuid;
2266 	int count;
2267 
2268 	ocr = fp->f_cred;
2269 	if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
2270 		if (ocr) {
2271 			uip = ocr->cr_uidinfo;
2272 			pup = &uip->ui_pcpu[cpu];
2273 			atomic_add_int(&pup->pu_openfiles, -1);
2274 			if (pup->pu_openfiles < -PUP_LIMIT ||
2275 			    pup->pu_openfiles > PUP_LIMIT) {
2276 				count = atomic_swap_int(&pup->pu_openfiles, 0);
2277 				atomic_add_int(&uip->ui_openfiles, count);
2278 			}
2279 		}
2280 		if (ncr) {
2281 			uip = ncr->cr_uidinfo;
2282 			pup = &uip->ui_pcpu[cpu];
2283 			atomic_add_int(&pup->pu_openfiles, 1);
2284 			if (pup->pu_openfiles < -PUP_LIMIT ||
2285 			    pup->pu_openfiles > PUP_LIMIT) {
2286 				count = atomic_swap_int(&pup->pu_openfiles, 0);
2287 				atomic_add_int(&uip->ui_openfiles, count);
2288 			}
2289 		}
2290 	}
2291 	if (ncr)
2292 		crhold(ncr);
2293 	fp->f_cred = ncr;
2294 	if (ocr)
2295 		crfree(ocr);
2296 }
2297 
2298 /*
2299  * Free a file descriptor.
2300  */
2301 static
2302 void
ffree(struct file * fp)2303 ffree(struct file *fp)
2304 {
2305 	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
2306 	fsetcred(fp, NULL);
2307 	if (fp->f_nchandle.ncp)
2308 	    cache_drop(&fp->f_nchandle);
2309 	kfree_obj(fp, M_FILE);
2310 }
2311 
2312 /*
2313  * called from init_main, initialize filedesc0 for proc0.
2314  */
2315 void
fdinit_bootstrap(struct proc * p0,struct filedesc * fdp0,int cmask)2316 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
2317 {
2318 	p0->p_fd = fdp0;
2319 	p0->p_fdtol = NULL;
2320 	fdp0->fd_refcnt = 1;
2321 	fdp0->fd_cmask = cmask;
2322 	fdp0->fd_files = fdp0->fd_builtin_files;
2323 	fdp0->fd_nfiles = NDFILE;
2324 	fdp0->fd_lastfile = -1;
2325 	spin_init(&fdp0->fd_spin, "fdinitbootstrap");
2326 }
2327 
2328 /*
2329  * Build a new filedesc structure.
2330  */
2331 struct filedesc *
fdinit(struct proc * p)2332 fdinit(struct proc *p)
2333 {
2334 	struct filedesc *newfdp;
2335 	struct filedesc *fdp = p->p_fd;
2336 
2337 	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
2338 	spin_lock(&fdp->fd_spin);
2339 	if (fdp->fd_cdir) {
2340 		newfdp->fd_cdir = fdp->fd_cdir;
2341 		vref(newfdp->fd_cdir);
2342 		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
2343 	}
2344 
2345 	/*
2346 	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
2347 	 * proc0, but should unconditionally exist in other processes.
2348 	 */
2349 	if (fdp->fd_rdir) {
2350 		newfdp->fd_rdir = fdp->fd_rdir;
2351 		vref(newfdp->fd_rdir);
2352 		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
2353 	}
2354 	if (fdp->fd_jdir) {
2355 		newfdp->fd_jdir = fdp->fd_jdir;
2356 		vref(newfdp->fd_jdir);
2357 		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
2358 	}
2359 	spin_unlock(&fdp->fd_spin);
2360 
2361 	/* Create the file descriptor table. */
2362 	newfdp->fd_refcnt = 1;
2363 	newfdp->fd_cmask = cmask;
2364 	newfdp->fd_files = newfdp->fd_builtin_files;
2365 	newfdp->fd_nfiles = NDFILE;
2366 	newfdp->fd_lastfile = -1;
2367 	spin_init(&newfdp->fd_spin, "fdinit");
2368 
2369 	return (newfdp);
2370 }
2371 
2372 /*
2373  * Share a filedesc structure.
2374  */
2375 struct filedesc *
fdshare(struct proc * p)2376 fdshare(struct proc *p)
2377 {
2378 	struct filedesc *fdp;
2379 
2380 	fdp = p->p_fd;
2381 	spin_lock(&fdp->fd_spin);
2382 	fdp->fd_refcnt++;
2383 	spin_unlock(&fdp->fd_spin);
2384 	return (fdp);
2385 }
2386 
2387 /*
2388  * Copy a filedesc structure.
2389  */
2390 int
fdcopy(struct proc * p,struct filedesc ** fpp)2391 fdcopy(struct proc *p, struct filedesc **fpp)
2392 {
2393 	struct filedesc *fdp = p->p_fd;
2394 	struct filedesc *newfdp;
2395 	struct fdnode *fdnode;
2396 	int i;
2397 	int ni;
2398 
2399 	/*
2400 	 * Certain daemons might not have file descriptors.
2401 	 */
2402 	if (fdp == NULL)
2403 		return (0);
2404 
2405 	/*
2406 	 * Allocate the new filedesc and fd_files[] array.  This can race
2407 	 * with operations by other threads on the fdp so we have to be
2408 	 * careful.
2409 	 */
2410 	newfdp = kmalloc(sizeof(struct filedesc),
2411 			 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK);
2412 	if (newfdp == NULL) {
2413 		*fpp = NULL;
2414 		return (-1);
2415 	}
2416 again:
2417 	spin_lock(&fdp->fd_spin);
2418 	if (fdp->fd_lastfile < NDFILE) {
2419 		newfdp->fd_files = newfdp->fd_builtin_files;
2420 		i = NDFILE;
2421 	} else {
2422 		/*
2423 		 * We have to allocate (N^2-1) entries for our in-place
2424 		 * binary tree.  Allow the table to shrink.
2425 		 */
2426 		i = fdp->fd_nfiles;
2427 		ni = (i - 1) / 2;
2428 		while (ni > fdp->fd_lastfile && ni > NDFILE) {
2429 			i = ni;
2430 			ni = (i - 1) / 2;
2431 		}
2432 		spin_unlock(&fdp->fd_spin);
2433 		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
2434 					  M_FILEDESC, M_WAITOK | M_ZERO);
2435 
2436 		/*
2437 		 * Check for race, retry
2438 		 */
2439 		spin_lock(&fdp->fd_spin);
2440 		if (i <= fdp->fd_lastfile) {
2441 			spin_unlock(&fdp->fd_spin);
2442 			kfree(newfdp->fd_files, M_FILEDESC);
2443 			goto again;
2444 		}
2445 	}
2446 
2447 	/*
2448 	 * Dup the remaining fields. vref() and cache_hold() can be
2449 	 * safely called while holding the read spinlock on fdp.
2450 	 *
2451 	 * The read spinlock on fdp is still being held.
2452 	 *
2453 	 * NOTE: vref and cache_hold calls for the case where the vnode
2454 	 * or cache entry already has at least one ref may be called
2455 	 * while holding spin locks.
2456 	 */
2457 	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
2458 		vref(newfdp->fd_cdir);
2459 		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
2460 	}
2461 	/*
2462 	 * We must check for fd_rdir here, at least for now because
2463 	 * the init process is created before we have access to the
2464 	 * rootvode to take a reference to it.
2465 	 */
2466 	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
2467 		vref(newfdp->fd_rdir);
2468 		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
2469 	}
2470 	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
2471 		vref(newfdp->fd_jdir);
2472 		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
2473 	}
2474 	newfdp->fd_refcnt = 1;
2475 	newfdp->fd_nfiles = i;
2476 	newfdp->fd_lastfile = fdp->fd_lastfile;
2477 	newfdp->fd_freefile = fdp->fd_freefile;
2478 	newfdp->fd_cmask = fdp->fd_cmask;
2479 	spin_init(&newfdp->fd_spin, "fdcopy");
2480 
2481 	/*
2482 	 * Copy the descriptor table through (i).  This also copies the
2483 	 * allocation state.   Then go through and ref the file pointers
2484 	 * and clean up any KQ descriptors.
2485 	 *
2486 	 * kq descriptors cannot be copied.  Since we haven't ref'd the
2487 	 * copied files yet we can ignore the return value from funsetfd().
2488 	 *
2489 	 * The read spinlock on fdp is still being held.
2490 	 *
2491 	 * Be sure to clean out fdnode->tdcache, otherwise bad things will
2492 	 * happen.
2493 	 */
2494 	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
2495 	for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
2496 		fdnode = &newfdp->fd_files[i];
2497 		if (fdnode->reserved) {
2498 			fdreserve_locked(newfdp, i, -1);
2499 			fdnode->reserved = 0;
2500 			fdfixup_locked(newfdp, i);
2501 		} else if (fdnode->fp) {
2502 			bzero(&fdnode->tdcache, sizeof(fdnode->tdcache));
2503 			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
2504 				(void)funsetfd_locked(newfdp, i);
2505 			} else {
2506 				fhold(fdnode->fp);
2507 			}
2508 		}
2509 	}
2510 	spin_unlock(&fdp->fd_spin);
2511 	*fpp = newfdp;
2512 	return (0);
2513 }
2514 
2515 /*
2516  * Release a filedesc structure.
2517  *
2518  * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
2519  */
2520 void
fdfree(struct proc * p,struct filedesc * repl)2521 fdfree(struct proc *p, struct filedesc *repl)
2522 {
2523 	struct filedesc *fdp;
2524 	struct fdnode *fdnode;
2525 	int i;
2526 	struct filedesc_to_leader *fdtol;
2527 	struct file *fp;
2528 	struct vnode *vp;
2529 	struct flock lf;
2530 
2531 	/*
2532 	 * Before destroying or replacing p->p_fd we must be sure to
2533 	 * clean out the cache of the last thread, which should be
2534 	 * curthread.
2535 	 */
2536 	fexitcache(curthread);
2537 
2538 	/*
2539 	 * Certain daemons might not have file descriptors.
2540 	 */
2541 	fdp = p->p_fd;
2542 	if (fdp == NULL) {
2543 		p->p_fd = repl;
2544 		return;
2545 	}
2546 
2547 	/*
2548 	 * Severe messing around to follow.
2549 	 */
2550 	spin_lock(&fdp->fd_spin);
2551 
2552 	/* Check for special need to clear POSIX style locks */
2553 	fdtol = p->p_fdtol;
2554 	if (fdtol != NULL) {
2555 		KASSERT(fdtol->fdl_refcount > 0,
2556 			("filedesc_to_refcount botch: fdl_refcount=%d",
2557 			 fdtol->fdl_refcount));
2558 		if (fdtol->fdl_refcount == 1 && p->p_leader->p_advlock_flag) {
2559 			for (i = 0; i <= fdp->fd_lastfile; ++i) {
2560 				fdnode = &fdp->fd_files[i];
2561 				if (fdnode->fp == NULL ||
2562 				    fdnode->fp->f_type != DTYPE_VNODE) {
2563 					continue;
2564 				}
2565 				fp = fdnode->fp;
2566 				fhold(fp);
2567 				spin_unlock(&fdp->fd_spin);
2568 
2569 				lf.l_whence = SEEK_SET;
2570 				lf.l_start = 0;
2571 				lf.l_len = 0;
2572 				lf.l_type = F_UNLCK;
2573 				vp = (struct vnode *)fp->f_data;
2574 				VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
2575 					    F_UNLCK, &lf, F_POSIX);
2576 				fdrop(fp);
2577 				spin_lock(&fdp->fd_spin);
2578 			}
2579 		}
2580 	retry:
2581 		if (fdtol->fdl_refcount == 1) {
2582 			if (fdp->fd_holdleaderscount > 0 &&
2583 			    p->p_leader->p_advlock_flag) {
2584 				/*
2585 				 * close() or do_dup() has cleared a reference
2586 				 * in a shared file descriptor table.
2587 				 */
2588 				fdp->fd_holdleaderswakeup = 1;
2589 				ssleep(&fdp->fd_holdleaderscount,
2590 				       &fdp->fd_spin, 0, "fdlhold", 0);
2591 				goto retry;
2592 			}
2593 			if (fdtol->fdl_holdcount > 0) {
2594 				/*
2595 				 * Ensure that fdtol->fdl_leader
2596 				 * remains valid in closef().
2597 				 */
2598 				fdtol->fdl_wakeup = 1;
2599 				ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
2600 				goto retry;
2601 			}
2602 		}
2603 		fdtol->fdl_refcount--;
2604 		if (fdtol->fdl_refcount == 0 &&
2605 		    fdtol->fdl_holdcount == 0) {
2606 			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
2607 			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
2608 		} else {
2609 			fdtol = NULL;
2610 		}
2611 		p->p_fdtol = NULL;
2612 		if (fdtol != NULL) {
2613 			spin_unlock(&fdp->fd_spin);
2614 			kfree(fdtol, M_FILEDESC_TO_LEADER);
2615 			spin_lock(&fdp->fd_spin);
2616 		}
2617 	}
2618 	if (--fdp->fd_refcnt > 0) {
2619 		spin_unlock(&fdp->fd_spin);
2620 		spin_lock(&p->p_spin);
2621 		p->p_fd = repl;
2622 		spin_unlock(&p->p_spin);
2623 		return;
2624 	}
2625 
2626 	/*
2627 	 * Even though we are the last reference to the structure allproc
2628 	 * scans may still reference the structure.  Maintain proper
2629 	 * locks until we can replace p->p_fd.
2630 	 *
2631 	 * Also note that kqueue's closef still needs to reference the
2632 	 * fdp via p->p_fd, so we have to close the descriptors before
2633 	 * we replace p->p_fd.
2634 	 */
2635 	for (i = 0; i <= fdp->fd_lastfile; ++i) {
2636 		if (fdp->fd_files[i].fp) {
2637 			fp = funsetfd_locked(fdp, i);
2638 			if (fp) {
2639 				spin_unlock(&fdp->fd_spin);
2640 				if (SLIST_FIRST(&fp->f_klist))
2641 					knote_fdclose(fp, fdp, i);
2642 				closef(fp, p);
2643 				spin_lock(&fdp->fd_spin);
2644 			}
2645 		}
2646 	}
2647 	spin_unlock(&fdp->fd_spin);
2648 
2649 	/*
2650 	 * Interlock against an allproc scan operations (typically frevoke).
2651 	 */
2652 	spin_lock(&p->p_spin);
2653 	p->p_fd = repl;
2654 	spin_unlock(&p->p_spin);
2655 
2656 	/*
2657 	 * Wait for any softrefs to go away.  This race rarely occurs so
2658 	 * we can use a non-critical-path style poll/sleep loop.  The
2659 	 * race only occurs against allproc scans.
2660 	 *
2661 	 * No new softrefs can occur with the fdp disconnected from the
2662 	 * process.
2663 	 */
2664 	if (fdp->fd_softrefs) {
2665 		kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
2666 		while (fdp->fd_softrefs)
2667 			tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
2668 	}
2669 
2670 	if (fdp->fd_files != fdp->fd_builtin_files)
2671 		kfree(fdp->fd_files, M_FILEDESC);
2672 	if (fdp->fd_cdir) {
2673 		cache_drop(&fdp->fd_ncdir);
2674 		vrele(fdp->fd_cdir);
2675 	}
2676 	if (fdp->fd_rdir) {
2677 		cache_drop(&fdp->fd_nrdir);
2678 		vrele(fdp->fd_rdir);
2679 	}
2680 	if (fdp->fd_jdir) {
2681 		cache_drop(&fdp->fd_njdir);
2682 		vrele(fdp->fd_jdir);
2683 	}
2684 	kfree(fdp, M_FILEDESC);
2685 }
2686 
2687 /*
2688  * Retrieve and reference the file pointer associated with a descriptor.
2689  *
2690  * td must be the current thread.
2691  */
2692 struct file *
holdfp(thread_t td,int fd,int flag)2693 holdfp(thread_t td, int fd, int flag)
2694 {
2695 	struct file *fp;
2696 
2697 	fp = _holdfp_cache(td, fd);
2698 	if (fp) {
2699 		if ((fp->f_flag & flag) == 0 && flag != -1) {
2700 			fdrop(fp);
2701 			fp = NULL;
2702 		}
2703 	}
2704 	return fp;
2705 }
2706 
2707 /*
2708  * holdsock() - load the struct file pointer associated
2709  * with a socket into *fpp.  If an error occurs, non-zero
2710  * will be returned and *fpp will be set to NULL.
2711  *
2712  * td must be the current thread.
2713  */
2714 int
holdsock(thread_t td,int fd,struct file ** fpp)2715 holdsock(thread_t td, int fd, struct file **fpp)
2716 {
2717 	struct file *fp;
2718 	int error;
2719 
2720 	/*
2721 	 * Lockless shortcut
2722 	 */
2723 	fp = _holdfp_cache(td, fd);
2724 	if (fp) {
2725 		if (fp->f_type != DTYPE_SOCKET) {
2726 			fdrop(fp);
2727 			fp = NULL;
2728 			error = ENOTSOCK;
2729 		} else {
2730 			error = 0;
2731 		}
2732 	} else {
2733 		error = EBADF;
2734 	}
2735 	*fpp = fp;
2736 
2737 	return (error);
2738 }
2739 
2740 /*
2741  * Convert a user file descriptor to a held file pointer.
2742  *
2743  * td must be the current thread.
2744  */
2745 int
holdvnode(thread_t td,int fd,struct file ** fpp)2746 holdvnode(thread_t td, int fd, struct file **fpp)
2747 {
2748 	struct file *fp;
2749 	int error;
2750 
2751 	fp = _holdfp_cache(td, fd);
2752 	if (fp) {
2753 		if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
2754 			fdrop(fp);
2755 			fp = NULL;
2756 			error = EINVAL;
2757 		} else {
2758 			error = 0;
2759 		}
2760 	} else {
2761 		error = EBADF;
2762 	}
2763 	*fpp = fp;
2764 
2765 	return (error);
2766 }
2767 
2768 /*
2769  * Convert a user file descriptor to a held file pointer.
2770  *
2771  * td must be the current thread.
2772  */
2773 int
holdvnode2(thread_t td,int fd,struct file ** fpp,char * fflagsp)2774 holdvnode2(thread_t td, int fd, struct file **fpp, char *fflagsp)
2775 {
2776 	struct file *fp;
2777 	int error;
2778 
2779 	fp = _holdfp2(td, fd, fflagsp);
2780 	if (fp) {
2781 		if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
2782 			fdrop(fp);
2783 			fp = NULL;
2784 			error = EINVAL;
2785 		} else {
2786 			error = 0;
2787 		}
2788 	} else {
2789 		error = EBADF;
2790 	}
2791 	*fpp = fp;
2792 
2793 	return (error);
2794 }
2795 
2796 /*
2797  * For setugid programs, we don't want to people to use that setugidness
2798  * to generate error messages which write to a file which otherwise would
2799  * otherwise be off-limits to the process.
2800  *
2801  * This is a gross hack to plug the hole.  A better solution would involve
2802  * a special vop or other form of generalized access control mechanism.  We
2803  * go ahead and just reject all procfs file systems accesses as dangerous.
2804  *
2805  * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
2806  * sufficient.  We also don't for check setugidness since we know we are.
2807  */
2808 static int
is_unsafe(struct file * fp)2809 is_unsafe(struct file *fp)
2810 {
2811 	if (fp->f_type == DTYPE_VNODE &&
2812 	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
2813 		return (1);
2814 	return (0);
2815 }
2816 
2817 /*
2818  * Make this setguid thing safe, if at all possible.
2819  *
2820  * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
2821  */
2822 void
setugidsafety(struct proc * p)2823 setugidsafety(struct proc *p)
2824 {
2825 	struct filedesc *fdp = p->p_fd;
2826 	int i;
2827 
2828 	/* Certain daemons might not have file descriptors. */
2829 	if (fdp == NULL)
2830 		return;
2831 
2832 	/*
2833 	 * note: fdp->fd_files may be reallocated out from under us while
2834 	 * we are blocked in a close.  Be careful!
2835 	 */
2836 	for (i = 0; i <= fdp->fd_lastfile; i++) {
2837 		if (i > 2)
2838 			break;
2839 		if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
2840 			struct file *fp;
2841 
2842 			/*
2843 			 * NULL-out descriptor prior to close to avoid
2844 			 * a race while close blocks.
2845 			 */
2846 			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
2847 				knote_fdclose(fp, fdp, i);
2848 				closef(fp, p);
2849 			}
2850 		}
2851 	}
2852 }
2853 
2854 /*
2855  * Close all CLOEXEC files on exec.
2856  *
2857  * Only a single thread remains for the current process.
2858  *
2859  * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
2860  */
2861 void
fdcloseexec(struct proc * p)2862 fdcloseexec(struct proc *p)
2863 {
2864 	struct filedesc *fdp = p->p_fd;
2865 	int i;
2866 
2867 	/* Certain daemons might not have file descriptors. */
2868 	if (fdp == NULL)
2869 		return;
2870 
2871 	/*
2872 	 * We cannot cache fd_files since operations may block and rip
2873 	 * them out from under us.
2874 	 */
2875 	for (i = 0; i <= fdp->fd_lastfile; i++) {
2876 		if (fdp->fd_files[i].fp != NULL &&
2877 		    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
2878 			struct file *fp;
2879 
2880 			/*
2881 			 * NULL-out descriptor prior to close to avoid
2882 			 * a race while close blocks.
2883 			 *
2884 			 * (funsetfd*() also clears the fd cache)
2885 			 */
2886 			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
2887 				knote_fdclose(fp, fdp, i);
2888 				closef(fp, p);
2889 			}
2890 		}
2891 	}
2892 }
2893 
2894 /*
2895  * It is unsafe for set[ug]id processes to be started with file
2896  * descriptors 0..2 closed, as these descriptors are given implicit
2897  * significance in the Standard C library.  fdcheckstd() will create a
2898  * descriptor referencing /dev/null for each of stdin, stdout, and
2899  * stderr that is not already open.
2900  *
2901  * NOT MPSAFE - calls falloc, vn_open, etc
2902  */
2903 int
fdcheckstd(struct lwp * lp)2904 fdcheckstd(struct lwp *lp)
2905 {
2906 	struct nlookupdata nd;
2907 	struct filedesc *fdp;
2908 	struct file *fp;
2909 	int retval;
2910 	int i, error, flags, devnull;
2911 
2912 	fdp = lp->lwp_proc->p_fd;
2913 	if (fdp == NULL)
2914 		return (0);
2915 	devnull = -1;
2916 	error = 0;
2917 	for (i = 0; i < 3; i++) {
2918 		if (fdp->fd_files[i].fp != NULL)
2919 			continue;
2920 		if (devnull < 0) {
2921 			if ((error = falloc(lp, &fp, &devnull)) != 0)
2922 				break;
2923 
2924 			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
2925 						NLC_FOLLOW|NLC_LOCKVP);
2926 			flags = FREAD | FWRITE;
2927 			if (error == 0)
2928 				error = vn_open(&nd, &fp, flags, 0);
2929 			if (error == 0)
2930 				fsetfd(fdp, fp, devnull);
2931 			else
2932 				fsetfd(fdp, NULL, devnull);
2933 			fdrop(fp);
2934 			nlookup_done(&nd);
2935 			if (error)
2936 				break;
2937 			KKASSERT(i == devnull);
2938 		} else {
2939 			error = kern_dup(DUP_FIXED, devnull, i, &retval);
2940 			if (error != 0)
2941 				break;
2942 		}
2943 	}
2944 	return (error);
2945 }
2946 
2947 /*
2948  * Internal form of close.
2949  * Decrement reference count on file structure.
2950  * Note: td and/or p may be NULL when closing a file
2951  * that was being passed in a message.
2952  *
2953  * MPALMOSTSAFE - acquires mplock for VOP operations
2954  */
2955 int
closef(struct file * fp,struct proc * p)2956 closef(struct file *fp, struct proc *p)
2957 {
2958 	struct vnode *vp;
2959 	struct flock lf;
2960 	struct filedesc_to_leader *fdtol;
2961 
2962 	if (fp == NULL)
2963 		return (0);
2964 
2965 	/*
2966 	 * POSIX record locking dictates that any close releases ALL
2967 	 * locks owned by this process.  This is handled by setting
2968 	 * a flag in the unlock to free ONLY locks obeying POSIX
2969 	 * semantics, and not to free BSD-style file locks.
2970 	 * If the descriptor was in a message, POSIX-style locks
2971 	 * aren't passed with the descriptor.
2972 	 */
2973 	if (p != NULL && fp->f_type == DTYPE_VNODE &&
2974 	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
2975 	) {
2976 		if (p->p_leader->p_advlock_flag) {
2977 			lf.l_whence = SEEK_SET;
2978 			lf.l_start = 0;
2979 			lf.l_len = 0;
2980 			lf.l_type = F_UNLCK;
2981 			vp = (struct vnode *)fp->f_data;
2982 			VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
2983 				    &lf, F_POSIX);
2984 		}
2985 		fdtol = p->p_fdtol;
2986 		if (fdtol != NULL) {
2987 			lwkt_gettoken(&p->p_token);
2988 
2989 			/*
2990 			 * Handle special case where file descriptor table
2991 			 * is shared between multiple process leaders.
2992 			 */
2993 			for (fdtol = fdtol->fdl_next;
2994 			     fdtol != p->p_fdtol;
2995 			     fdtol = fdtol->fdl_next) {
2996 				if (fdtol->fdl_leader->p_advlock_flag == 0)
2997 					continue;
2998 				fdtol->fdl_holdcount++;
2999 				lf.l_whence = SEEK_SET;
3000 				lf.l_start = 0;
3001 				lf.l_len = 0;
3002 				lf.l_type = F_UNLCK;
3003 				vp = (struct vnode *)fp->f_data;
3004 				VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader,
3005 					    F_UNLCK, &lf, F_POSIX);
3006 				fdtol->fdl_holdcount--;
3007 				if (fdtol->fdl_holdcount == 0 &&
3008 				    fdtol->fdl_wakeup != 0) {
3009 					fdtol->fdl_wakeup = 0;
3010 					wakeup(fdtol);
3011 				}
3012 			}
3013 			lwkt_reltoken(&p->p_token);
3014 		}
3015 	}
3016 	return (fdrop(fp));
3017 }
3018 
3019 /*
3020  * fhold() can only be called if f_count is already at least 1 (i.e. the
3021  * caller of fhold() already has a reference to the file pointer in some
3022  * manner or other).
3023  *
3024  * Atomic ops are used for incrementing and decrementing f_count before
3025  * the 1->0 transition.  f_count 1->0 transition is special, see the
3026  * comment in fdrop().
3027  */
3028 void
fhold(struct file * fp)3029 fhold(struct file *fp)
3030 {
3031 	/* 0->1 transition will never work */
3032 	KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count));
3033 	atomic_add_int(&fp->f_count, 1);
3034 }
3035 
3036 /*
3037  * fdrop() - drop a reference to a descriptor
3038  */
3039 int
fdrop(struct file * fp)3040 fdrop(struct file *fp)
3041 {
3042 	struct flock lf;
3043 	struct vnode *vp;
3044 	int error, do_free = 0;
3045 
3046 	/*
3047 	 * NOTE:
3048 	 * Simple atomic_fetchadd_int(f_count, -1) here will cause use-
3049 	 * after-free or double free (due to f_count 0->1 transition), if
3050 	 * fhold() is called on the fps found through filehead iteration.
3051 	 */
3052 	for (;;) {
3053 		int count = fp->f_count;
3054 
3055 		cpu_ccfence();
3056 		KASSERT(count > 0, ("fdrop: invalid f_count %d", count));
3057 		if (count == 1) {
3058 			struct filelist_head *head = fp2filelist(fp);
3059 
3060 			/*
3061 			 * About to drop the last reference, hold the
3062 			 * filehead spin lock and drop it, so that no
3063 			 * one could see this fp through filehead anymore,
3064 			 * let alone fhold() this fp.
3065 			 */
3066 			spin_lock(&head->spin);
3067 			if (atomic_cmpset_int(&fp->f_count, count, 0)) {
3068 				LIST_REMOVE(fp, f_list);
3069 				spin_unlock(&head->spin);
3070 				atomic_subtract_int(&nfiles, 1);
3071 				do_free = 1; /* free this fp */
3072 				break;
3073 			}
3074 			spin_unlock(&head->spin);
3075 			/* retry */
3076 		} else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) {
3077 			break;
3078 		}
3079 		/* retry */
3080 	}
3081 	if (!do_free)
3082 		return (0);
3083 
3084 	KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL);
3085 
3086 	/*
3087 	 * The last reference has gone away, we own the fp structure free
3088 	 * and clear.
3089 	 */
3090 	if (fp->f_count < 0)
3091 		panic("fdrop: count < 0");
3092 	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
3093 	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
3094 	) {
3095 		lf.l_whence = SEEK_SET;
3096 		lf.l_start = 0;
3097 		lf.l_len = 0;
3098 		lf.l_type = F_UNLCK;
3099 		vp = (struct vnode *)fp->f_data;
3100 		VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
3101 	}
3102 	if (fp->f_ops != &badfileops)
3103 		error = fo_close(fp);
3104 	else
3105 		error = 0;
3106 	ffree(fp);
3107 	return (error);
3108 }
3109 
3110 /*
3111  * Apply an advisory lock on a file descriptor.
3112  *
3113  * Just attempt to get a record lock of the requested type on
3114  * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
3115  *
3116  * MPALMOSTSAFE
3117  */
3118 int
sys_flock(struct sysmsg * sysmsg,const struct flock_args * uap)3119 sys_flock(struct sysmsg *sysmsg, const struct flock_args *uap)
3120 {
3121 	thread_t td = curthread;
3122 	struct file *fp;
3123 	struct vnode *vp;
3124 	struct flock lf;
3125 	int error;
3126 
3127 	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
3128 		return (EBADF);
3129 	if (fp->f_type != DTYPE_VNODE) {
3130 		error = EOPNOTSUPP;
3131 		goto done;
3132 	}
3133 	vp = (struct vnode *)fp->f_data;
3134 	lf.l_whence = SEEK_SET;
3135 	lf.l_start = 0;
3136 	lf.l_len = 0;
3137 	if (uap->how & LOCK_UN) {
3138 		lf.l_type = F_UNLCK;
3139 		atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */
3140 		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
3141 		goto done;
3142 	}
3143 	if (uap->how & LOCK_EX)
3144 		lf.l_type = F_WRLCK;
3145 	else if (uap->how & LOCK_SH)
3146 		lf.l_type = F_RDLCK;
3147 	else {
3148 		error = EBADF;
3149 		goto done;
3150 	}
3151 	if (uap->how & LOCK_NB)
3152 		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
3153 	else
3154 		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
3155 	atomic_set_int(&fp->f_flag, FHASLOCK);	/* race ok */
3156 done:
3157 	fdrop(fp);
3158 	return (error);
3159 }
3160 
3161 /*
3162  * File Descriptor pseudo-device driver ( /dev/fd/N ).
3163  *
3164  * This interface is now a bit more linux-compatible and attempts to not
3165  * share seek positions by not sharing the fp of the descriptor when
3166  * possible.
3167  *
3168  * Probably a good idea anyhow, but now particularly important for
3169  * fexecve() which uses /dev/fd/N.
3170  *
3171  * The original interface effectively dup()d the descriptor.
3172  */
3173 static int
fdopen(struct dev_open_args * ap)3174 fdopen(struct dev_open_args *ap)
3175 {
3176 	struct file *wfp;
3177 	thread_t td;
3178 	int error;
3179 	int sfd;
3180 
3181 	td = curthread;
3182 	KKASSERT(td->td_lwp != NULL);
3183 
3184 	/*
3185 	 * Get the fp for /dev/fd/N
3186 	 */
3187 	sfd = minor(ap->a_head.a_dev);
3188 	if ((wfp = holdfp(td, sfd, -1)) == NULL)
3189 		return (EBADF);
3190 
3191 	/*
3192 	 * Close a revoke/dup race.  Duping a descriptor marked as revoked
3193 	 * will dup a dummy descriptor instead of the real one.
3194 	 */
3195 	if (wfp->f_flag & FREVOKED) {
3196 		kprintf("Warning: attempt to dup() a revoked descriptor\n");
3197 		fdrop(wfp);
3198 		wfp = NULL;
3199 		error = falloc(NULL, &wfp, NULL);
3200 		if (error)
3201 			return (error);
3202 	}
3203 
3204 	/*
3205 	 * Check that the mode the file is being opened for is a
3206 	 * subset of the mode of the existing descriptor.
3207 	 */
3208 	if (ap->a_fpp == NULL) {
3209 		fdrop(wfp);
3210 		return EINVAL;
3211 	}
3212 	if (((ap->a_oflags & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
3213 		fdrop(wfp);
3214 		return EACCES;
3215 	}
3216 	if (wfp->f_type == DTYPE_VNODE && wfp->f_data) {
3217 		/*
3218 		 * If wfp is a vnode create a new fp so things like the
3219 		 * seek position (etc) are not shared with the original.
3220 		 *
3221 		 * Don't try to call VOP_OPEN().  Adjust the open-count
3222 		 * ourselves.
3223 		 */
3224 		struct vnode *vp;
3225 		struct file *fp;
3226 
3227 		vp = wfp->f_data;
3228                 fp = *ap->a_fpp;
3229 
3230 		/*
3231 		 * Yah... this wouldn't be good.
3232 		 */
3233 		if ((ap->a_oflags & (FWRITE|O_TRUNC)) && vp->v_type == VDIR) {
3234 			fdrop(wfp);
3235 			return EISDIR;
3236 		}
3237 
3238 		/*
3239 		 * Setup the new fp and simulate an open(), but for now do
3240 		 * not actually call VOP_OPEN() though we probably could.
3241 		 */
3242 		fp->f_type = DTYPE_VNODE;
3243                 /* retain flags not to be copied */
3244                 fp->f_flag = (fp->f_flag & ~FMASK) | (ap->a_oflags & FMASK);
3245                 fp->f_ops = &vnode_fileops;
3246                 fp->f_data = vp;
3247                 vref(vp);
3248 
3249 		if (ap->a_oflags & FWRITE)
3250 			atomic_add_int(&vp->v_writecount, 1);
3251 		KKASSERT(vp->v_opencount >= 0 && vp->v_opencount != INT_MAX);
3252 		atomic_add_int(&vp->v_opencount, 1);
3253 		fdrop(wfp);
3254 	} else {
3255 		/*
3256 		 * If wfp is not a vnode we have to share it directly.
3257 		 */
3258 		fdrop(*ap->a_fpp);
3259 		*ap->a_fpp = wfp;	/* transfer hold count */
3260 	}
3261 	return EALREADY;
3262 }
3263 
3264 /*
3265  * NOT MPSAFE - I think these refer to a common file descriptor table
3266  * and we need to spinlock that to link fdtol in.
3267  */
3268 struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader * old,struct proc * leader)3269 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
3270 			 struct proc *leader)
3271 {
3272 	struct filedesc_to_leader *fdtol;
3273 
3274 	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
3275 			M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
3276 	fdtol->fdl_refcount = 1;
3277 	fdtol->fdl_holdcount = 0;
3278 	fdtol->fdl_wakeup = 0;
3279 	fdtol->fdl_leader = leader;
3280 	if (old != NULL) {
3281 		fdtol->fdl_next = old->fdl_next;
3282 		fdtol->fdl_prev = old;
3283 		old->fdl_next = fdtol;
3284 		fdtol->fdl_next->fdl_prev = fdtol;
3285 	} else {
3286 		fdtol->fdl_next = fdtol;
3287 		fdtol->fdl_prev = fdtol;
3288 	}
3289 	return fdtol;
3290 }
3291 
3292 /*
3293  * Scan all file pointers in the system.  The callback is made with
3294  * the master list spinlock held exclusively.
3295  */
3296 void
allfiles_scan_exclusive(int (* callback)(struct file *,void *),void * data)3297 allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
3298 {
3299 	int i;
3300 
3301 	for (i = 0; i < NFILELIST_HEADS; ++i) {
3302 		struct filelist_head *head = &filelist_heads[i];
3303 		struct file *fp;
3304 
3305 		spin_lock(&head->spin);
3306 		LIST_FOREACH(fp, &head->list, f_list) {
3307 			int res;
3308 
3309 			res = callback(fp, data);
3310 			if (res < 0)
3311 				break;
3312 		}
3313 		spin_unlock(&head->spin);
3314 	}
3315 }
3316 
3317 /*
3318  * Get file structures.
3319  *
3320  * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
3321  */
3322 
3323 struct sysctl_kern_file_info {
3324 	int count;
3325 	int error;
3326 	struct sysctl_req *req;
3327 };
3328 
3329 static int sysctl_kern_file_callback(struct proc *p, void *data);
3330 
3331 static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)3332 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
3333 {
3334 	struct sysctl_kern_file_info info;
3335 
3336 	/*
3337 	 * Note: because the number of file descriptors is calculated
3338 	 * in different ways for sizing vs returning the data,
3339 	 * there is information leakage from the first loop.  However,
3340 	 * it is of a similar order of magnitude to the leakage from
3341 	 * global system statistics such as kern.openfiles.
3342 	 *
3343 	 * When just doing a count, note that we cannot just count
3344 	 * the elements and add f_count via the filehead list because
3345 	 * threaded processes share their descriptor table and f_count might
3346 	 * still be '1' in that case.
3347 	 *
3348 	 * Since the SYSCTL op can block, we must hold the process to
3349 	 * prevent it being ripped out from under us either in the
3350 	 * file descriptor loop or in the greater LIST_FOREACH.  The
3351 	 * process may be in varying states of disrepair.  If the process
3352 	 * is in SZOMB we may have caught it just as it is being removed
3353 	 * from the allproc list, we must skip it in that case to maintain
3354 	 * an unbroken chain through the allproc list.
3355 	 */
3356 	info.count = 0;
3357 	info.error = 0;
3358 	info.req = req;
3359 	allproc_scan(sysctl_kern_file_callback, &info, 0);
3360 
3361 	/*
3362 	 * When just calculating the size, overestimate a bit to try to
3363 	 * prevent system activity from causing the buffer-fill call
3364 	 * to fail later on.
3365 	 */
3366 	if (req->oldptr == NULL) {
3367 		info.count = (info.count + 16) + (info.count / 10);
3368 		info.error = SYSCTL_OUT(req, NULL,
3369 					info.count * sizeof(struct kinfo_file));
3370 	}
3371 	return (info.error);
3372 }
3373 
3374 static int
sysctl_kern_file_callback(struct proc * p,void * data)3375 sysctl_kern_file_callback(struct proc *p, void *data)
3376 {
3377 	struct sysctl_kern_file_info *info = data;
3378 	struct kinfo_file kf;
3379 	struct filedesc *fdp;
3380 	struct file *fp;
3381 	uid_t uid;
3382 	int n;
3383 
3384 	if (p->p_stat == SIDL || p->p_stat == SZOMB)
3385 		return(0);
3386 	if (!(PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) != 0))
3387 		return(0);
3388 
3389 	/*
3390 	 * Softref the fdp to prevent it from being destroyed
3391 	 */
3392 	spin_lock(&p->p_spin);
3393 	if ((fdp = p->p_fd) == NULL) {
3394 		spin_unlock(&p->p_spin);
3395 		return(0);
3396 	}
3397 	atomic_add_int(&fdp->fd_softrefs, 1);
3398 	spin_unlock(&p->p_spin);
3399 
3400 	/*
3401 	 * The fdp's own spinlock prevents the contents from being
3402 	 * modified.
3403 	 */
3404 	spin_lock_shared(&fdp->fd_spin);
3405 	for (n = 0; n < fdp->fd_nfiles; ++n) {
3406 		if ((fp = fdp->fd_files[n].fp) == NULL)
3407 			continue;
3408 		if (info->req->oldptr == NULL) {
3409 			++info->count;
3410 		} else {
3411 			uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
3412 			kcore_make_file(&kf, fp, p->p_pid, uid, n);
3413 			spin_unlock_shared(&fdp->fd_spin);
3414 			info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
3415 			spin_lock_shared(&fdp->fd_spin);
3416 			if (info->error)
3417 				break;
3418 		}
3419 	}
3420 	spin_unlock_shared(&fdp->fd_spin);
3421 	atomic_subtract_int(&fdp->fd_softrefs, 1);
3422 	if (info->error)
3423 		return(-1);
3424 	return(0);
3425 }
3426 
3427 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
3428     0, 0, sysctl_kern_file, "S,file", "Entire file table");
3429 
3430 SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW,
3431     &minfilesperproc, 0, "Minimum files allowed open per process");
3432 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
3433     &maxfilesperproc, 0, "Maximum files allowed open per process");
3434 SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW,
3435     &maxfilesperuser, 0, "Maximum files allowed open per user");
3436 
3437 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
3438     &maxfiles, 0, "Maximum number of files");
3439 
3440 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
3441     &maxfilesrootres, 0, "Descriptors reserved for root use");
3442 
3443 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
3444 	&nfiles, 0, "System-wide number of open files");
3445 
3446 static void
fildesc_drvinit(void * unused)3447 fildesc_drvinit(void *unused)
3448 {
3449 	int fd;
3450 
3451 	for (fd = 0; fd < NUMFDESC; fd++) {
3452 		make_dev(&fildesc_ops, fd,
3453 			 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
3454 	}
3455 
3456 	make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
3457 	make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
3458 	make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
3459 }
3460 
3461 struct fileops badfileops = {
3462 	.fo_read = badfo_readwrite,
3463 	.fo_write = badfo_readwrite,
3464 	.fo_ioctl = badfo_ioctl,
3465 	.fo_kqfilter = badfo_kqfilter,
3466 	.fo_stat = badfo_stat,
3467 	.fo_close = badfo_close,
3468 	.fo_shutdown = badfo_shutdown
3469 };
3470 
3471 int
badfo_readwrite(struct file * fp,struct uio * uio,struct ucred * cred,int flags)3472 badfo_readwrite(
3473 	struct file *fp,
3474 	struct uio *uio,
3475 	struct ucred *cred,
3476 	int flags
3477 ) {
3478 	return (EBADF);
3479 }
3480 
3481 int
badfo_ioctl(struct file * fp,u_long com,caddr_t data,struct ucred * cred,struct sysmsg * msgv)3482 badfo_ioctl(struct file *fp, u_long com, caddr_t data,
3483 	    struct ucred *cred, struct sysmsg *msgv)
3484 {
3485 	return (EBADF);
3486 }
3487 
3488 /*
3489  * Must return an error to prevent registration, typically
3490  * due to a revoked descriptor (file_filtops assigned).
3491  */
3492 int
badfo_kqfilter(struct file * fp,struct knote * kn)3493 badfo_kqfilter(struct file *fp, struct knote *kn)
3494 {
3495 	return (EOPNOTSUPP);
3496 }
3497 
3498 int
badfo_stat(struct file * fp,struct stat * sb,struct ucred * cred)3499 badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
3500 {
3501 	return (EBADF);
3502 }
3503 
3504 int
badfo_close(struct file * fp)3505 badfo_close(struct file *fp)
3506 {
3507 	return (EBADF);
3508 }
3509 
3510 int
badfo_shutdown(struct file * fp,int how)3511 badfo_shutdown(struct file *fp, int how)
3512 {
3513 	return (EBADF);
3514 }
3515 
3516 int
nofo_shutdown(struct file * fp,int how)3517 nofo_shutdown(struct file *fp, int how)
3518 {
3519 	return (EOPNOTSUPP);
3520 }
3521 
3522 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
3523     fildesc_drvinit,NULL);
3524 
3525 static void
filelist_heads_init(void * arg __unused)3526 filelist_heads_init(void *arg __unused)
3527 {
3528 	int i;
3529 
3530 	for (i = 0; i < NFILELIST_HEADS; ++i) {
3531 		struct filelist_head *head = &filelist_heads[i];
3532 
3533 		spin_init(&head->spin, "filehead_spin");
3534 		LIST_INIT(&head->list);
3535 	}
3536 }
3537 
3538 SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY, filelist_heads_init, NULL);
3539