1 /* $NetBSD: kern_descrip.c,v 1.257 2023/04/22 14:23:59 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1982, 1986, 1989, 1991, 1993
34 * The Regents of the University of California. All rights reserved.
35 * (c) UNIX System Laboratories, Inc.
36 * All or some portions of this file are derived from material licensed
37 * to the University of California by American Telephone and Telegraph
38 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
39 * the permission of UNIX System Laboratories, Inc.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)kern_descrip.c 8.8 (Berkeley) 2/14/95
66 */
67
68 /*
69 * File descriptor management.
70 */
71
72 #include <sys/cdefs.h>
73 __KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.257 2023/04/22 14:23:59 riastradh Exp $");
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/kernel.h>
79 #include <sys/proc.h>
80 #include <sys/file.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 #include <sys/stat.h>
84 #include <sys/ioctl.h>
85 #include <sys/fcntl.h>
86 #include <sys/pool.h>
87 #include <sys/unistd.h>
88 #include <sys/resourcevar.h>
89 #include <sys/conf.h>
90 #include <sys/event.h>
91 #include <sys/kauth.h>
92 #include <sys/atomic.h>
93 #include <sys/syscallargs.h>
94 #include <sys/cpu.h>
95 #include <sys/kmem.h>
96 #include <sys/vnode.h>
97 #include <sys/sysctl.h>
98 #include <sys/ktrace.h>
99
100 /*
101 * A list (head) of open files, counter, and lock protecting them.
102 */
103 struct filelist filehead __cacheline_aligned;
104 static u_int nfiles __cacheline_aligned;
105 kmutex_t filelist_lock __cacheline_aligned;
106
107 static pool_cache_t filedesc_cache __read_mostly;
108 static pool_cache_t file_cache __read_mostly;
109 static pool_cache_t fdfile_cache __read_mostly;
110
111 static int file_ctor(void *, void *, int);
112 static void file_dtor(void *, void *);
113 static int fdfile_ctor(void *, void *, int);
114 static void fdfile_dtor(void *, void *);
115 static int filedesc_ctor(void *, void *, int);
116 static void filedesc_dtor(void *, void *);
117 static int filedescopen(dev_t, int, int, lwp_t *);
118
119 static int sysctl_kern_file(SYSCTLFN_PROTO);
120 static int sysctl_kern_file2(SYSCTLFN_PROTO);
121 static void fill_file(struct file *, const struct file *);
122 static void fill_file2(struct kinfo_file *, const file_t *, const fdfile_t *,
123 int, pid_t);
124
125 const struct cdevsw filedesc_cdevsw = {
126 .d_open = filedescopen,
127 .d_close = noclose,
128 .d_read = noread,
129 .d_write = nowrite,
130 .d_ioctl = noioctl,
131 .d_stop = nostop,
132 .d_tty = notty,
133 .d_poll = nopoll,
134 .d_mmap = nommap,
135 .d_kqfilter = nokqfilter,
136 .d_discard = nodiscard,
137 .d_flag = D_OTHER | D_MPSAFE
138 };
139
140 /* For ease of reading. */
141 __strong_alias(fd_putvnode,fd_putfile)
142 __strong_alias(fd_putsock,fd_putfile)
143
144 /*
145 * Initialize the descriptor system.
146 */
147 void
148 fd_sys_init(void)
149 {
150 static struct sysctllog *clog;
151
152 mutex_init(&filelist_lock, MUTEX_DEFAULT, IPL_NONE);
153
154 LIST_INIT(&filehead);
155
156 file_cache = pool_cache_init(sizeof(file_t), coherency_unit, 0,
157 0, "file", NULL, IPL_NONE, file_ctor, file_dtor, NULL);
158 KASSERT(file_cache != NULL);
159
160 fdfile_cache = pool_cache_init(sizeof(fdfile_t), coherency_unit, 0,
161 PR_LARGECACHE, "fdfile", NULL, IPL_NONE, fdfile_ctor, fdfile_dtor,
162 NULL);
163 KASSERT(fdfile_cache != NULL);
164
165 filedesc_cache = pool_cache_init(sizeof(filedesc_t), coherency_unit,
166 0, 0, "filedesc", NULL, IPL_NONE, filedesc_ctor, filedesc_dtor,
167 NULL);
168 KASSERT(filedesc_cache != NULL);
169
170 sysctl_createv(&clog, 0, NULL, NULL,
171 CTLFLAG_PERMANENT,
172 CTLTYPE_STRUCT, "file",
173 SYSCTL_DESCR("System open file table"),
174 sysctl_kern_file, 0, NULL, 0,
175 CTL_KERN, KERN_FILE, CTL_EOL);
176 sysctl_createv(&clog, 0, NULL, NULL,
177 CTLFLAG_PERMANENT,
178 CTLTYPE_STRUCT, "file2",
179 SYSCTL_DESCR("System open file table"),
180 sysctl_kern_file2, 0, NULL, 0,
181 CTL_KERN, KERN_FILE2, CTL_EOL);
182 }
183
184 static bool
185 fd_isused(filedesc_t *fdp, unsigned fd)
186 {
187 u_int off = fd >> NDENTRYSHIFT;
188
189 KASSERT(fd < atomic_load_consume(&fdp->fd_dt)->dt_nfiles);
190
191 return (fdp->fd_lomap[off] & (1U << (fd & NDENTRYMASK))) != 0;
192 }
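/*
 * Worked example of the two-level bitmap indexing used below (editor's
 * sketch; assumes the usual NDENTRIES == 32, NDENTRYSHIFT == 5 from
 * sys/filedesc.h):
 *
 *	fd  = 70
 *	off = fd >> NDENTRYSHIFT;	-> 2
 *	bit = fd & NDENTRYMASK;		-> 6
 *
 * so descriptor 70 is tracked by bit 6 of fd_lomap[2].  When every bit
 * of a fd_lomap word is set, the word's own bit in fd_himap is set
 * (bit (off & NDENTRYMASK) of fd_himap[off >> NDENTRYSHIFT]), letting
 * fd_alloc() skip entirely-full words while searching for a free slot.
 */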
193
194 /*
195 * Verify that the bitmaps match the descriptor table.
196 */
197 static inline void
198 fd_checkmaps(filedesc_t *fdp)
199 {
200 #ifdef DEBUG
201 fdtab_t *dt;
202 u_int fd;
203
204 KASSERT(fdp->fd_refcnt <= 1 || mutex_owned(&fdp->fd_lock));
205
206 dt = fdp->fd_dt;
207 if (fdp->fd_refcnt == -1) {
208 /*
209 * fd_free tears down the table without maintaining its bitmap.
210 */
211 return;
212 }
213 for (fd = 0; fd < dt->dt_nfiles; fd++) {
214 if (fd < NDFDFILE) {
215 KASSERT(dt->dt_ff[fd] ==
216 (fdfile_t *)fdp->fd_dfdfile[fd]);
217 }
218 if (dt->dt_ff[fd] == NULL) {
219 KASSERT(!fd_isused(fdp, fd));
220 } else if (dt->dt_ff[fd]->ff_file != NULL) {
221 KASSERT(fd_isused(fdp, fd));
222 }
223 }
224 #endif
225 }
226
227 static int
228 fd_next_zero(filedesc_t *fdp, uint32_t *bitmap, int want, u_int bits)
229 {
230 int i, off, maxoff;
231 uint32_t sub;
232
233 KASSERT(mutex_owned(&fdp->fd_lock));
234
235 fd_checkmaps(fdp);
236
237 if (want > bits)
238 return -1;
239
240 off = want >> NDENTRYSHIFT;
241 i = want & NDENTRYMASK;
242 if (i) {
243 sub = bitmap[off] | ((u_int)~0 >> (NDENTRIES - i));
244 if (sub != ~0)
245 goto found;
246 off++;
247 }
248
249 maxoff = NDLOSLOTS(bits);
250 while (off < maxoff) {
251 if ((sub = bitmap[off]) != ~0)
252 goto found;
253 off++;
254 }
255
256 return -1;
257
258 found:
259 return (off << NDENTRYSHIFT) + ffs(~sub) - 1;
260 }
261
262 static int
263 fd_last_set(filedesc_t *fd, int last)
264 {
265 int off, i;
266 fdfile_t **ff = fd->fd_dt->dt_ff;
267 uint32_t *bitmap = fd->fd_lomap;
268
269 KASSERT(mutex_owned(&fd->fd_lock));
270
271 fd_checkmaps(fd);
272
273 off = (last - 1) >> NDENTRYSHIFT;
274
275 while (off >= 0 && !bitmap[off])
276 off--;
277
278 if (off < 0)
279 return -1;
280
281 i = ((off + 1) << NDENTRYSHIFT) - 1;
282 if (i >= last)
283 i = last - 1;
284
285 /* XXX should use bitmap */
286 while (i > 0 && (ff[i] == NULL || !ff[i]->ff_allocated))
287 i--;
288
289 return i;
290 }
291
292 static inline void
293 fd_used(filedesc_t *fdp, unsigned fd)
294 {
295 u_int off = fd >> NDENTRYSHIFT;
296 fdfile_t *ff;
297
298 ff = fdp->fd_dt->dt_ff[fd];
299
300 KASSERT(mutex_owned(&fdp->fd_lock));
301 KASSERT((fdp->fd_lomap[off] & (1U << (fd & NDENTRYMASK))) == 0);
302 KASSERT(ff != NULL);
303 KASSERT(ff->ff_file == NULL);
304 KASSERT(!ff->ff_allocated);
305
306 ff->ff_allocated = true;
307 fdp->fd_lomap[off] |= 1U << (fd & NDENTRYMASK);
308 if (__predict_false(fdp->fd_lomap[off] == ~0)) {
309 KASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
310 (1U << (off & NDENTRYMASK))) == 0);
311 fdp->fd_himap[off >> NDENTRYSHIFT] |= 1U << (off & NDENTRYMASK);
312 }
313
314 if ((int)fd > fdp->fd_lastfile) {
315 fdp->fd_lastfile = fd;
316 }
317
318 fd_checkmaps(fdp);
319 }
320
321 static inline void
322 fd_unused(filedesc_t *fdp, unsigned fd)
323 {
324 u_int off = fd >> NDENTRYSHIFT;
325 fdfile_t *ff;
326
327 ff = fdp->fd_dt->dt_ff[fd];
328
329 KASSERT(mutex_owned(&fdp->fd_lock));
330 KASSERT(ff != NULL);
331 KASSERT(ff->ff_file == NULL);
332 KASSERT(ff->ff_allocated);
333
334 if (fd < fdp->fd_freefile) {
335 fdp->fd_freefile = fd;
336 }
337
338 if (fdp->fd_lomap[off] == ~0) {
339 KASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
340 (1U << (off & NDENTRYMASK))) != 0);
341 fdp->fd_himap[off >> NDENTRYSHIFT] &=
342 ~(1U << (off & NDENTRYMASK));
343 }
344 KASSERT((fdp->fd_lomap[off] & (1U << (fd & NDENTRYMASK))) != 0);
345 fdp->fd_lomap[off] &= ~(1U << (fd & NDENTRYMASK));
346 ff->ff_allocated = false;
347
348 KASSERT(fd <= fdp->fd_lastfile);
349 if (fd == fdp->fd_lastfile) {
350 fdp->fd_lastfile = fd_last_set(fdp, fd);
351 }
352 fd_checkmaps(fdp);
353 }
354
355 /*
356 * Look up the file structure corresponding to a file descriptor
357 * and return the file, holding a reference on the descriptor.
358 */
359 file_t *
360 fd_getfile(unsigned fd)
361 {
362 filedesc_t *fdp;
363 fdfile_t *ff;
364 file_t *fp;
365 fdtab_t *dt;
366
367 /*
368 * Look up the fdfile structure representing this descriptor.
369 * We are doing this unlocked. See fd_tryexpand().
370 */
371 fdp = curlwp->l_fd;
372 dt = atomic_load_consume(&fdp->fd_dt);
373 if (__predict_false(fd >= dt->dt_nfiles)) {
374 return NULL;
375 }
376 ff = dt->dt_ff[fd];
377 KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
378 if (__predict_false(ff == NULL)) {
379 return NULL;
380 }
381
382 /* Now get a reference to the descriptor. */
383 if (fdp->fd_refcnt == 1) {
384 /*
385 * Single threaded: don't need to worry about concurrent
386 * access (other than earlier calls to kqueue, which may
387 * hold a reference to the descriptor).
388 */
389 ff->ff_refcnt++;
390 } else {
391 /*
392 * Multi threaded: issue a memory barrier to ensure that we
393 * acquire the file pointer _after_ adding a reference. If
394 * no memory barrier, we could fetch a stale pointer.
395 *
396 * In particular, we must coordinate the following four
397 * memory operations:
398 *
399 * A. fd_close store ff->ff_file = NULL
400 * B. fd_close refcnt = atomic_dec_uint_nv(&ff->ff_refcnt)
401 * C. fd_getfile atomic_inc_uint(&ff->ff_refcnt)
402 * D. fd_getfile load fp = ff->ff_file
403 *
404 * If the order is D;A;B;C:
405 *
406 * 1. D: fp = ff->ff_file
407 * 2. A: ff->ff_file = NULL
408 * 3. B: refcnt = atomic_dec_uint_nv(&ff->ff_refcnt)
409 * 4. C: atomic_inc_uint(&ff->ff_refcnt)
410 *
411 * then fd_close determines that there are no more
412 * references and decides to free fp immediately, at
413 * the same time that fd_getfile ends up with an fp that's
414 * about to be freed. *boom*
415 *
416 * By making B a release operation in fd_close, and by
417 * making C an acquire operation in fd_getfile, since
418 * they are atomic operations on the same object, which
419 * has a total modification order, we guarantee either:
420 *
421 * - B happens before C. Then since A is
422 * sequenced before B in fd_close, and C is
423 * sequenced before D in fd_getfile, we
424 * guarantee A happens before D, so fd_getfile
425 * reads a null fp and safely fails.
426 *
427 * - C happens before B. Then fd_getfile may read
428 * null or nonnull, but either way, fd_close
429 * will safely wait for references to drain.
430 */
431 atomic_inc_uint(&ff->ff_refcnt);
432 membar_acquire();
433 }
434
435 /*
436 * If the file is not open or is being closed then put the
437 * reference back.
438 */
439 fp = atomic_load_consume(&ff->ff_file);
440 if (__predict_true(fp != NULL)) {
441 return fp;
442 }
443 fd_putfile(fd);
444 return NULL;
445 }
446
447 /*
448 * Release a reference to a file descriptor acquired with fd_getfile().
449 */
450 void
451 fd_putfile(unsigned fd)
452 {
453 filedesc_t *fdp;
454 fdfile_t *ff;
455 u_int u, v;
456
457 fdp = curlwp->l_fd;
458 KASSERT(fd < atomic_load_consume(&fdp->fd_dt)->dt_nfiles);
459 ff = atomic_load_consume(&fdp->fd_dt)->dt_ff[fd];
460
461 KASSERT(ff != NULL);
462 KASSERT((ff->ff_refcnt & FR_MASK) > 0);
463 KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
464
465 if (fdp->fd_refcnt == 1) {
466 /*
467 * Single threaded: don't need to worry about concurrent
468 * access (other than earlier calls to kqueue, which may
469 * hold a reference to the descriptor).
470 */
471 if (__predict_false((ff->ff_refcnt & FR_CLOSING) != 0)) {
472 fd_close(fd);
473 return;
474 }
475 ff->ff_refcnt--;
476 return;
477 }
478
479 /*
480 * Ensure that any use of the file is complete and globally
481 * visible before dropping the final reference. If no membar,
482 * the current CPU could still access memory associated with
483 * the file after it has been freed or recycled by another
484 * CPU.
485 */
486 membar_release();
487
488 /*
489 * Be optimistic and start out with the assumption that no other
490 * threads are trying to close the descriptor. If the CAS fails,
491 * we lost a race and/or it's being closed.
492 */
493 for (u = ff->ff_refcnt & FR_MASK;; u = v) {
494 v = atomic_cas_uint(&ff->ff_refcnt, u, u - 1);
495 if (__predict_true(u == v)) {
496 return;
497 }
498 if (__predict_false((v & FR_CLOSING) != 0)) {
499 break;
500 }
501 }
502
503 /* Another thread is waiting to close the file: join it. */
504 (void)fd_close(fd);
505 }
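/*
 * Usage sketch (editor's addition, not part of the original source):
 * a system call operating on a descriptor pairs fd_getfile() with
 * fd_putfile(), holding the descriptor reference only for the duration
 * of the operation:
 *
 *	file_t *fp;
 *	int error;
 *
 *	if ((fp = fd_getfile(fd)) == NULL)
 *		return EBADF;
 *	error = (*fp->f_ops->fo_ioctl)(fp, com, data);
 *	fd_putfile(fd);
 *	return error;
 */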
506
507 /*
508 * Convenience wrapper around fd_getfile() that returns reference
509 * to a vnode.
510 */
511 int
512 fd_getvnode(unsigned fd, file_t **fpp)
513 {
514 vnode_t *vp;
515 file_t *fp;
516
517 fp = fd_getfile(fd);
518 if (__predict_false(fp == NULL)) {
519 return EBADF;
520 }
521 if (__predict_false(fp->f_type != DTYPE_VNODE)) {
522 fd_putfile(fd);
523 return EINVAL;
524 }
525 vp = fp->f_vnode;
526 if (__predict_false(vp->v_type == VBAD)) {
527 /* XXX Is this case really necessary? */
528 fd_putfile(fd);
529 return EBADF;
530 }
531 *fpp = fp;
532 return 0;
533 }
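/*
 * Usage sketch (editor's addition): a caller needing the underlying
 * vnode releases the descriptor with fd_putvnode(), an alias of
 * fd_putfile() (see the __strong_alias near the top of this file):
 *
 *	file_t *fp;
 *	int error;
 *
 *	if ((error = fd_getvnode(fd, &fp)) != 0)
 *		return error;
 *	... operate on fp->f_vnode ...
 *	fd_putvnode(fd);
 */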
534
535 /*
536 * Convenience wrapper around fd_getfile() that returns reference
537 * to a socket.
538 */
539 int
540 fd_getsock1(unsigned fd, struct socket **sop, file_t **fp)
541 {
542 *fp = fd_getfile(fd);
543 if (__predict_false(*fp == NULL)) {
544 return EBADF;
545 }
546 if (__predict_false((*fp)->f_type != DTYPE_SOCKET)) {
547 fd_putfile(fd);
548 return ENOTSOCK;
549 }
550 *sop = (*fp)->f_socket;
551 return 0;
552 }
553
554 int
555 fd_getsock(unsigned fd, struct socket **sop)
556 {
557 file_t *fp;
558 return fd_getsock1(fd, sop, &fp);
559 }
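/*
 * Usage sketch (editor's addition): socket system calls follow the
 * same pattern via fd_getsock()/fd_putsock():
 *
 *	struct socket *so;
 *	int error;
 *
 *	if ((error = fd_getsock(fd, &so)) != 0)
 *		return error;
 *	... operate on so ...
 *	fd_putsock(fd);
 */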
560
561 /*
562 * Look up the file structure corresponding to a file descriptor
563 * and return it with a reference held on the file, not the
564 * descriptor.
565 *
566 * This is heavyweight and only used when accessing descriptors
567 * from a foreign process. The caller must ensure that `p' does
568 * not exit or fork across this call.
569 *
570 * To release the file (not descriptor) reference, use closef().
571 */
572 file_t *
573 fd_getfile2(proc_t *p, unsigned fd)
574 {
575 filedesc_t *fdp;
576 fdfile_t *ff;
577 file_t *fp;
578 fdtab_t *dt;
579
580 fdp = p->p_fd;
581 mutex_enter(&fdp->fd_lock);
582 dt = fdp->fd_dt;
583 if (fd >= dt->dt_nfiles) {
584 mutex_exit(&fdp->fd_lock);
585 return NULL;
586 }
587 if ((ff = dt->dt_ff[fd]) == NULL) {
588 mutex_exit(&fdp->fd_lock);
589 return NULL;
590 }
591 if ((fp = atomic_load_consume(&ff->ff_file)) == NULL) {
592 mutex_exit(&fdp->fd_lock);
593 return NULL;
594 }
595 mutex_enter(&fp->f_lock);
596 fp->f_count++;
597 mutex_exit(&fp->f_lock);
598 mutex_exit(&fdp->fd_lock);
599
600 return fp;
601 }
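/*
 * Usage sketch (editor's addition): inspecting a foreign process'
 * descriptor, e.g. from a sysctl handler.  The reference taken here is
 * on the file, not the descriptor, so it is dropped with closef():
 *
 *	file_t *fp;
 *
 *	if ((fp = fd_getfile2(p, fd)) == NULL)
 *		return EBADF;
 *	... inspect fp; the caller must keep p from exiting ...
 *	(void)closef(fp);
 */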
602
603 /*
604 * Internal form of close. Must be called with a reference to the
605 * descriptor, and will drop the reference. When all descriptor
606 * references are dropped, releases the descriptor slot and a single
607 * reference to the file structure.
608 */
609 int
610 fd_close(unsigned fd)
611 {
612 struct flock lf;
613 filedesc_t *fdp;
614 fdfile_t *ff;
615 file_t *fp;
616 proc_t *p;
617 lwp_t *l;
618 u_int refcnt;
619
620 l = curlwp;
621 p = l->l_proc;
622 fdp = l->l_fd;
623 ff = atomic_load_consume(&fdp->fd_dt)->dt_ff[fd];
624
625 KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
626
627 mutex_enter(&fdp->fd_lock);
628 KASSERT((ff->ff_refcnt & FR_MASK) > 0);
629 fp = atomic_load_consume(&ff->ff_file);
630 if (__predict_false(fp == NULL)) {
631 /*
632 * Another user of the file is already closing, and is
633 * waiting for other users of the file to drain. Release
634 * our reference, and wake up the closer.
635 */
636 membar_release();
637 atomic_dec_uint(&ff->ff_refcnt);
638 cv_broadcast(&ff->ff_closing);
639 mutex_exit(&fdp->fd_lock);
640
641 /*
642 * An application error, so pretend that the descriptor
643 * was already closed. We can't safely wait for it to
644 * be closed without potentially deadlocking.
645 */
646 return (EBADF);
647 }
648 KASSERT((ff->ff_refcnt & FR_CLOSING) == 0);
649
650 /*
651 * There may be multiple users of this file within the process.
652 * Notify existing and new users that the file is closing. This
653 * will prevent them from adding additional uses to this file
654 * while we are closing it.
655 */
656 atomic_store_relaxed(&ff->ff_file, NULL);
657 ff->ff_exclose = false;
658
659 /*
660 * We expect the caller to hold a descriptor reference - drop it.
661 * The reference count may increase beyond zero at this point due
662 * to an erroneous descriptor reference by an application, but
663 * fd_getfile() will notice that the file is being closed and drop
664 * the reference again.
665 */
666 if (fdp->fd_refcnt == 1) {
667 /* Single threaded. */
668 refcnt = --(ff->ff_refcnt);
669 } else {
670 /* Multi threaded. */
671 membar_release();
672 refcnt = atomic_dec_uint_nv(&ff->ff_refcnt);
673 membar_acquire();
674 }
675 if (__predict_false(refcnt != 0)) {
676 /*
677 * Wait for other references to drain. This is typically
678 * an application error - the descriptor is being closed
679 * while still in use.
680 * (Or just a threaded application trying to unblock its
681 * thread that sleeps in (say) accept()).
682 */
683 atomic_or_uint(&ff->ff_refcnt, FR_CLOSING);
684
685 /*
686 * Remove any knotes attached to the file. A knote
687 * attached to the descriptor can hold references on it.
688 */
689 mutex_exit(&fdp->fd_lock);
690 if (!SLIST_EMPTY(&ff->ff_knlist)) {
691 knote_fdclose(fd);
692 }
693
694 /*
695 * Since the file system code doesn't know which fd
696 * each request came from (think dup()), we have to
697 * ask it to return ERESTART for any long-term blocks.
698 * The re-entry through read/write/etc will detect the
699 * closed fd and return EBADF.
700 * Blocked partial writes may return a short length.
701 */
702 (*fp->f_ops->fo_restart)(fp);
703 mutex_enter(&fdp->fd_lock);
704
705 /*
706 * We need to see the count drop to zero at least once,
707 * in order to ensure that all pre-existing references
708 * have been drained. New references past this point are
709 * of no interest.
710 * XXX (dsl) this may need to call fo_restart() after a
711 * timeout to guarantee that all the system calls exit.
712 */
713 while ((ff->ff_refcnt & FR_MASK) != 0) {
714 cv_wait(&ff->ff_closing, &fdp->fd_lock);
715 }
716 atomic_and_uint(&ff->ff_refcnt, ~FR_CLOSING);
717 } else {
718 /* If no references, there must be no knotes. */
719 KASSERT(SLIST_EMPTY(&ff->ff_knlist));
720 }
721
722 /*
723 * POSIX record locking dictates that any close releases ALL
724 * locks owned by this process. This is handled by setting
725 * a flag in the unlock to free ONLY locks obeying POSIX
726 * semantics, and not to free BSD-style file locks.
727 * If the descriptor was in a message, POSIX-style locks
728 * aren't passed with the descriptor.
729 */
730 if (__predict_false((p->p_flag & PK_ADVLOCK) != 0) &&
731 fp->f_ops->fo_advlock != NULL) {
732 lf.l_whence = SEEK_SET;
733 lf.l_start = 0;
734 lf.l_len = 0;
735 lf.l_type = F_UNLCK;
736 mutex_exit(&fdp->fd_lock);
737 (void)(*fp->f_ops->fo_advlock)(fp, p, F_UNLCK, &lf, F_POSIX);
738 mutex_enter(&fdp->fd_lock);
739 }
740
741 /* Free descriptor slot. */
742 fd_unused(fdp, fd);
743 mutex_exit(&fdp->fd_lock);
744
745 /* Now drop reference to the file itself. */
746 return closef(fp);
747 }
748
749 /*
750 * Duplicate a file descriptor.
751 */
752 int
753 fd_dup(file_t *fp, int minfd, int *newp, bool exclose)
754 {
755 proc_t *p = curproc;
756 fdtab_t *dt;
757 int error;
758
759 while ((error = fd_alloc(p, minfd, newp)) != 0) {
760 if (error != ENOSPC) {
761 return error;
762 }
763 fd_tryexpand(p);
764 }
765
766 dt = atomic_load_consume(&curlwp->l_fd->fd_dt);
767 dt->dt_ff[*newp]->ff_exclose = exclose;
768 fd_affix(p, fp, *newp);
769 return 0;
770 }
771
772 /*
773 * dup2 operation.
774 */
775 int
776 fd_dup2(file_t *fp, unsigned newfd, int flags)
777 {
778 filedesc_t *fdp = curlwp->l_fd;
779 fdfile_t *ff;
780 fdtab_t *dt;
781
782 if (flags & ~(O_CLOEXEC|O_NONBLOCK|O_NOSIGPIPE))
783 return EINVAL;
784 /*
785 * Ensure there are enough slots in the descriptor table,
786 * and allocate an fdfile_t up front in case we need it.
787 */
788 while (newfd >= atomic_load_consume(&fdp->fd_dt)->dt_nfiles) {
789 fd_tryexpand(curproc);
790 }
791 ff = pool_cache_get(fdfile_cache, PR_WAITOK);
792
793 /*
794 * If there is already a file open, close it. If the file is
795 * half open, wait for it to be constructed before closing it.
796 * XXX Potential for deadlock here?
797 */
798 mutex_enter(&fdp->fd_lock);
799 while (fd_isused(fdp, newfd)) {
800 mutex_exit(&fdp->fd_lock);
801 if (fd_getfile(newfd) != NULL) {
802 (void)fd_close(newfd);
803 } else {
804 /*
805 * Crummy, but unlikely to happen.
806 * Can occur if we interrupt another
807 * thread while it is opening a file.
808 */
809 kpause("dup2", false, 1, NULL);
810 }
811 mutex_enter(&fdp->fd_lock);
812 }
813 dt = fdp->fd_dt;
814 if (dt->dt_ff[newfd] == NULL) {
815 KASSERT(newfd >= NDFDFILE);
816 dt->dt_ff[newfd] = ff;
817 ff = NULL;
818 }
819 fd_used(fdp, newfd);
820 mutex_exit(&fdp->fd_lock);
821
822 dt->dt_ff[newfd]->ff_exclose = (flags & O_CLOEXEC) != 0;
823 fp->f_flag |= flags & (FNONBLOCK|FNOSIGPIPE);
824 /* Slot is now allocated. Insert copy of the file. */
825 fd_affix(curproc, fp, newfd);
826 if (ff != NULL) {
827 pool_cache_put(fdfile_cache, ff);
828 }
829 return 0;
830 }
831
832 /*
833 * Drop reference to a file structure.
834 */
835 int
836 closef(file_t *fp)
837 {
838 struct flock lf;
839 int error;
840
841 /*
842 * Drop reference. If referenced elsewhere it's still open
843 * and we have nothing more to do.
844 */
845 mutex_enter(&fp->f_lock);
846 KASSERT(fp->f_count > 0);
847 if (--fp->f_count > 0) {
848 mutex_exit(&fp->f_lock);
849 return 0;
850 }
851 KASSERT(fp->f_count == 0);
852 mutex_exit(&fp->f_lock);
853
854 /* We held the last reference - release locks, close and free. */
855 if (fp->f_ops->fo_advlock == NULL) {
856 KASSERT((fp->f_flag & FHASLOCK) == 0);
857 } else if (fp->f_flag & FHASLOCK) {
858 lf.l_whence = SEEK_SET;
859 lf.l_start = 0;
860 lf.l_len = 0;
861 lf.l_type = F_UNLCK;
862 (void)(*fp->f_ops->fo_advlock)(fp, fp, F_UNLCK, &lf, F_FLOCK);
863 }
864 if (fp->f_ops != NULL) {
865 error = (*fp->f_ops->fo_close)(fp);
866 } else {
867 error = 0;
868 }
869 KASSERT(fp->f_count == 0);
870 KASSERT(fp->f_cred != NULL);
871 pool_cache_put(file_cache, fp);
872
873 return error;
874 }
875
876 /*
877 * Allocate a file descriptor for the process.
878 */
879 int
880 fd_alloc(proc_t *p, int want, int *result)
881 {
882 filedesc_t *fdp = p->p_fd;
883 int i, lim, last, error, hi;
884 u_int off;
885 fdtab_t *dt;
886
887 KASSERT(p == curproc || p == &proc0);
888
889 /*
890 * Search for a free descriptor starting at the higher
891 * of want or fd_freefile.
892 */
893 mutex_enter(&fdp->fd_lock);
894 fd_checkmaps(fdp);
895 dt = fdp->fd_dt;
896 KASSERT(dt->dt_ff[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
897 lim = uimin((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);
898 last = uimin(dt->dt_nfiles, lim);
899 for (;;) {
900 if ((i = want) < fdp->fd_freefile)
901 i = fdp->fd_freefile;
902 off = i >> NDENTRYSHIFT;
903 hi = fd_next_zero(fdp, fdp->fd_himap, off,
904 (last + NDENTRIES - 1) >> NDENTRYSHIFT);
905 if (hi == -1)
906 break;
907 i = fd_next_zero(fdp, &fdp->fd_lomap[hi],
908 hi > off ? 0 : i & NDENTRYMASK, NDENTRIES);
909 if (i == -1) {
910 /*
911 * Free file descriptor in this block was
912 * below want, try again with higher want.
913 */
914 want = (hi + 1) << NDENTRYSHIFT;
915 continue;
916 }
917 i += (hi << NDENTRYSHIFT);
918 if (i >= last) {
919 break;
920 }
921 if (dt->dt_ff[i] == NULL) {
922 KASSERT(i >= NDFDFILE);
923 dt->dt_ff[i] = pool_cache_get(fdfile_cache, PR_WAITOK);
924 }
925 KASSERT(dt->dt_ff[i]->ff_file == NULL);
926 fd_used(fdp, i);
927 if (want <= fdp->fd_freefile) {
928 fdp->fd_freefile = i;
929 }
930 *result = i;
931 KASSERT(i >= NDFDFILE ||
932 dt->dt_ff[i] == (fdfile_t *)fdp->fd_dfdfile[i]);
933 fd_checkmaps(fdp);
934 mutex_exit(&fdp->fd_lock);
935 return 0;
936 }
937
938 /* No space in current array. Let the caller expand and retry. */
939 error = (dt->dt_nfiles >= lim) ? EMFILE : ENOSPC;
940 mutex_exit(&fdp->fd_lock);
941 return error;
942 }
943
944 /*
945 * Allocate memory for a descriptor table.
946 */
947 static fdtab_t *
948 fd_dtab_alloc(int n)
949 {
950 fdtab_t *dt;
951 size_t sz;
952
953 KASSERT(n > NDFILE);
954
955 sz = sizeof(*dt) + (n - NDFILE) * sizeof(dt->dt_ff[0]);
956 dt = kmem_alloc(sz, KM_SLEEP);
957 #ifdef DIAGNOSTIC
958 memset(dt, 0xff, sz);
959 #endif
960 dt->dt_nfiles = n;
961 dt->dt_link = NULL;
962 return dt;
963 }
964
965 /*
966 * Free a descriptor table, and all tables linked for deferred free.
967 */
968 static void
969 fd_dtab_free(fdtab_t *dt)
970 {
971 fdtab_t *next;
972 size_t sz;
973
974 do {
975 next = dt->dt_link;
976 KASSERT(dt->dt_nfiles > NDFILE);
977 sz = sizeof(*dt) +
978 (dt->dt_nfiles - NDFILE) * sizeof(dt->dt_ff[0]);
979 #ifdef DIAGNOSTIC
980 memset(dt, 0xff, sz);
981 #endif
982 kmem_free(dt, sz);
983 dt = next;
984 } while (dt != NULL);
985 }
986
987 /*
988 * Allocate descriptor bitmap.
989 */
990 static void
991 fd_map_alloc(int n, uint32_t **lo, uint32_t **hi)
992 {
993 uint8_t *ptr;
994 size_t szlo, szhi;
995
996 KASSERT(n > NDENTRIES);
997
998 szlo = NDLOSLOTS(n) * sizeof(uint32_t);
999 szhi = NDHISLOTS(n) * sizeof(uint32_t);
1000 ptr = kmem_alloc(szlo + szhi, KM_SLEEP);
1001 *lo = (uint32_t *)ptr;
1002 *hi = (uint32_t *)(ptr + szlo);
1003 }
1004
1005 /*
1006 * Free descriptor bitmap.
1007 */
1008 static void
1009 fd_map_free(int n, uint32_t *lo, uint32_t *hi)
1010 {
1011 size_t szlo, szhi;
1012
1013 KASSERT(n > NDENTRIES);
1014
1015 szlo = NDLOSLOTS(n) * sizeof(uint32_t);
1016 szhi = NDHISLOTS(n) * sizeof(uint32_t);
1017 KASSERT(hi == (uint32_t *)((uint8_t *)lo + szlo));
1018 kmem_free(lo, szlo + szhi);
1019 }
1020
1021 /*
1022 * Expand a process' descriptor table.
1023 */
1024 void
1025 fd_tryexpand(proc_t *p)
1026 {
1027 filedesc_t *fdp;
1028 int i, numfiles, oldnfiles;
1029 fdtab_t *newdt, *dt;
1030 uint32_t *newhimap, *newlomap;
1031
1032 KASSERT(p == curproc || p == &proc0);
1033
1034 fdp = p->p_fd;
1035 newhimap = NULL;
1036 newlomap = NULL;
1037 oldnfiles = atomic_load_consume(&fdp->fd_dt)->dt_nfiles;
1038
1039 if (oldnfiles < NDEXTENT)
1040 numfiles = NDEXTENT;
1041 else
1042 numfiles = 2 * oldnfiles;
1043
1044 newdt = fd_dtab_alloc(numfiles);
1045 if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
1046 fd_map_alloc(numfiles, &newlomap, &newhimap);
1047 }
1048
1049 mutex_enter(&fdp->fd_lock);
1050 dt = fdp->fd_dt;
1051 KASSERT(dt->dt_ff[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
1052 if (dt->dt_nfiles != oldnfiles) {
1053 /* fdp changed; caller must retry */
1054 mutex_exit(&fdp->fd_lock);
1055 fd_dtab_free(newdt);
1056 if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
1057 fd_map_free(numfiles, newlomap, newhimap);
1058 }
1059 return;
1060 }
1061
1062 /* Copy the existing descriptor table and zero the new portion. */
1063 i = sizeof(fdfile_t *) * oldnfiles;
1064 memcpy(newdt->dt_ff, dt->dt_ff, i);
1065 memset((uint8_t *)newdt->dt_ff + i, 0,
1066 numfiles * sizeof(fdfile_t *) - i);
1067
1068 /*
1069 * Link old descriptor array into list to be discarded. We defer
1070 * freeing until the last reference to the descriptor table goes
1071 * away (usually process exit). This allows us to do lockless
1072 * lookups in fd_getfile().
1073 */
1074 if (oldnfiles > NDFILE) {
1075 if (fdp->fd_refcnt > 1) {
1076 newdt->dt_link = dt;
1077 } else {
1078 fd_dtab_free(dt);
1079 }
1080 }
1081
1082 if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
1083 i = NDHISLOTS(oldnfiles) * sizeof(uint32_t);
1084 memcpy(newhimap, fdp->fd_himap, i);
1085 memset((uint8_t *)newhimap + i, 0,
1086 NDHISLOTS(numfiles) * sizeof(uint32_t) - i);
1087
1088 i = NDLOSLOTS(oldnfiles) * sizeof(uint32_t);
1089 memcpy(newlomap, fdp->fd_lomap, i);
1090 memset((uint8_t *)newlomap + i, 0,
1091 NDLOSLOTS(numfiles) * sizeof(uint32_t) - i);
1092
1093 if (NDHISLOTS(oldnfiles) > NDHISLOTS(NDFILE)) {
1094 fd_map_free(oldnfiles, fdp->fd_lomap, fdp->fd_himap);
1095 }
1096 fdp->fd_himap = newhimap;
1097 fdp->fd_lomap = newlomap;
1098 }
1099
1100 /*
1101 * All other modifications must become globally visible before
1102 * the change to fd_dt. See fd_getfile().
1103 */
1104 atomic_store_release(&fdp->fd_dt, newdt);
1105 KASSERT(newdt->dt_ff[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
1106 fd_checkmaps(fdp);
1107 mutex_exit(&fdp->fd_lock);
1108 }
1109
1110 /*
1111 * Create a new open file structure and allocate a file descriptor
1112 * for the current process.
1113 */
1114 int
1115 fd_allocfile(file_t **resultfp, int *resultfd)
1116 {
1117 proc_t *p = curproc;
1118 kauth_cred_t cred;
1119 file_t *fp;
1120 int error;
1121
1122 while ((error = fd_alloc(p, 0, resultfd)) != 0) {
1123 if (error != ENOSPC) {
1124 return error;
1125 }
1126 fd_tryexpand(p);
1127 }
1128
1129 fp = pool_cache_get(file_cache, PR_WAITOK);
1130 if (fp == NULL) {
1131 fd_abort(p, NULL, *resultfd);
1132 return ENFILE;
1133 }
1134 KASSERT(fp->f_count == 0);
1135 KASSERT(fp->f_msgcount == 0);
1136 KASSERT(fp->f_unpcount == 0);
1137
1138 /* Replace cached credentials if not what we need. */
1139 cred = curlwp->l_cred;
1140 if (__predict_false(cred != fp->f_cred)) {
1141 kauth_cred_free(fp->f_cred);
1142 kauth_cred_hold(cred);
1143 fp->f_cred = cred;
1144 }
1145
1146 /*
1147 * Don't allow recycled files to be scanned.
1148 * See uipc_usrreq.c.
1149 */
1150 if (__predict_false((fp->f_flag & FSCAN) != 0)) {
1151 mutex_enter(&fp->f_lock);
1152 atomic_and_uint(&fp->f_flag, ~FSCAN);
1153 mutex_exit(&fp->f_lock);
1154 }
1155
1156 fp->f_advice = 0;
1157 fp->f_offset = 0;
1158 *resultfp = fp;
1159
1160 return 0;
1161 }
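/*
 * Usage sketch (editor's addition; setup_the_file() is a hypothetical
 * helper): the usual open-style sequence allocates the file and the
 * descriptor, initializes the file, and then either publishes it with
 * fd_affix() or backs out with fd_abort():
 *
 *	file_t *fp;
 *	int fd, error;
 *
 *	if ((error = fd_allocfile(&fp, &fd)) != 0)
 *		return error;
 *	if ((error = setup_the_file(fp)) != 0) {
 *		fd_abort(curproc, fp, fd);
 *		return error;
 *	}
 *	fd_affix(curproc, fp, fd);
 *	*retval = fd;
 *	return 0;
 */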
1162
1163 /*
1164 * Successful creation of a new descriptor: make visible to the process.
1165 */
1166 void
1167 fd_affix(proc_t *p, file_t *fp, unsigned fd)
1168 {
1169 fdfile_t *ff;
1170 filedesc_t *fdp;
1171 fdtab_t *dt;
1172
1173 KASSERT(p == curproc || p == &proc0);
1174
1175 /* Add a reference to the file structure. */
1176 mutex_enter(&fp->f_lock);
1177 fp->f_count++;
1178 mutex_exit(&fp->f_lock);
1179
1180 /*
1181 * Insert the new file into the descriptor slot.
1182 */
1183 fdp = p->p_fd;
1184 dt = atomic_load_consume(&fdp->fd_dt);
1185 ff = dt->dt_ff[fd];
1186
1187 KASSERT(ff != NULL);
1188 KASSERT(ff->ff_file == NULL);
1189 KASSERT(ff->ff_allocated);
1190 KASSERT(fd_isused(fdp, fd));
1191 KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
1192
1193 /* No need to lock in order to make file initially visible. */
1194 atomic_store_release(&ff->ff_file, fp);
1195 }
1196
1197 /*
1198 * Abort creation of a new descriptor: free descriptor slot and file.
1199 */
1200 void
1201 fd_abort(proc_t *p, file_t *fp, unsigned fd)
1202 {
1203 filedesc_t *fdp;
1204 fdfile_t *ff;
1205
1206 KASSERT(p == curproc || p == &proc0);
1207
1208 fdp = p->p_fd;
1209 ff = atomic_load_consume(&fdp->fd_dt)->dt_ff[fd];
1210 ff->ff_exclose = false;
1211
1212 KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
1213
1214 mutex_enter(&fdp->fd_lock);
1215 KASSERT(fd_isused(fdp, fd));
1216 fd_unused(fdp, fd);
1217 mutex_exit(&fdp->fd_lock);
1218
1219 if (fp != NULL) {
1220 KASSERT(fp->f_count == 0);
1221 KASSERT(fp->f_cred != NULL);
1222 pool_cache_put(file_cache, fp);
1223 }
1224 }
1225
1226 static int
1227 file_ctor(void *arg, void *obj, int flags)
1228 {
1229 file_t *fp = obj;
1230
1231 memset(fp, 0, sizeof(*fp));
1232
1233 mutex_enter(&filelist_lock);
1234 if (__predict_false(nfiles >= maxfiles)) {
1235 mutex_exit(&filelist_lock);
1236 tablefull("file", "increase kern.maxfiles or MAXFILES");
1237 return ENFILE;
1238 }
1239 nfiles++;
1240 LIST_INSERT_HEAD(&filehead, fp, f_list);
1241 mutex_init(&fp->f_lock, MUTEX_DEFAULT, IPL_NONE);
1242 fp->f_cred = curlwp->l_cred;
1243 kauth_cred_hold(fp->f_cred);
1244 mutex_exit(&filelist_lock);
1245
1246 return 0;
1247 }
1248
1249 static void
1250 file_dtor(void *arg, void *obj)
1251 {
1252 file_t *fp = obj;
1253
1254 mutex_enter(&filelist_lock);
1255 nfiles--;
1256 LIST_REMOVE(fp, f_list);
1257 mutex_exit(&filelist_lock);
1258
1259 KASSERT(fp->f_count == 0);
1260 kauth_cred_free(fp->f_cred);
1261 mutex_destroy(&fp->f_lock);
1262 }
1263
1264 static int
1265 fdfile_ctor(void *arg, void *obj, int flags)
1266 {
1267 fdfile_t *ff = obj;
1268
1269 memset(ff, 0, sizeof(*ff));
1270 cv_init(&ff->ff_closing, "fdclose");
1271
1272 return 0;
1273 }
1274
1275 static void
1276 fdfile_dtor(void *arg, void *obj)
1277 {
1278 fdfile_t *ff = obj;
1279
1280 cv_destroy(&ff->ff_closing);
1281 }
1282
1283 file_t *
1284 fgetdummy(void)
1285 {
1286 file_t *fp;
1287
1288 fp = kmem_zalloc(sizeof(*fp), KM_SLEEP);
1289 mutex_init(&fp->f_lock, MUTEX_DEFAULT, IPL_NONE);
1290 return fp;
1291 }
1292
1293 void
1294 fputdummy(file_t *fp)
1295 {
1296
1297 mutex_destroy(&fp->f_lock);
1298 kmem_free(fp, sizeof(*fp));
1299 }
1300
1301 /*
1302 * Create an initial filedesc structure.
1303 */
1304 filedesc_t *
1305 fd_init(filedesc_t *fdp)
1306 {
1307 #ifdef DIAGNOSTIC
1308 unsigned fd;
1309 #endif
1310
1311 if (__predict_true(fdp == NULL)) {
1312 fdp = pool_cache_get(filedesc_cache, PR_WAITOK);
1313 } else {
1314 KASSERT(fdp == &filedesc0);
1315 filedesc_ctor(NULL, fdp, PR_WAITOK);
1316 }
1317
1318 #ifdef DIAGNOSTIC
1319 KASSERT(fdp->fd_lastfile == -1);
1320 KASSERT(fdp->fd_lastkqfile == -1);
1321 KASSERT(fdp->fd_knhash == NULL);
1322 KASSERT(fdp->fd_freefile == 0);
1323 KASSERT(fdp->fd_exclose == false);
1324 KASSERT(fdp->fd_dt == &fdp->fd_dtbuiltin);
1325 KASSERT(fdp->fd_dtbuiltin.dt_nfiles == NDFILE);
1326 for (fd = 0; fd < NDFDFILE; fd++) {
1327 KASSERT(fdp->fd_dtbuiltin.dt_ff[fd] ==
1328 (fdfile_t *)fdp->fd_dfdfile[fd]);
1329 }
1330 for (fd = NDFDFILE; fd < NDFILE; fd++) {
1331 KASSERT(fdp->fd_dtbuiltin.dt_ff[fd] == NULL);
1332 }
1333 KASSERT(fdp->fd_himap == fdp->fd_dhimap);
1334 KASSERT(fdp->fd_lomap == fdp->fd_dlomap);
1335 #endif /* DIAGNOSTIC */
1336
1337 fdp->fd_refcnt = 1;
1338 fd_checkmaps(fdp);
1339
1340 return fdp;
1341 }
1342
1343 /*
1344 * Initialize a file descriptor table.
1345 */
1346 static int
1347 filedesc_ctor(void *arg, void *obj, int flag)
1348 {
1349 filedesc_t *fdp = obj;
1350 fdfile_t **ffp;
1351 int i;
1352
1353 memset(fdp, 0, sizeof(*fdp));
1354 mutex_init(&fdp->fd_lock, MUTEX_DEFAULT, IPL_NONE);
1355 fdp->fd_lastfile = -1;
1356 fdp->fd_lastkqfile = -1;
1357 fdp->fd_dt = &fdp->fd_dtbuiltin;
1358 fdp->fd_dtbuiltin.dt_nfiles = NDFILE;
1359 fdp->fd_himap = fdp->fd_dhimap;
1360 fdp->fd_lomap = fdp->fd_dlomap;
1361
1362 CTASSERT(sizeof(fdp->fd_dfdfile[0]) >= sizeof(fdfile_t));
1363 for (i = 0, ffp = fdp->fd_dt->dt_ff; i < NDFDFILE; i++, ffp++) {
1364 *ffp = (fdfile_t *)fdp->fd_dfdfile[i];
1365 (void)fdfile_ctor(NULL, fdp->fd_dfdfile[i], PR_WAITOK);
1366 }
1367
1368 return 0;
1369 }
1370
1371 static void
1372 filedesc_dtor(void *arg, void *obj)
1373 {
1374 filedesc_t *fdp = obj;
1375 int i;
1376
1377 for (i = 0; i < NDFDFILE; i++) {
1378 fdfile_dtor(NULL, fdp->fd_dfdfile[i]);
1379 }
1380
1381 mutex_destroy(&fdp->fd_lock);
1382 }
1383
1384 /*
1385 * Make p share curproc's filedesc structure.
1386 */
1387 void
1388 fd_share(struct proc *p)
1389 {
1390 filedesc_t *fdp;
1391
1392 fdp = curlwp->l_fd;
1393 p->p_fd = fdp;
1394 atomic_inc_uint(&fdp->fd_refcnt);
1395 }
1396
1397 /*
1398 * Acquire a hold on a filedesc structure.
1399 */
1400 void
1401 fd_hold(lwp_t *l)
1402 {
1403 filedesc_t *fdp = l->l_fd;
1404
1405 atomic_inc_uint(&fdp->fd_refcnt);
1406 }
1407
1408 /*
1409 * Copy a filedesc structure.
1410 */
1411 filedesc_t *
1412 fd_copy(void)
1413 {
1414 filedesc_t *newfdp, *fdp;
1415 fdfile_t *ff, **ffp, **nffp, *ff2;
1416 int i, j, numfiles, lastfile, newlast;
1417 file_t *fp;
1418 fdtab_t *newdt;
1419
1420 fdp = curproc->p_fd;
1421 newfdp = pool_cache_get(filedesc_cache, PR_WAITOK);
1422 newfdp->fd_refcnt = 1;
1423
1424 #ifdef DIAGNOSTIC
1425 KASSERT(newfdp->fd_lastfile == -1);
1426 KASSERT(newfdp->fd_lastkqfile == -1);
1427 KASSERT(newfdp->fd_knhash == NULL);
1428 KASSERT(newfdp->fd_freefile == 0);
1429 KASSERT(newfdp->fd_exclose == false);
1430 KASSERT(newfdp->fd_dt == &newfdp->fd_dtbuiltin);
1431 KASSERT(newfdp->fd_dtbuiltin.dt_nfiles == NDFILE);
1432 for (i = 0; i < NDFDFILE; i++) {
1433 KASSERT(newfdp->fd_dtbuiltin.dt_ff[i] ==
1434 (fdfile_t *)&newfdp->fd_dfdfile[i]);
1435 }
1436 for (i = NDFDFILE; i < NDFILE; i++) {
1437 KASSERT(newfdp->fd_dtbuiltin.dt_ff[i] == NULL);
1438 }
1439 #endif /* DIAGNOSTIC */
1440
1441 mutex_enter(&fdp->fd_lock);
1442 fd_checkmaps(fdp);
1443 numfiles = fdp->fd_dt->dt_nfiles;
1444 lastfile = fdp->fd_lastfile;
1445
1446 /*
1447 * If the number of open files fits in the internal arrays
1448 * of the open file structure, use them, otherwise allocate
1449 * additional memory for the number of descriptors currently
1450 * in use.
1451 */
1452 if (lastfile < NDFILE) {
1453 i = NDFILE;
1454 newdt = newfdp->fd_dt;
1455 KASSERT(newfdp->fd_dt == &newfdp->fd_dtbuiltin);
1456 } else {
1457 /*
1458 * Compute the smallest multiple of NDEXTENT needed
1459 * for the file descriptors currently in use,
1460 * allowing the table to shrink.
1461 */
1462 i = numfiles;
1463 while (i >= 2 * NDEXTENT && i > lastfile * 2) {
1464 i /= 2;
1465 }
1466 KASSERT(i > NDFILE);
1467 newdt = fd_dtab_alloc(i);
1468 newfdp->fd_dt = newdt;
1469 memcpy(newdt->dt_ff, newfdp->fd_dtbuiltin.dt_ff,
1470 NDFDFILE * sizeof(fdfile_t **));
1471 memset(newdt->dt_ff + NDFDFILE, 0,
1472 (i - NDFDFILE) * sizeof(fdfile_t **));
1473 }
1474 if (NDHISLOTS(i) <= NDHISLOTS(NDFILE)) {
1475 newfdp->fd_himap = newfdp->fd_dhimap;
1476 newfdp->fd_lomap = newfdp->fd_dlomap;
1477 } else {
1478 fd_map_alloc(i, &newfdp->fd_lomap, &newfdp->fd_himap);
1479 KASSERT(i >= NDENTRIES * NDENTRIES);
1480 memset(newfdp->fd_himap, 0, NDHISLOTS(i)*sizeof(uint32_t));
1481 memset(newfdp->fd_lomap, 0, NDLOSLOTS(i)*sizeof(uint32_t));
1482 }
1483 newfdp->fd_freefile = fdp->fd_freefile;
1484 newfdp->fd_exclose = fdp->fd_exclose;
1485
1486 ffp = fdp->fd_dt->dt_ff;
1487 nffp = newdt->dt_ff;
1488 newlast = -1;
1489 for (i = 0; i <= lastfile; i++, ffp++, nffp++) {
1490 KASSERT(i >= NDFDFILE ||
1491 *nffp == (fdfile_t *)newfdp->fd_dfdfile[i]);
1492 ff = *ffp;
1493 if (ff == NULL ||
1494 (fp = atomic_load_consume(&ff->ff_file)) == NULL) {
1495 /* Descriptor unused, or descriptor half open. */
1496 KASSERT(!fd_isused(newfdp, i));
1497 continue;
1498 }
1499 if (__predict_false(fp->f_type == DTYPE_KQUEUE)) {
1500 /* kqueue descriptors cannot be copied. */
1501 if (i < newfdp->fd_freefile) {
1502 newfdp->fd_freefile = i;
1503 }
1504 continue;
1505 }
1506 /* It's active: add a reference to the file. */
1507 mutex_enter(&fp->f_lock);
1508 fp->f_count++;
1509 mutex_exit(&fp->f_lock);
1510
1511 /* Allocate an fdfile_t to represent it. */
1512 if (i >= NDFDFILE) {
1513 ff2 = pool_cache_get(fdfile_cache, PR_WAITOK);
1514 *nffp = ff2;
1515 } else {
1516 ff2 = newdt->dt_ff[i];
1517 }
1518 ff2->ff_file = fp;
1519 ff2->ff_exclose = ff->ff_exclose;
1520 ff2->ff_allocated = true;
1521
1522 /* Fix up bitmaps. */
1523 j = i >> NDENTRYSHIFT;
1524 KASSERT((newfdp->fd_lomap[j] & (1U << (i & NDENTRYMASK))) == 0);
1525 newfdp->fd_lomap[j] |= 1U << (i & NDENTRYMASK);
1526 if (__predict_false(newfdp->fd_lomap[j] == ~0)) {
1527 KASSERT((newfdp->fd_himap[j >> NDENTRYSHIFT] &
1528 (1U << (j & NDENTRYMASK))) == 0);
1529 newfdp->fd_himap[j >> NDENTRYSHIFT] |=
1530 1U << (j & NDENTRYMASK);
1531 }
1532 newlast = i;
1533 }
1534 KASSERT(newdt->dt_ff[0] == (fdfile_t *)newfdp->fd_dfdfile[0]);
1535 newfdp->fd_lastfile = newlast;
1536 fd_checkmaps(newfdp);
1537 mutex_exit(&fdp->fd_lock);
1538
1539 return newfdp;
1540 }
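/*
 * Editor's sketch of how the fork path chooses between these
 * interfaces (see kern_fork.c for the authoritative code): a child
 * either shares the parent's table, gets a private copy, or starts
 * with a clean one:
 *
 *	if (flags & FORK_SHAREFILES)
 *		fd_share(p2);
 *	else if (flags & FORK_CLEANFILES)
 *		p2->p_fd = fd_init(NULL);
 *	else
 *		p2->p_fd = fd_copy();
 */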
1541
1542 /*
1543 * Release a filedesc structure.
1544 */
1545 void
1546 fd_free(void)
1547 {
1548 fdfile_t *ff;
1549 file_t *fp;
1550 int fd, nf;
1551 fdtab_t *dt;
1552 lwp_t * const l = curlwp;
1553 filedesc_t * const fdp = l->l_fd;
1554 const bool noadvlock = (l->l_proc->p_flag & PK_ADVLOCK) == 0;
1555
1556 KASSERT(atomic_load_consume(&fdp->fd_dt)->dt_ff[0] ==
1557 (fdfile_t *)fdp->fd_dfdfile[0]);
1558 KASSERT(fdp->fd_dtbuiltin.dt_nfiles == NDFILE);
1559 KASSERT(fdp->fd_dtbuiltin.dt_link == NULL);
1560
1561 membar_release();
1562 if (atomic_dec_uint_nv(&fdp->fd_refcnt) > 0)
1563 return;
1564 membar_acquire();
1565
1566 /*
1567 * Close any files that the process holds open.
1568 */
1569 dt = fdp->fd_dt;
1570 fd_checkmaps(fdp);
1571 #ifdef DEBUG
1572 fdp->fd_refcnt = -1; /* see fd_checkmaps */
1573 #endif
1574 for (fd = 0, nf = dt->dt_nfiles; fd < nf; fd++) {
1575 ff = dt->dt_ff[fd];
1576 KASSERT(fd >= NDFDFILE ||
1577 ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
1578 if (ff == NULL)
1579 continue;
1580 if ((fp = atomic_load_consume(&ff->ff_file)) != NULL) {
1581 /*
1582 * Must use fd_close() here if there is
1583 * a reference from kqueue or we might have posix
1584 * advisory locks.
1585 */
1586 if (__predict_true(ff->ff_refcnt == 0) &&
1587 (noadvlock || fp->f_type != DTYPE_VNODE)) {
1588 ff->ff_file = NULL;
1589 ff->ff_exclose = false;
1590 ff->ff_allocated = false;
1591 closef(fp);
1592 } else {
1593 ff->ff_refcnt++;
1594 fd_close(fd);
1595 }
1596 }
1597 KASSERT(ff->ff_refcnt == 0);
1598 KASSERT(ff->ff_file == NULL);
1599 KASSERT(!ff->ff_exclose);
1600 KASSERT(!ff->ff_allocated);
1601 if (fd >= NDFDFILE) {
1602 pool_cache_put(fdfile_cache, ff);
1603 dt->dt_ff[fd] = NULL;
1604 }
1605 }
1606
1607 /*
1608 * Clean out the descriptor table for the next user and return
1609 * to the cache.
1610 */
1611 if (__predict_false(dt != &fdp->fd_dtbuiltin)) {
1612 fd_dtab_free(fdp->fd_dt);
1613 /* Otherwise, done above. */
1614 memset(&fdp->fd_dtbuiltin.dt_ff[NDFDFILE], 0,
1615 (NDFILE - NDFDFILE) * sizeof(fdp->fd_dtbuiltin.dt_ff[0]));
1616 fdp->fd_dt = &fdp->fd_dtbuiltin;
1617 }
1618 if (__predict_false(NDHISLOTS(nf) > NDHISLOTS(NDFILE))) {
1619 KASSERT(fdp->fd_himap != fdp->fd_dhimap);
1620 KASSERT(fdp->fd_lomap != fdp->fd_dlomap);
1621 fd_map_free(nf, fdp->fd_lomap, fdp->fd_himap);
1622 }
1623 if (__predict_false(fdp->fd_knhash != NULL)) {
1624 hashdone(fdp->fd_knhash, HASH_LIST, fdp->fd_knhashmask);
1625 fdp->fd_knhash = NULL;
1626 fdp->fd_knhashmask = 0;
1627 } else {
1628 KASSERT(fdp->fd_knhashmask == 0);
1629 }
1630 fdp->fd_dt = &fdp->fd_dtbuiltin;
1631 fdp->fd_lastkqfile = -1;
1632 fdp->fd_lastfile = -1;
1633 fdp->fd_freefile = 0;
1634 fdp->fd_exclose = false;
1635 memset(&fdp->fd_startzero, 0, sizeof(*fdp) -
1636 offsetof(filedesc_t, fd_startzero));
1637 fdp->fd_himap = fdp->fd_dhimap;
1638 fdp->fd_lomap = fdp->fd_dlomap;
1639 KASSERT(fdp->fd_dtbuiltin.dt_nfiles == NDFILE);
1640 KASSERT(fdp->fd_dtbuiltin.dt_link == NULL);
1641 KASSERT(fdp->fd_dt == &fdp->fd_dtbuiltin);
1642 #ifdef DEBUG
1643 fdp->fd_refcnt = 0; /* see fd_checkmaps */
1644 #endif
1645 fd_checkmaps(fdp);
1646 pool_cache_put(filedesc_cache, fdp);
1647 }
1648
1649 /*
1650 * File Descriptor pseudo-device driver (/dev/fd/).
1651 *
1652 * Opening minor device N dup()s the file (if any) connected to file
1653 * descriptor N belonging to the calling process. Note that this driver
1654 * consists of only the ``open()'' routine, because all subsequent
1655 * references to this file will be direct to the other driver.
1656 */
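/*
 * Editor's note: from userland, opening minor N behaves like dup(2) of
 * descriptor N, e.g.
 *
 *	fd = open("/dev/fd/0", O_RDONLY);
 *
 * is equivalent to fd = dup(0), subject to the mode check performed in
 * fd_dupopen() below.
 */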
1657 static int
1658 filedescopen(dev_t dev, int mode, int type, lwp_t *l)
1659 {
1660
1661 /*
1662 * XXX Kludge: set dupfd to contain the value of the
1663 * file descriptor being sought for duplication. The error
1664 * return ensures that the vnode for this device will be released
1665 * by vn_open. Open will detect this special error and take the
1666 * actions in fd_dupopen below. Other callers of vn_open or VOP_OPEN
1667 * will simply report the error.
1668 */
1669 l->l_dupfd = minor(dev); /* XXX */
1670 return EDUPFD;
1671 }
1672
1673 /*
1674 * Duplicate the specified descriptor to a free descriptor.
1675 *
1676 * old is the original fd.
1677 * moveit is true if we should move rather than duplicate.
1678 * flags are the open flags (converted from O_* to F*).
1679 * newp returns the new fd on success.
1680 *
1681 * These two cases are produced by the EDUPFD and EMOVEFD magic
1682 * errnos, but in the interest of removing that regrettable interface,
1683 * vn_open has been changed to intercept them. Now vn_open returns
1684 * either a vnode or a filehandle, and the filehandle is accompanied
1685 * by a boolean that says whether we should dup (moveit == false) or
1686 * move (moveit == true) the fd.
1687 *
1688 * The dup case is used by /dev/stderr, /proc/self/fd, and such. The
1689 * move case is used by cloner devices that allocate a fd of their
1690 * own (a layering violation that should go away eventually) that
1691 * then needs to be put in the place open() expects it.
1692 */
1693 int
1694 fd_dupopen(int old, bool moveit, int flags, int *newp)
1695 {
1696 filedesc_t *fdp;
1697 fdfile_t *ff;
1698 file_t *fp;
1699 fdtab_t *dt;
1700 int error;
1701
1702 if ((fp = fd_getfile(old)) == NULL) {
1703 return EBADF;
1704 }
1705 fdp = curlwp->l_fd;
1706 dt = atomic_load_consume(&fdp->fd_dt);
1707 ff = dt->dt_ff[old];
1708
1709 /*
1710 * There are two cases of interest here.
1711 *
1712 * 1. moveit == false (used to be the EDUPFD magic errno):
1713 * simply dup (old) to file descriptor (new) and return.
1714 *
1715 * 2. moveit == true (used to be the EMOVEFD magic errno):
1716 * steal away the file structure from (old) and store it in
1717 * (new). (old) is effectively closed by this operation.
1718 */
1719 if (moveit == false) {
1720 /*
1721 * Check that the mode the file is being opened for is a
1722 * subset of the mode of the existing descriptor.
1723 */
1724 if (((flags & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) {
1725 error = EACCES;
1726 goto out;
1727 }
1728
1729 /* Copy it. */
1730 error = fd_dup(fp, 0, newp, ff->ff_exclose);
1731 } else {
1732 /* Copy it. */
1733 error = fd_dup(fp, 0, newp, ff->ff_exclose);
1734 if (error != 0) {
1735 goto out;
1736 }
1737
1738 /* Steal away the file pointer from 'old'. */
1739 (void)fd_close(old);
1740 return 0;
1741 }
1742
1743 out:
1744 fd_putfile(old);
1745 return error;
1746 }
1747
1748 /*
1749 * Close open files on exec.
1750 */
1751 void
1752 fd_closeexec(void)
1753 {
1754 proc_t *p;
1755 filedesc_t *fdp;
1756 fdfile_t *ff;
1757 lwp_t *l;
1758 fdtab_t *dt;
1759 int fd;
1760
1761 l = curlwp;
1762 p = l->l_proc;
1763 fdp = p->p_fd;
1764
1765 if (fdp->fd_refcnt > 1) {
1766 fdp = fd_copy();
1767 fd_free();
1768 p->p_fd = fdp;
1769 l->l_fd = fdp;
1770 }
1771 if (!fdp->fd_exclose) {
1772 return;
1773 }
1774 fdp->fd_exclose = false;
1775 dt = atomic_load_consume(&fdp->fd_dt);
1776
1777 for (fd = 0; fd <= fdp->fd_lastfile; fd++) {
1778 if ((ff = dt->dt_ff[fd]) == NULL) {
1779 KASSERT(fd >= NDFDFILE);
1780 continue;
1781 }
1782 KASSERT(fd >= NDFDFILE ||
1783 ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
1784 if (ff->ff_file == NULL)
1785 continue;
1786 if (ff->ff_exclose) {
1787 /*
1788 * We need a reference to close the file.
1789 * No other threads can see the fdfile_t at
1790 * this point, so don't bother locking.
1791 */
1792 KASSERT((ff->ff_refcnt & FR_CLOSING) == 0);
1793 ff->ff_refcnt++;
1794 fd_close(fd);
1795 }
1796 }
1797 }
1798
1799 /*
1800 * Set the descriptor owner. If the owner is a process, 'pgid'
1801 * is set to a positive value, the process ID. If the owner is a
1802 * process group, 'pgid' is set to -pg_id.
1803 */
1804 int
1805 fsetown(pid_t *pgid, u_long cmd, const void *data)
1806 {
1807 pid_t id = *(const pid_t *)data;
1808 int error;
1809
1810 if (id == INT_MIN)
1811 return EINVAL;
1812
1813 switch (cmd) {
1814 case TIOCSPGRP:
1815 if (id < 0)
1816 return EINVAL;
1817 id = -id;
1818 break;
1819 default:
1820 break;
1821 }
1822 if (id > 0) {
1823 mutex_enter(&proc_lock);
1824 error = proc_find(id) ? 0 : ESRCH;
1825 mutex_exit(&proc_lock);
1826 } else if (id < 0) {
1827 error = pgid_in_session(curproc, -id);
1828 } else {
1829 error = 0;
1830 }
1831 if (!error) {
1832 *pgid = id;
1833 }
1834 return error;
1835 }
1836
1837 void
1838 fd_set_exclose(struct lwp *l, int fd, bool exclose)
1839 {
1840 filedesc_t *fdp = l->l_fd;
1841 fdfile_t *ff = atomic_load_consume(&fdp->fd_dt)->dt_ff[fd];
1842
1843 ff->ff_exclose = exclose;
1844 if (exclose)
1845 fdp->fd_exclose = true;
1846 }
1847
1848 /*
1849 * Return descriptor owner information. If the value is positive,
1850 * it's a process ID. If it's negative, it's a process group ID and
1851 * needs the sign removed before use.
1852 */
1853 int
1854 fgetown(pid_t pgid, u_long cmd, void *data)
1855 {
1856
1857 switch (cmd) {
1858 case TIOCGPGRP:
1859 *(int *)data = -pgid;
1860 break;
1861 default:
1862 *(int *)data = pgid;
1863 break;
1864 }
1865 return 0;
1866 }
1867
1868 /*
1869 * Send signal to descriptor owner, either process or process group.
1870 */
1871 void
1872 fownsignal(pid_t pgid, int signo, int code, int band, void *fdescdata)
1873 {
1874 ksiginfo_t ksi;
1875
1876 KASSERT(!cpu_intr_p());
1877
1878 if (pgid == 0) {
1879 return;
1880 }
1881
1882 KSI_INIT(&ksi);
1883 ksi.ksi_signo = signo;
1884 ksi.ksi_code = code;
1885 ksi.ksi_band = band;
1886
1887 mutex_enter(&proc_lock);
1888 if (pgid > 0) {
1889 struct proc *p1;
1890
1891 p1 = proc_find(pgid);
1892 if (p1 != NULL) {
1893 kpsignal(p1, &ksi, fdescdata);
1894 }
1895 } else {
1896 struct pgrp *pgrp;
1897
1898 KASSERT(pgid < 0);
1899 pgrp = pgrp_find(-pgid);
1900 if (pgrp != NULL) {
1901 kpgsignal(pgrp, &ksi, fdescdata, 0);
1902 }
1903 }
1904 mutex_exit(&proc_lock);
1905 }
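/*
 * Usage sketch (editor's addition; the softc and its fields are
 * hypothetical): a driver supporting SIGIO stores the owner in its
 * softc and wires these helpers into its ioctl switch:
 *
 *	case FIOSETOWN:
 *	case TIOCSPGRP:
 *		return fsetown(&sc->sc_pgid, cmd, data);
 *	case FIOGETOWN:
 *	case TIOCGPGRP:
 *		return fgetown(sc->sc_pgid, cmd, data);
 *
 * and calls fownsignal(sc->sc_pgid, SIGIO, 0, 0, NULL) when it wants
 * to notify the owner of pending I/O.
 */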
1906
1907 int
1908 fd_clone(file_t *fp, unsigned fd, int flag, const struct fileops *fops,
1909 void *data)
1910 {
1911 fdfile_t *ff;
1912 filedesc_t *fdp;
1913
1914 fp->f_flag = flag & FMASK;
1915 fdp = curproc->p_fd;
1916 ff = atomic_load_consume(&fdp->fd_dt)->dt_ff[fd];
1917 KASSERT(ff != NULL);
1918 ff->ff_exclose = (flag & O_CLOEXEC) != 0;
1919 fp->f_type = DTYPE_MISC;
1920 fp->f_ops = fops;
1921 fp->f_data = data;
1922 curlwp->l_dupfd = fd;
1923 fd_affix(curproc, fp, fd);
1924
1925 return EMOVEFD;
1926 }
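/*
 * Usage sketch (editor's addition; the myclone_* names are
 * hypothetical): a cloning device's open routine allocates a file and
 * descriptor, then hands them to fd_clone() along with its own fileops
 * and per-instance data.  The EMOVEFD return value tells the open path
 * to move the new descriptor into place:
 *
 *	static int
 *	mycloneropen(dev_t dev, int flag, int mode, lwp_t *l)
 *	{
 *		struct myclone_softc *sc;
 *		file_t *fp;
 *		int error, fd;
 *
 *		if ((error = fd_allocfile(&fp, &fd)) != 0)
 *			return error;
 *		sc = myclone_attach();
 *		return fd_clone(fp, fd, flag, &myclone_fileops, sc);
 *	}
 */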
1927
1928 int
1929 fnullop_fcntl(file_t *fp, u_int cmd, void *data)
1930 {
1931
1932 if (cmd == F_SETFL)
1933 return 0;
1934
1935 return EOPNOTSUPP;
1936 }
1937
1938 int
1939 fnullop_poll(file_t *fp, int which)
1940 {
1941
1942 return 0;
1943 }
1944
1945 int
1946 fnullop_kqfilter(file_t *fp, struct knote *kn)
1947 {
1948
1949 return EOPNOTSUPP;
1950 }
1951
1952 void
1953 fnullop_restart(file_t *fp)
1954 {
1955
1956 }
1957
1958 int
1959 fbadop_read(file_t *fp, off_t *offset, struct uio *uio,
1960 kauth_cred_t cred, int flags)
1961 {
1962
1963 return EOPNOTSUPP;
1964 }
1965
1966 int
1967 fbadop_write(file_t *fp, off_t *offset, struct uio *uio,
1968 kauth_cred_t cred, int flags)
1969 {
1970
1971 return EOPNOTSUPP;
1972 }
1973
1974 int
1975 fbadop_ioctl(file_t *fp, u_long com, void *data)
1976 {
1977
1978 return EOPNOTSUPP;
1979 }
1980
1981 int
1982 fbadop_stat(file_t *fp, struct stat *sb)
1983 {
1984
1985 return EOPNOTSUPP;
1986 }
1987
1988 int
1989 fbadop_close(file_t *fp)
1990 {
1991
1992 return EOPNOTSUPP;
1993 }
1994
1995 /*
1996 * sysctl routines pertaining to file descriptors
1997 */
1998
1999 /* Initialized in sysctl_init() for now... */
2000 extern kmutex_t sysctl_file_marker_lock;
2001 static u_int sysctl_file_marker = 1;
2002
2003 /*
2004 * Expects to be called with proc_lock and sysctl_file_marker_lock locked.
2005 */
2006 static void
2007 sysctl_file_marker_reset(void)
2008 {
2009 struct proc *p;
2010
2011 PROCLIST_FOREACH(p, &allproc) {
2012 struct filedesc *fd = p->p_fd;
2013 fdtab_t *dt;
2014 u_int i;
2015
2016 mutex_enter(&fd->fd_lock);
2017 dt = fd->fd_dt;
2018 for (i = 0; i < dt->dt_nfiles; i++) {
2019 struct file *fp;
2020 fdfile_t *ff;
2021
2022 if ((ff = dt->dt_ff[i]) == NULL) {
2023 continue;
2024 }
2025 if ((fp = atomic_load_consume(&ff->ff_file)) == NULL) {
2026 continue;
2027 }
2028 fp->f_marker = 0;
2029 }
2030 mutex_exit(&fd->fd_lock);
2031 }
2032 }
2033
/*
 * sysctl helper routine for kern.file pseudo-subtree.
 */
static int
sysctl_kern_file(SYSCTLFN_ARGS)
{
	const bool allowaddr = get_expose_address(curproc);
	struct filelist flist;
	int error;
	size_t buflen;
	struct file *fp, fbuf;
	char *start, *where;
	struct proc *p;

	start = where = oldp;
	buflen = *oldlenp;

	if (where == NULL) {
		/*
		 * overestimate by 10 files
		 */
		*oldlenp = sizeof(filehead) + (nfiles + 10) *
		    sizeof(struct file);
		return 0;
	}

	/*
	 * first sysctl_copyout filehead
	 */
	if (buflen < sizeof(filehead)) {
		*oldlenp = 0;
		return 0;
	}
	sysctl_unlock();
	if (allowaddr) {
		memcpy(&flist, &filehead, sizeof(flist));
	} else {
		memset(&flist, 0, sizeof(flist));
	}
	error = sysctl_copyout(l, &flist, where, sizeof(flist));
	if (error) {
		sysctl_relock();
		return error;
	}
	buflen -= sizeof(flist);
	where += sizeof(flist);

	/*
	 * followed by an array of file structures
	 */
	mutex_enter(&sysctl_file_marker_lock);
	mutex_enter(&proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		struct filedesc *fd;
		fdtab_t *dt;
		u_int i;

		if (p->p_stat == SIDL) {
			/* skip embryonic processes */
			continue;
		}
		mutex_enter(p->p_lock);
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_CANSEE, p,
		    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_OPENFILES),
		    NULL, NULL);
		mutex_exit(p->p_lock);
		if (error != 0) {
			/*
			 * Don't leak kauth retval if we're silently
			 * skipping this entry.
			 */
			error = 0;
			continue;
		}

		/*
		 * Grab a hold on the process.
		 */
		if (!rw_tryenter(&p->p_reflock, RW_READER)) {
			continue;
		}
		mutex_exit(&proc_lock);

		fd = p->p_fd;
		mutex_enter(&fd->fd_lock);
		dt = fd->fd_dt;
		for (i = 0; i < dt->dt_nfiles; i++) {
			fdfile_t *ff;

			if ((ff = dt->dt_ff[i]) == NULL) {
				continue;
			}
			if ((fp = atomic_load_consume(&ff->ff_file)) == NULL) {
				continue;
			}

			mutex_enter(&fp->f_lock);

			if ((fp->f_count == 0) ||
			    (fp->f_marker == sysctl_file_marker)) {
				mutex_exit(&fp->f_lock);
				continue;
			}

			/* Check that we have enough space. */
			if (buflen < sizeof(struct file)) {
				*oldlenp = where - start;
				mutex_exit(&fp->f_lock);
				error = ENOMEM;
				break;
			}

			fill_file(&fbuf, fp);
			mutex_exit(&fp->f_lock);
			error = sysctl_copyout(l, &fbuf, where, sizeof(fbuf));
			if (error) {
				break;
			}
			buflen -= sizeof(struct file);
			where += sizeof(struct file);

			fp->f_marker = sysctl_file_marker;
		}
		mutex_exit(&fd->fd_lock);

		/*
		 * Release reference to process.
		 */
		mutex_enter(&proc_lock);
		rw_exit(&p->p_reflock);

		if (error)
			break;
	}

	sysctl_file_marker++;
	/* Reset all markers if wrapped. */
	if (sysctl_file_marker == 0) {
		sysctl_file_marker_reset();
		sysctl_file_marker++;
	}

	mutex_exit(&proc_lock);
	mutex_exit(&sysctl_file_marker_lock);

	*oldlenp = where - start;
	sysctl_relock();
	return error;
}

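/*
 * Illustrative sketch, not part of the original file: a minimal userland
 * consumer of kern.file uses the usual two-step sysctl(3) sizing dance.
 * The handler above deliberately overestimates on the sizing call, so
 * the second call may return less data than was asked for.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_FILE };
	size_t len;
	void *buf;

	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
		return 1;
	if ((buf = malloc(len)) == NULL)
		return 1;
	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		return 1;
	/* buf now holds a struct filelist followed by struct file records. */
	printf("%zu bytes of file table returned\n", len);
	free(buf);
	return 0;
}
#endif
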
/*
 * sysctl helper function for kern.file2
 */
static int
sysctl_kern_file2(SYSCTLFN_ARGS)
{
	struct proc *p;
	struct file *fp;
	struct filedesc *fd;
	struct kinfo_file kf;
	char *dp;
	u_int i, op;
	size_t len, needed, elem_size, out_size;
	int error, arg, elem_count;
	fdfile_t *ff;
	fdtab_t *dt;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return sysctl_query(SYSCTLFN_CALL(rnode));

	if (namelen != 4)
		return EINVAL;

	error = 0;
	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	op = name[0];
	arg = name[1];
	elem_size = name[2];
	elem_count = name[3];
	out_size = MIN(sizeof(kf), elem_size);
	needed = 0;

	if (elem_size < 1 || elem_count < 0)
		return EINVAL;

	switch (op) {
	case KERN_FILE_BYFILE:
	case KERN_FILE_BYPID:
		/*
		 * We're traversing the process list in both cases; the BYFILE
		 * case does additional work of keeping track of files already
		 * looked at.
		 */

		/* doesn't use arg so it must be zero */
		if ((op == KERN_FILE_BYFILE) && (arg != 0))
			return EINVAL;

		if ((op == KERN_FILE_BYPID) && (arg < -1))
			/* -1 means all processes */
			return EINVAL;

		sysctl_unlock();
		if (op == KERN_FILE_BYFILE)
			mutex_enter(&sysctl_file_marker_lock);
		mutex_enter(&proc_lock);
		PROCLIST_FOREACH(p, &allproc) {
			if (p->p_stat == SIDL) {
				/* skip embryonic processes */
				continue;
			}
			if (arg > 0 && p->p_pid != arg) {
				/* pick only the one we want */
				/* XXX want 0 to mean "kernel files" */
				continue;
			}
			mutex_enter(p->p_lock);
			error = kauth_authorize_process(l->l_cred,
			    KAUTH_PROCESS_CANSEE, p,
			    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_OPENFILES),
			    NULL, NULL);
			mutex_exit(p->p_lock);
			if (error != 0) {
				/*
				 * Don't leak kauth retval if we're silently
				 * skipping this entry.
				 */
				error = 0;
				continue;
			}

			/*
			 * Grab a hold on the process.
			 */
			if (!rw_tryenter(&p->p_reflock, RW_READER)) {
				continue;
			}
			mutex_exit(&proc_lock);

			fd = p->p_fd;
			mutex_enter(&fd->fd_lock);
			dt = fd->fd_dt;
			for (i = 0; i < dt->dt_nfiles; i++) {
				if ((ff = dt->dt_ff[i]) == NULL) {
					continue;
				}
				if ((fp = atomic_load_consume(&ff->ff_file)) ==
				    NULL) {
					continue;
				}

				if ((op == KERN_FILE_BYFILE) &&
				    (fp->f_marker == sysctl_file_marker)) {
					continue;
				}
				if (len >= elem_size && elem_count > 0) {
					mutex_enter(&fp->f_lock);
					fill_file2(&kf, fp, ff, i, p->p_pid);
					mutex_exit(&fp->f_lock);
					mutex_exit(&fd->fd_lock);
					error = sysctl_copyout(l,
					    &kf, dp, out_size);
					mutex_enter(&fd->fd_lock);
					if (error)
						break;
					dp += elem_size;
					len -= elem_size;
				}
				if (op == KERN_FILE_BYFILE)
					fp->f_marker = sysctl_file_marker;
				needed += elem_size;
				if (elem_count > 0 && elem_count != INT_MAX)
					elem_count--;
			}
			mutex_exit(&fd->fd_lock);

			/*
			 * Release reference to process.
			 */
			mutex_enter(&proc_lock);
			rw_exit(&p->p_reflock);
		}
		if (op == KERN_FILE_BYFILE) {
			sysctl_file_marker++;

			/* Reset all markers if wrapped. */
			if (sysctl_file_marker == 0) {
				sysctl_file_marker_reset();
				sysctl_file_marker++;
			}
		}
		mutex_exit(&proc_lock);
		if (op == KERN_FILE_BYFILE)
			mutex_exit(&sysctl_file_marker_lock);
		sysctl_relock();
		break;
	default:
		return EINVAL;
	}

	if (oldp == NULL)
		needed += KERN_FILESLOP * elem_size;
	*oldlenp = needed;

	return error;
}

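/*
 * Illustrative sketch, not part of the original file: kern.file2 takes
 * four additional name components (op, arg, element size, element count),
 * matching the decoding at the top of sysctl_kern_file2().  A minimal
 * userland query for the current process might look like this; INT_MAX
 * as the element count means "no limit", as handled above.  This
 * simplified version assumes the file table does not grow between the
 * sizing call and the data call.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	struct kinfo_file *kf;
	size_t len, i;
	int mib[6];

	mib[0] = CTL_KERN;
	mib[1] = KERN_FILE2;
	mib[2] = KERN_FILE_BYPID;
	mib[3] = getpid();		/* files of this process only */
	mib[4] = sizeof(struct kinfo_file);
	mib[5] = INT_MAX;

	if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1)
		return 1;
	if ((kf = malloc(len)) == NULL)
		return 1;
	if (sysctl(mib, 6, kf, &len, NULL, 0) == -1)
		return 1;
	for (i = 0; i < len / sizeof(*kf); i++)
		printf("fd %jd type %jd\n",
		    (intmax_t)kf[i].ki_fd, (intmax_t)kf[i].ki_ftype);
	free(kf);
	return 0;
}
#endif
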
static void
fill_file(struct file *fp, const struct file *fpsrc)
{
	const bool allowaddr = get_expose_address(curproc);

	memset(fp, 0, sizeof(*fp));

	fp->f_offset = fpsrc->f_offset;
	COND_SET_PTR(fp->f_cred, fpsrc->f_cred, allowaddr);
	COND_SET_CPTR(fp->f_ops, fpsrc->f_ops, allowaddr);
	COND_SET_STRUCT(fp->f_undata, fpsrc->f_undata, allowaddr);
	COND_SET_STRUCT(fp->f_list, fpsrc->f_list, allowaddr);
	fp->f_flag = fpsrc->f_flag;
	fp->f_marker = fpsrc->f_marker;
	fp->f_type = fpsrc->f_type;
	fp->f_advice = fpsrc->f_advice;
	fp->f_count = fpsrc->f_count;
	fp->f_msgcount = fpsrc->f_msgcount;
	fp->f_unpcount = fpsrc->f_unpcount;
	COND_SET_STRUCT(fp->f_unplist, fpsrc->f_unplist, allowaddr);
}

static void
fill_file2(struct kinfo_file *kp, const file_t *fp, const fdfile_t *ff,
    int i, pid_t pid)
{
	const bool allowaddr = get_expose_address(curproc);

	memset(kp, 0, sizeof(*kp));

	COND_SET_VALUE(kp->ki_fileaddr, PTRTOUINT64(fp), allowaddr);
	kp->ki_flag = fp->f_flag;
	kp->ki_iflags = 0;
	kp->ki_ftype = fp->f_type;
	kp->ki_count = fp->f_count;
	kp->ki_msgcount = fp->f_msgcount;
	COND_SET_VALUE(kp->ki_fucred, PTRTOUINT64(fp->f_cred), allowaddr);
	kp->ki_fuid = kauth_cred_geteuid(fp->f_cred);
	kp->ki_fgid = kauth_cred_getegid(fp->f_cred);
	COND_SET_VALUE(kp->ki_fops, PTRTOUINT64(fp->f_ops), allowaddr);
	kp->ki_foffset = fp->f_offset;
	COND_SET_VALUE(kp->ki_fdata, PTRTOUINT64(fp->f_data), allowaddr);

	/* vnode information to glue this file to something */
	if (fp->f_type == DTYPE_VNODE) {
		struct vnode *vp = fp->f_vnode;

		COND_SET_VALUE(kp->ki_vun, PTRTOUINT64(vp->v_un.vu_socket),
		    allowaddr);
		kp->ki_vsize = vp->v_size;
		kp->ki_vtype = vp->v_type;
		kp->ki_vtag = vp->v_tag;
		COND_SET_VALUE(kp->ki_vdata, PTRTOUINT64(vp->v_data),
		    allowaddr);
	}

	/* process information when retrieved via KERN_FILE_BYPID */
	if (ff != NULL) {
		kp->ki_pid = pid;
		kp->ki_fd = i;
		kp->ki_ofileflags = ff->ff_exclose;
		kp->ki_usecount = ff->ff_refcnt;
	}
}
