/*
 * Copyright (c) 2005-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/kern_syscall.h>
#include <sys/kcore.h>
#include <sys/kinfo.h>
#include <sys/un.h>
#include <sys/objcache.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/spinlock2.h>

static int fdalloc_locked(struct proc *p, struct filedesc *fdp,
			int want, int *result);
static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
static void fdreserve_locked(struct filedesc *fdp, int fd0, int incr);
static struct file *funsetfd_locked(struct filedesc *fdp, int fd);
static void ffree(struct file *fp);

static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
		"file desc to leader structures");
MALLOC_DEFINE(M_FILE, "file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

static struct krate krate_uidinfo = { .freq = 1 };

static d_open_t fdopen;
#define NUMFDESC 64

#define CDEV_MAJOR 22
static struct dev_ops fildesc_ops = {
        { "FD", 0, 0 },
        .d_open =       fdopen,
};

/*
 * Descriptor management.
 */
#ifndef NFILELIST_HEADS
#define NFILELIST_HEADS         257     /* prime number */
#endif

struct filelist_head {
        struct spinlock         spin;
        struct filelist         list;
} __cachealign;

static struct filelist_head     filelist_heads[NFILELIST_HEADS];

static int nfiles;              /* actual number of open files */
extern int cmask;

struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token);

static struct objcache          *file_objcache;

static struct objcache_malloc_args file_malloc_args = {
        .objsize        = sizeof(struct file),
        .mtype          = M_FILE
};

/*
 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
 *
 * must be called with fdp->fd_spin exclusively held
 */
static __inline
void
fdfixup_locked(struct filedesc *fdp, int fd)
{
        if (fd < fdp->fd_freefile) {
                fdp->fd_freefile = fd;
        }
        while (fdp->fd_lastfile >= 0 &&
               fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
               fdp->fd_files[fdp->fd_lastfile].reserved == 0
        ) {
                --fdp->fd_lastfile;
        }
}

/*
 * Clear the fd thread caches for this fdnode.
 *
 * If match_fdc is NULL, all thread caches of fdn will be cleared.
 * The caller must hold fdp->fd_spin exclusively.  The threads caching
 * the descriptor do not have to be the current thread.  The (status)
 * argument is ignored.
 *
 * If match_fdc is not NULL, only the match_fdc's cache will be cleared.
 * The caller must hold fdp->fd_spin shared and match_fdc must match a
 * fdcache entry in curthread.  match_fdc has been locked by the caller
 * and had the specified (status).
 *
 * Since we are matching against a fp in the fdp (which must still be present
 * at this time), fp will have at least two refs on any match and we can
 * decrement the count trivially.
 */
static
void
fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status)
{
        struct fdcache *fdc;
        struct file *fp;
        int i;

        /*
         * match_fdc == NULL    We are cleaning out all tdcache entries
         *                      for the fdn and hold fdp->fd_spin exclusively.
         *                      This can race against the target threads
         *                      cleaning out specific entries.
         *
         * match_fdc != NULL    We are cleaning out a specific tdcache
         *                      entry on behalf of the owning thread
         *                      and hold fdp->fd_spin shared.  The thread
         *                      has already locked the entry.  This cannot
         *                      race.
         */
        fp = fdn->fp;
        for (i = 0; i < NTDCACHEFD; ++i) {
                if ((fdc = fdn->tdcache[i]) == NULL)
                        continue;

                /*
                 * If match_fdc is non-NULL we are being asked to
                 * clear a specific fdc owned by curthread.  There must
                 * be exactly one match.  The caller has already locked
                 * the cache entry and will dispose of the lock after
                 * we return.
                 *
                 * Since we also have a shared lock on fdp, we
                 * can do this without atomic ops.
                 */
                if (match_fdc) {
                        if (fdc != match_fdc)
                                continue;
                        fdn->tdcache[i] = NULL;
                        KASSERT(fp == fdc->fp,
                                ("fclearcache(1): fp mismatch %p/%p\n",
                                fp, fdc->fp));
                        fdc->fp = NULL;
                        fdc->fd = -1;

                        /*
                         * status can be 0 or 2.  If 2 the ref is borrowed,
                         * if 0 the ref is not borrowed and we have to drop
                         * it.
                         */
                        if (status == 0)
                                atomic_add_int(&fp->f_count, -1);
                        fdn->isfull = 0;        /* heuristic */
                        return;
                }

                /*
                 * Otherwise we hold an exclusive spin-lock and can only
                 * race thread consumers borrowing cache entries.
                 *
                 * Acquire the lock and dispose of the entry.  We have to
                 * spin until we get the lock.
                 */
                for (;;) {
                        status = atomic_swap_int(&fdc->locked, 1);
                        if (status == 1) {      /* foreign lock, retry */
                                cpu_pause();
                                continue;
                        }
                        fdn->tdcache[i] = NULL;
                        KASSERT(fp == fdc->fp,
                                ("fclearcache(2): fp mismatch %p/%p\n",
                                fp, fdc->fp));
                        fdc->fp = NULL;
                        fdc->fd = -1;
                        if (status == 0)
                                atomic_add_int(&fp->f_count, -1);
                        fdn->isfull = 0;        /* heuristic */
                        atomic_swap_int(&fdc->locked, 0);
                        break;
                }
        }
        KKASSERT(match_fdc == NULL);
}
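
/*
 * The per-thread descriptor cache relies on a tiny three-state lock
 * word in each fdcache entry:
 *
 *	0 - entry unlocked, any fp ref it names is NOT borrowed
 *	1 - entry locked (transient, the owner is modifying it)
 *	2 - entry unlocked, the fp ref is borrowed by the caching thread
 *
 * Minimal sketch of the acquire/restore idiom used throughout this file
 * (illustration only; 'fdc' stands for a struct fdcache pointer):
 *
 *	int status = atomic_swap_int(&fdc->locked, 1);
 *	if (status == 1) {
 *		cpu_pause();		// foreign lock, retry or skip
 *	} else {
 *		// ... inspect or modify fdc->fd / fdc->fp ...
 *		atomic_swap_int(&fdc->locked, status);	// restore 0 or 2
 *	}
 */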

/*
 * Retrieve the fp for the specified fd given the specified file descriptor
 * table.  The fdp does not have to be owned by the current process.
 * If flags != -1, fp->f_flag must contain at least one of the flags.
 *
 * This function is not able to cache the fp.
 */
struct file *
holdfp_fdp(struct filedesc *fdp, int fd, int flag)
{
        struct file *fp;

        spin_lock_shared(&fdp->fd_spin);
        if (((u_int)fd) < fdp->fd_nfiles) {
                fp = fdp->fd_files[fd].fp;      /* can be NULL */
                if (fp) {
                        if ((fp->f_flag & flag) == 0 && flag != -1) {
                                fp = NULL;
                        } else {
                                fhold(fp);
                        }
                }
        } else {
                fp = NULL;
        }
        spin_unlock_shared(&fdp->fd_spin);

        return fp;
}

struct file *
holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag)
{
        struct file *fp;

        if (((u_int)fd) < fdp->fd_nfiles) {
                fp = fdp->fd_files[fd].fp;      /* can be NULL */
                if (fp) {
                        if ((fp->f_flag & flag) == 0 && flag != -1) {
                                fp = NULL;
                        } else {
                                fhold(fp);
                        }
                }
        } else {
                fp = NULL;
        }
        return fp;
}

/*
 * Acquire the fp for the specified file descriptor, using the thread
 * cache if possible and caching it if possible.
 *
 * td must be the current thread.
 */
static
struct file *
_holdfp_cache(thread_t td, int fd)
{
        struct filedesc *fdp;
        struct fdcache *fdc;
        struct fdcache *best;
        struct fdnode *fdn;
        struct file *fp;
        int status;
        int delta;
        int i;

        /*
         * Fast
         */
        for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
                if (fdc->fd != fd || fdc->fp == NULL)
                        continue;
                status = atomic_swap_int(&fdc->locked, 1);

                /*
                 * If someone else has locked our cache entry they are in
                 * the middle of clearing it, skip the entry.
                 */
                if (status == 1)
                        continue;

                /*
                 * We have locked the entry, but if it no longer matches
                 * restore the previous state (0 or 2) and skip the entry.
                 */
                if (fdc->fd != fd || fdc->fp == NULL) {
                        atomic_swap_int(&fdc->locked, status);
                        continue;
                }

                /*
                 * We have locked a valid entry.  We can borrow the ref
                 * for a mode 0 entry.  We can get a valid fp for a mode
                 * 2 entry but not borrow the ref.
                 */
                if (status == 0) {
                        fp = fdc->fp;
                        fdc->lru = ++td->td_fdcache_lru;
                        atomic_swap_int(&fdc->locked, 2);

                        return fp;
                }
                if (status == 2) {
                        fp = fdc->fp;
                        fhold(fp);
                        fdc->lru = ++td->td_fdcache_lru;
                        atomic_swap_int(&fdc->locked, 2);

                        return fp;
                }
                KKASSERT(0);
        }

        /*
         * Lookup the descriptor the slow way.  This can contend against
         * modifying operations in a multi-threaded environment and cause
         * cache line ping ponging otherwise.
         */
        fdp = td->td_proc->p_fd;
        spin_lock_shared(&fdp->fd_spin);

        if (((u_int)fd) < fdp->fd_nfiles) {
                fp = fdp->fd_files[fd].fp;      /* can be NULL */
                if (fp) {
                        fhold(fp);
                        if (fdp->fd_files[fd].isfull == 0)
                                goto enter;
                }
        } else {
                fp = NULL;
        }
        spin_unlock_shared(&fdp->fd_spin);

        return fp;

        /*
         * We found a valid fp and held it, fdp is still shared locked.
         * Enter the fp into the per-thread cache.  Find the oldest entry
         * via lru, or an empty entry.
         *
         * Because fdp's spinlock is held (shared is fine), no other
         * thread should be in the middle of clearing our selected entry.
         */
enter:
        best = &td->td_fdcache[0];
        for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
                if (fdc->fp == NULL) {
                        best = fdc;
                        break;
                }
                delta = fdc->lru - best->lru;
                if (delta < 0)
                        best = fdc;
        }

        /*
         * Replace best
         *
         * Don't enter into the cache if we cannot get the lock.
         */
        status = atomic_swap_int(&best->locked, 1);
        if (status == 1)
                goto done;

        /*
         * Clear the previous cache entry if present
         */
        if (best->fp) {
                KKASSERT(best->fd >= 0);
                fclearcache(&fdp->fd_files[best->fd], best, status);
        }

        /*
         * Create our new cache entry.  This entry is 'safe' until we tie
         * into the fdnode.  If we cannot tie in, we will clear the entry.
         */
        best->fd = fd;
        best->fp = fp;
        best->lru = ++td->td_fdcache_lru;
        best->locked = 2;                       /* borrowed ref */

        fdn = &fdp->fd_files[fd];
        for (i = 0; i < NTDCACHEFD; ++i) {
                if (fdn->tdcache[i] == NULL &&
                    atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) {
                        goto done;
                }
        }
        fdn->isfull = 1;                        /* no space */
        best->fd = -1;
        best->fp = NULL;
        best->locked = 0;
done:
        spin_unlock_shared(&fdp->fd_spin);

        return fp;
}

/*
 * Drop the file pointer and return to the thread cache if possible.
 *
 * Caller must not hold fdp's spin lock.
 * td must be the current thread.
 */
void
dropfp(thread_t td, int fd, struct file *fp)
{
        struct filedesc *fdp;
        struct fdcache *fdc;
        int status;

        fdp = td->td_proc->p_fd;

        /*
         * If our placeholder is still present we can re-cache the ref.
         *
         * Note that we can race an fclearcache().
         */
        for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
                if (fdc->fp != fp || fdc->fd != fd)
                        continue;
                status = atomic_swap_int(&fdc->locked, 1);
                switch(status) {
                case 0:
                        /*
                         * Not in mode 2, fdrop fp without caching.
                         */
                        atomic_swap_int(&fdc->locked, 0);
                        break;
                case 1:
                        /*
                         * Not in mode 2, locked by someone else.
                         * fdrop fp without caching.
                         */
                        break;
                case 2:
                        /*
                         * Intact borrowed ref, return to mode 0
                         * indicating that we have returned the ref.
                         *
                         * Return the borrowed ref (2->1->0)
                         */
                        if (fdc->fp == fp && fdc->fd == fd) {
                                atomic_swap_int(&fdc->locked, 0);
                                return;
                        }
                        atomic_swap_int(&fdc->locked, 2);
                        break;
                }
        }

        /*
         * Failed to re-cache, drop the fp without caching.
         */
        fdrop(fp);
}
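
/*
 * A holdfp()-family lookup and dropfp() are intended to be paired so
 * the borrowed cache ref can be handed back instead of cycling
 * fp->f_count.  Minimal usage sketch (illustration only; the fd value
 * and error handling are placeholders):
 *
 *	thread_t td = curthread;
 *	struct file *fp;
 *
 *	if ((fp = holdfp(td, fd, -1)) != NULL) {
 *		// ... operate on fp ...
 *		dropfp(td, fd, fp);	// re-caches the ref when possible
 *	}
 */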

/*
 * Clear all descriptors cached in the per-thread fd cache for
 * the specified thread.
 *
 * Caller must not hold p_fd->spin.  This function will temporarily
 * obtain a shared spin lock.
 */
void
fexitcache(thread_t td)
{
        struct filedesc *fdp;
        struct fdcache *fdc;
        int status;
        int i;

        if (td->td_proc == NULL)
                return;
        fdp = td->td_proc->p_fd;
        if (fdp == NULL)
                return;

        /*
         * A shared lock is sufficient as the caller controls td and we
         * are only clearing td's cache.
         */
        spin_lock_shared(&fdp->fd_spin);
        for (i = 0; i < NFDCACHE; ++i) {
                fdc = &td->td_fdcache[i];
                if (fdc->fp) {
                        status = atomic_swap_int(&fdc->locked, 1);
                        if (status == 1) {
                                cpu_pause();
                                --i;
                                continue;
                        }
                        if (fdc->fp) {
                                KKASSERT(fdc->fd >= 0);
                                fclearcache(&fdp->fd_files[fdc->fd], fdc,
                                            status);
                        }
                        atomic_swap_int(&fdc->locked, 0);
                }
        }
        spin_unlock_shared(&fdp->fd_spin);
}

static __inline struct filelist_head *
fp2filelist(const struct file *fp)
{
        u_int i;

        i = (u_int)(uintptr_t)fp % NFILELIST_HEADS;
        return &filelist_heads[i];
}

static __inline
struct plimit *
readplimits(struct proc *p)
{
        thread_t td = curthread;
        struct plimit *limit;

        limit = td->td_limit;
        if (limit != p->p_limit) {
                spin_lock_shared(&p->p_spin);
                limit = p->p_limit;
                atomic_add_int(&limit->p_refcnt, 1);
                spin_unlock_shared(&p->p_spin);
                if (td->td_limit)
                        plimit_free(td->td_limit);
                td->td_limit = limit;
        }
        return limit;
}

/*
 * System calls on descriptors.
 */
int
sys_getdtablesize(struct getdtablesize_args *uap)
{
        struct proc *p = curproc;
        struct plimit *limit = readplimits(p);
        int dtsize;

        if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
                dtsize = INT_MAX;
        else
                dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

        if (dtsize > maxfilesperproc)
                dtsize = maxfilesperproc;
        if (dtsize < minfilesperproc)
                dtsize = minfilesperproc;
        if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
                dtsize = maxfilesperuser;
        uap->sysmsg_result = dtsize;
        return (0);
}

/*
 * Duplicate a file descriptor to a particular value.
 *
 * note: keep in mind that a potential race condition exists when closing
 * descriptors from a shared descriptor table (via rfork).
 */
int
sys_dup2(struct dup2_args *uap)
{
        int error;
        int fd = 0;

        error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
        uap->sysmsg_fds[0] = fd;

        return (error);
}

/*
 * Duplicate a file descriptor.
 */
int
sys_dup(struct dup_args *uap)
{
        int error;
        int fd = 0;

        error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
        uap->sysmsg_fds[0] = fd;

        return (error);
}
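
/*
 * The RLIMIT_NOFILE clamp in sys_getdtablesize() reappears verbatim in
 * kern_dup(), fdalloc_locked() and fdavail() below.  Conceptually it
 * factors out to a helper like this (hypothetical, shown only to make
 * the repeated clamp explicit):
 *
 *	static int
 *	dtsize_clamped(struct plimit *limit)
 *	{
 *		rlim_t cur = limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
 *		int dtsize = (cur > INT_MAX) ? INT_MAX : (int)cur;
 *
 *		if (dtsize > maxfilesperproc)
 *			dtsize = maxfilesperproc;
 *		if (dtsize < minfilesperproc)
 *			dtsize = minfilesperproc;
 *		return dtsize;
 *	}
 */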

/*
 * MPALMOSTSAFE - acquires mplock for fp operations
 */
int
kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct file *fp;
        struct vnode *vp;
        u_int newmin;
        u_int oflags;
        u_int nflags;
        int closedcounter;
        int tmp, error, flg = F_POSIX;

        KKASSERT(p);

        /*
         * Operations on file descriptors that do not require a file pointer.
         */
        switch (cmd) {
        case F_GETFD:
                error = fgetfdflags(p->p_fd, fd, &tmp);
                if (error == 0)
                        dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
                return (error);

        case F_SETFD:
                if (dat->fc_cloexec & FD_CLOEXEC)
                        error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
                else
                        error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
                return (error);
        case F_DUPFD:
                newmin = dat->fc_fd;
                error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin,
                                 &dat->fc_fd);
                return (error);
        case F_DUPFD_CLOEXEC:
                newmin = dat->fc_fd;
                error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL,
                                 fd, newmin, &dat->fc_fd);
                return (error);
        case F_DUP2FD:
                newmin = dat->fc_fd;
                error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd);
                return (error);
        case F_DUP2FD_CLOEXEC:
                newmin = dat->fc_fd;
                error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin,
                                 &dat->fc_fd);
                return (error);
        default:
                break;
        }

        /*
         * Operations on file pointers
         */
        closedcounter = p->p_fd->fd_closedcounter;
        if ((fp = holdfp(td, fd, -1)) == NULL)
                return (EBADF);

        switch (cmd) {
        case F_GETFL:
                dat->fc_flags = OFLAGS(fp->f_flag);
                error = 0;
                break;

        case F_SETFL:
                oflags = fp->f_flag;
                nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
                nflags |= oflags & ~FCNTLFLAGS;

                error = 0;
                if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
                        error = EINVAL;
                if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
                        tmp = nflags & FASYNC;
                        error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
                                         cred, NULL);
                }

                /*
                 * If no error, must be atomically set.
                 */
                while (error == 0) {
                        oflags = fp->f_flag;
                        cpu_ccfence();
                        nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS);
                        if (atomic_cmpset_int(&fp->f_flag, oflags, nflags))
                                break;
                        cpu_pause();
                }
                break;

        case F_GETOWN:
                error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
                                 cred, NULL);
                break;

        case F_SETOWN:
                error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
                                 cred, NULL);
                break;

        case F_SETLKW:
                flg |= F_WAIT;
                /* Fall into F_SETLK */

        case F_SETLK:
                if (fp->f_type != DTYPE_VNODE) {
                        error = EBADF;
                        break;
                }
                vp = (struct vnode *)fp->f_data;

                /*
                 * copyin/lockop may block
                 */
                if (dat->fc_flock.l_whence == SEEK_CUR)
                        dat->fc_flock.l_start += fp->f_offset;

                switch (dat->fc_flock.l_type) {
                case F_RDLCK:
                        if ((fp->f_flag & FREAD) == 0) {
                                error = EBADF;
                                break;
                        }
                        if (p->p_leader->p_advlock_flag == 0)
                                p->p_leader->p_advlock_flag = 1;
                        error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
                                            &dat->fc_flock, flg);
                        break;
                case F_WRLCK:
                        if ((fp->f_flag & FWRITE) == 0) {
                                error = EBADF;
                                break;
                        }
                        if (p->p_leader->p_advlock_flag == 0)
                                p->p_leader->p_advlock_flag = 1;
                        error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
                                            &dat->fc_flock, flg);
                        break;
                case F_UNLCK:
                        error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
                                            &dat->fc_flock, F_POSIX);
                        break;
                default:
                        error = EINVAL;
                        break;
                }

                /*
                 * It is possible to race a close() on the descriptor while
                 * we were blocked getting the lock.  If this occurs the
                 * close might not have caught the lock.
                 */
                if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) {
                        dat->fc_flock.l_whence = SEEK_SET;
                        dat->fc_flock.l_start = 0;
                        dat->fc_flock.l_len = 0;
                        dat->fc_flock.l_type = F_UNLCK;
                        VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
                                    F_UNLCK, &dat->fc_flock, F_POSIX);
                }
                break;

        case F_GETLK:
                if (fp->f_type != DTYPE_VNODE) {
                        error = EBADF;
                        break;
                }
                vp = (struct vnode *)fp->f_data;
                /*
                 * copyin/lockop may block
                 */
                if (dat->fc_flock.l_type != F_RDLCK &&
                    dat->fc_flock.l_type != F_WRLCK &&
                    dat->fc_flock.l_type != F_UNLCK) {
                        error = EINVAL;
                        break;
                }
                if (dat->fc_flock.l_whence == SEEK_CUR)
                        dat->fc_flock.l_start += fp->f_offset;
                error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
                                    &dat->fc_flock, F_POSIX);
                break;
        default:
                error = EINVAL;
                break;
        }

        fdrop(fp);
        return (error);
}

/*
 * The file control system call.
 */
int
sys_fcntl(struct fcntl_args *uap)
{
        union fcntl_dat dat;
        int error;

        switch (uap->cmd) {
        case F_DUPFD:
        case F_DUP2FD:
        case F_DUPFD_CLOEXEC:
        case F_DUP2FD_CLOEXEC:
                dat.fc_fd = uap->arg;
                break;
        case F_SETFD:
                dat.fc_cloexec = uap->arg;
                break;
        case F_SETFL:
                dat.fc_flags = uap->arg;
                break;
        case F_SETOWN:
                dat.fc_owner = uap->arg;
                break;
        case F_SETLKW:
        case F_SETLK:
        case F_GETLK:
                error = copyin((caddr_t)uap->arg, &dat.fc_flock,
                               sizeof(struct flock));
                if (error)
                        return (error);
                break;
        }

        error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);

        if (error == 0) {
                switch (uap->cmd) {
                case F_DUPFD:
                case F_DUP2FD:
                case F_DUPFD_CLOEXEC:
                case F_DUP2FD_CLOEXEC:
                        uap->sysmsg_result = dat.fc_fd;
                        break;
                case F_GETFD:
                        uap->sysmsg_result = dat.fc_cloexec;
                        break;
                case F_GETFL:
                        uap->sysmsg_result = dat.fc_flags;
                        break;
                case F_GETOWN:
                        uap->sysmsg_result = dat.fc_owner;
                        break;
                case F_GETLK:
                        error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
                                        sizeof(struct flock));
                        break;
                }
        }

        return (error);
}
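
/*
 * F_SETFL above updates fp->f_flag without a lock by using the classic
 * load/modify/compare-and-swap retry loop.  The bare pattern, as a
 * sketch (illustration only; 'var', 'MASK' and 'newbits' are
 * placeholders):
 *
 *	for (;;) {
 *		u_int old = var;
 *		cpu_ccfence();		// prevent refolding of the load
 *		u_int new = (old & ~MASK) | (newbits & MASK);
 *		if (atomic_cmpset_int(&var, old, new))
 *			break;
 *		cpu_pause();
 *	}
 */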

/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and
 * DUP_CLOEXEC.
 *
 * DUP_FCNTL is for handling EINVAL vs. EBADF differences between
 * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX).
 * The next two flags are mutually exclusive, and the fourth is optional.
 * DUP_FIXED tells kern_dup() to destructively dup over an existing file
 * descriptor if "new" is already open.  DUP_VARIABLE tells kern_dup()
 * to find the lowest unused file descriptor that is greater than or
 * equal to "new".  DUP_CLOEXEC, which works with either of the first
 * two flags, sets the close-on-exec flag on the "new" file descriptor.
 */
int
kern_dup(int flags, int old, int new, int *res)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct plimit *limit = readplimits(p);
        struct filedesc *fdp = p->p_fd;
        struct file *fp;
        struct file *delfp;
        int oldflags;
        int holdleaders;
        int dtsize;
        int error, newfd;

        /*
         * Verify that we have a valid descriptor to dup from and
         * possibly to dup to.  When the new descriptor is out of
         * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must
         * return EINVAL, while dup2() returns EBADF in
         * this case.
         *
         * NOTE: maxfilesperuser is not applicable to dup()
         */
retry:
        if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
                dtsize = INT_MAX;
        else
                dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
        if (dtsize > maxfilesperproc)
                dtsize = maxfilesperproc;
        if (dtsize < minfilesperproc)
                dtsize = minfilesperproc;

        if (new < 0 || new > dtsize)
                return (flags & DUP_FCNTL ? EINVAL : EBADF);

        spin_lock(&fdp->fd_spin);
        if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
                spin_unlock(&fdp->fd_spin);
                return (EBADF);
        }
        if ((flags & DUP_FIXED) && old == new) {
                *res = new;
                if (flags & DUP_CLOEXEC)
                        fdp->fd_files[new].fileflags |= UF_EXCLOSE;
                spin_unlock(&fdp->fd_spin);
                return (0);
        }
        fp = fdp->fd_files[old].fp;
        oldflags = fdp->fd_files[old].fileflags;
        fhold(fp);

        /*
         * Allocate a new descriptor if DUP_VARIABLE, or expand the table
         * if the requested descriptor is beyond the current table size.
         *
         * This can block.  Retry if the source descriptor no longer matches
         * or if our expectation in the expansion case races.
         *
         * If we are not expanding or allocating a new descriptor, then reset
         * the target descriptor to a reserved state so we have a uniform
         * setup for the next code block.
         */
        if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) {
                error = fdalloc_locked(p, fdp, new, &newfd);
                if (error) {
                        spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        return (error);
                }
                /*
                 * Check for ripout
                 */
                if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
                        fsetfd_locked(fdp, NULL, newfd);
                        spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        goto retry;
                }
                /*
                 * Check for expansion race
                 */
                if ((flags & DUP_VARIABLE) == 0 && new != newfd) {
                        fsetfd_locked(fdp, NULL, newfd);
                        spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        goto retry;
                }
                /*
                 * Check for ripout, newfd reused old (this case probably
                 * can't occur).
                 */
                if (old == newfd) {
                        fsetfd_locked(fdp, NULL, newfd);
                        spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        goto retry;
                }
                new = newfd;
                delfp = NULL;
        } else {
                if (fdp->fd_files[new].reserved) {
                        spin_unlock(&fdp->fd_spin);
                        fdrop(fp);
                        kprintf("Warning: dup(): target descriptor %d is "
                                "reserved, waiting for it to be resolved\n",
                                new);
                        tsleep(fdp, 0, "fdres", hz);
                        goto retry;
                }

                /*
                 * If the target descriptor was never allocated we have
                 * to allocate it.  If it was we have to clean out the
                 * old descriptor.  delfp inherits the ref from the
                 * descriptor table.
                 */
                ++fdp->fd_closedcounter;
                fclearcache(&fdp->fd_files[new], NULL, 0);
                ++fdp->fd_closedcounter;
                delfp = fdp->fd_files[new].fp;
                fdp->fd_files[new].fp = NULL;
                fdp->fd_files[new].reserved = 1;
                if (delfp == NULL) {
                        fdreserve_locked(fdp, new, 1);
                        if (new > fdp->fd_lastfile)
                                fdp->fd_lastfile = new;
                }

        }

        /*
         * NOTE: still holding an exclusive spinlock
         */

        /*
         * If a descriptor is being overwritten we may have to tell
         * fdfree() to sleep to ensure that all relevant process
         * leaders can be traversed in closef().
         */
        if (delfp != NULL && p->p_fdtol != NULL) {
                fdp->fd_holdleaderscount++;
                holdleaders = 1;
        } else {
                holdleaders = 0;
        }
        KASSERT(delfp == NULL || (flags & DUP_FIXED),
                ("dup() picked an open file"));

        /*
         * Duplicate the source descriptor, update lastfile.  If the new
         * descriptor was not allocated and we aren't replacing an existing
         * descriptor we have to mark the descriptor as being in use.
         *
         * The fd_files[] array inherits fp's hold reference.
         */
        fsetfd_locked(fdp, fp, new);
        if ((flags & DUP_CLOEXEC) != 0)
                fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE;
        else
                fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
        spin_unlock(&fdp->fd_spin);
        fdrop(fp);
        *res = new;

        /*
         * If we dup'd over a valid file, we now own the reference to it
         * and must dispose of it using closef() semantics (as if a
         * close() were performed on it).
         */
        if (delfp) {
                if (SLIST_FIRST(&delfp->f_klist))
                        knote_fdclose(delfp, fdp, new);
                closef(delfp, p);
                if (holdleaders) {
                        spin_lock(&fdp->fd_spin);
                        fdp->fd_holdleaderscount--;
                        if (fdp->fd_holdleaderscount == 0 &&
                            fdp->fd_holdleaderswakeup != 0) {
                                fdp->fd_holdleaderswakeup = 0;
                                spin_unlock(&fdp->fd_spin);
                                wakeup(&fdp->fd_holdleaderscount);
                        } else {
                                spin_unlock(&fdp->fd_spin);
                        }
                }
        }
        return (0);
}

/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(struct sigio **sigiop)
{
        struct pgrp *pgrp;
        struct proc *p;
        struct sigio *sigio;

        if ((sigio = *sigiop) != NULL) {
                lwkt_gettoken(&sigio_token);    /* protect sigio */
                KKASSERT(sigiop == sigio->sio_myref);
                sigio = *sigiop;
                *sigiop = NULL;
                lwkt_reltoken(&sigio_token);
        }
        if (sigio == NULL)
                return;

        if (sigio->sio_pgid < 0) {
                pgrp = sigio->sio_pgrp;
                sigio->sio_pgrp = NULL;
                lwkt_gettoken(&pgrp->pg_token);
                SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
                lwkt_reltoken(&pgrp->pg_token);
                pgrel(pgrp);
        } else /* if ((*sigiop)->sio_pgid > 0) */ {
                p = sigio->sio_proc;
                sigio->sio_proc = NULL;
                PHOLD(p);
                lwkt_gettoken(&p->p_token);
                SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
                lwkt_reltoken(&p->p_token);
                PRELE(p);
        }
        crfree(sigio->sio_ucred);
        sigio->sio_ucred = NULL;
        kfree(sigio, M_SIGIO);
}

/*
 * Free a list of sigio structures.  Caller is responsible for ensuring
 * that the list is MPSAFE.
 */
void
funsetownlst(struct sigiolst *sigiolst)
{
        struct sigio *sigio;

        while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
                funsetown(sigio->sio_myref);
}
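
/*
 * How the public entry points above map onto kern_dup() flags
 * (summary of the callers in this file):
 *
 *	dup(fd)                     kern_dup(DUP_VARIABLE, fd, 0, &res)
 *	dup2(from, to)              kern_dup(DUP_FIXED, from, to, &res)
 *	fcntl(fd, F_DUPFD)          kern_dup(DUP_VARIABLE | DUP_FCNTL, ...)
 *	fcntl(fd, F_DUPFD_CLOEXEC)  kern_dup(DUP_VARIABLE | DUP_CLOEXEC |
 *	                                     DUP_FCNTL, ...)
 *	fcntl(fd, F_DUP2FD)         kern_dup(DUP_FIXED, ...)
 *	fcntl(fd, F_DUP2FD_CLOEXEC) kern_dup(DUP_FIXED | DUP_CLOEXEC, ...)
 */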

/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
        struct proc *proc = NULL;
        struct pgrp *pgrp = NULL;
        struct sigio *sigio;
        int error;

        if (pgid == 0) {
                funsetown(sigiop);
                return (0);
        }

        if (pgid > 0) {
                proc = pfind(pgid);
                if (proc == NULL) {
                        error = ESRCH;
                        goto done;
                }

                /*
                 * Policy - Don't allow a process to FSETOWN a process
                 * in another session.
                 *
                 * Remove this test to allow maximum flexibility or
                 * restrict FSETOWN to the current process or process
                 * group for maximum safety.
                 */
                if (proc->p_session != curproc->p_session) {
                        error = EPERM;
                        goto done;
                }
        } else /* if (pgid < 0) */ {
                pgrp = pgfind(-pgid);
                if (pgrp == NULL) {
                        error = ESRCH;
                        goto done;
                }

                /*
                 * Policy - Don't allow a process to FSETOWN a process
                 * in another session.
                 *
                 * Remove this test to allow maximum flexibility or
                 * restrict FSETOWN to the current process or process
                 * group for maximum safety.
                 */
                if (pgrp->pg_session != curproc->p_session) {
                        error = EPERM;
                        goto done;
                }
        }
        sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
        if (pgid > 0) {
                KKASSERT(pgrp == NULL);
                lwkt_gettoken(&proc->p_token);
                SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
                sigio->sio_proc = proc;
                lwkt_reltoken(&proc->p_token);
        } else {
                KKASSERT(proc == NULL);
                lwkt_gettoken(&pgrp->pg_token);
                SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
                sigio->sio_pgrp = pgrp;
                lwkt_reltoken(&pgrp->pg_token);
                pgrp = NULL;
        }
        sigio->sio_pgid = pgid;
        sigio->sio_ucred = crhold(curthread->td_ucred);
        /* It would be convenient if p_ruid was in ucred. */
        sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
        sigio->sio_myref = sigiop;

        lwkt_gettoken(&sigio_token);
        while (*sigiop)
                funsetown(sigiop);
        *sigiop = sigio;
        lwkt_reltoken(&sigio_token);
        error = 0;
done:
        if (pgrp)
                pgrel(pgrp);
        if (proc)
                PRELE(proc);
        return (error);
}

/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(struct sigio **sigiop)
{
        struct sigio *sigio;
        pid_t own;

        lwkt_gettoken_shared(&sigio_token);
        sigio = *sigiop;
        own = (sigio != NULL ? sigio->sio_pgid : 0);
        lwkt_reltoken(&sigio_token);

        return (own);
}

/*
 * Close many file descriptors.
 */
int
sys_closefrom(struct closefrom_args *uap)
{
        return(kern_closefrom(uap->fd));
}
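
/*
 * The sio_pgid sign convention used by fsetown()/funsetown() above
 * follows the classic BSD F_SETOWN encoding:
 *
 *	pgid > 0	deliver SIGIO/SIGURG to the process with that pid
 *	pgid < 0	deliver to the process group with id == -pgid
 *	pgid == 0	clear any existing ownership
 *
 * For example, fcntl(fd, F_SETOWN, -pgid) from userland targets an
 * entire process group (illustrative example).
 */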

/*
 * Close all file descriptors greater than or equal to fd.
 */
int
kern_closefrom(int fd)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct filedesc *fdp;

        KKASSERT(p);
        fdp = p->p_fd;

        if (fd < 0)
                return (EINVAL);

        /*
         * NOTE: This function will skip unassociated descriptors and
         *       reserved descriptors that have not yet been assigned.
         *       fd_lastfile can change as a side effect of kern_close().
         */
        spin_lock(&fdp->fd_spin);
        while (fd <= fdp->fd_lastfile) {
                if (fdp->fd_files[fd].fp != NULL) {
                        spin_unlock(&fdp->fd_spin);
                        /* ok if this races another close */
                        if (kern_close(fd) == EINTR)
                                return (EINTR);
                        spin_lock(&fdp->fd_spin);
                }
                ++fd;
        }
        spin_unlock(&fdp->fd_spin);
        return (0);
}

/*
 * Close a file descriptor.
 */
int
sys_close(struct close_args *uap)
{
        return(kern_close(uap->fd));
}

/*
 * close() helper
 */
int
kern_close(int fd)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct filedesc *fdp;
        struct file *fp;
        int error;
        int holdleaders;

        KKASSERT(p);
        fdp = p->p_fd;

        /*
         * funsetfd*() also clears the fd cache
         */
        spin_lock(&fdp->fd_spin);
        if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
                spin_unlock(&fdp->fd_spin);
                return (EBADF);
        }
        holdleaders = 0;
        if (p->p_fdtol != NULL) {
                /*
                 * Ask fdfree() to sleep to ensure that all relevant
                 * process leaders can be traversed in closef().
                 */
                fdp->fd_holdleaderscount++;
                holdleaders = 1;
        }

        /*
         * we now hold the fp reference that used to be owned by the
         * descriptor array.
         */
        spin_unlock(&fdp->fd_spin);
        if (SLIST_FIRST(&fp->f_klist))
                knote_fdclose(fp, fdp, fd);
        error = closef(fp, p);
        if (holdleaders) {
                spin_lock(&fdp->fd_spin);
                fdp->fd_holdleaderscount--;
                if (fdp->fd_holdleaderscount == 0 &&
                    fdp->fd_holdleaderswakeup != 0) {
                        fdp->fd_holdleaderswakeup = 0;
                        spin_unlock(&fdp->fd_spin);
                        wakeup(&fdp->fd_holdleaderscount);
                } else {
                        spin_unlock(&fdp->fd_spin);
                }
        }
        return (error);
}

/*
 * shutdown_args(int fd, int how)
 */
int
kern_shutdown(int fd, int how)
{
        struct thread *td = curthread;
        struct file *fp;
        int error;

        if ((fp = holdfp(td, fd, -1)) == NULL)
                return (EBADF);
        error = fo_shutdown(fp, how);
        fdrop(fp);

        return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_shutdown(struct shutdown_args *uap)
{
        int error;

        error = kern_shutdown(uap->s, uap->how);

        return (error);
}

/*
 * fstat() helper
 */
int
kern_fstat(int fd, struct stat *ub)
{
        struct thread *td = curthread;
        struct file *fp;
        int error;

        if ((fp = holdfp(td, fd, -1)) == NULL)
                return (EBADF);
        error = fo_stat(fp, ub, td->td_ucred);
        fdrop(fp);

        return (error);
}

/*
 * Return status information about a file descriptor.
 */
int
sys_fstat(struct fstat_args *uap)
{
        struct stat st;
        int error;

        error = kern_fstat(uap->fd, &st);

        if (error == 0)
                error = copyout(&st, uap->sb, sizeof(st));
        return (error);
}
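
/*
 * The holdleaders dance in kern_close() and kern_dup() pairs a counter
 * increment, taken while a descriptor is being torn down, with a
 * decrement-and-wakeup once closef() has run, so that fdfree() (which
 * is asked to sleep, per the comments above) can traverse all relevant
 * process leaders.  The closing side always follows this sequence
 * (sketch of the code above):
 *
 *	spin_lock(&fdp->fd_spin);
 *	fdp->fd_holdleaderscount++;	// before dropping the fd
 *	spin_unlock(&fdp->fd_spin);
 *	closef(fp, p);			// may block on advisory locks
 *	spin_lock(&fdp->fd_spin);
 *	if (--fdp->fd_holdleaderscount == 0 && fdp->fd_holdleaderswakeup) {
 *		fdp->fd_holdleaderswakeup = 0;
 *		spin_unlock(&fdp->fd_spin);
 *		wakeup(&fdp->fd_holdleaderscount);
 *	}
 */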

/*
 * Return pathconf information about a file descriptor.
 *
 * MPALMOSTSAFE
 */
int
sys_fpathconf(struct fpathconf_args *uap)
{
        struct thread *td = curthread;
        struct file *fp;
        struct vnode *vp;
        int error = 0;

        if ((fp = holdfp(td, uap->fd, -1)) == NULL)
                return (EBADF);

        switch (fp->f_type) {
        case DTYPE_PIPE:
        case DTYPE_SOCKET:
                if (uap->name != _PC_PIPE_BUF) {
                        error = EINVAL;
                } else {
                        uap->sysmsg_result = PIPE_BUF;
                        error = 0;
                }
                break;
        case DTYPE_FIFO:
        case DTYPE_VNODE:
                vp = (struct vnode *)fp->f_data;
                error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        fdrop(fp);
        return(error);
}

/*
 * Grow the file table so it can hold through descriptor (want).
 *
 * The fdp's spinlock must be held exclusively on entry and may be held
 * exclusively on return.  The spinlock may be cycled by the routine.
 */
static void
fdgrow_locked(struct filedesc *fdp, int want)
{
        struct fdnode *newfiles;
        struct fdnode *oldfiles;
        int nf, extra;

        nf = fdp->fd_nfiles;
        do {
                /* nf has to be of the form 2^n - 1 */
                nf = 2 * nf + 1;
        } while (nf <= want);

        spin_unlock(&fdp->fd_spin);
        newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
        spin_lock(&fdp->fd_spin);

        /*
         * We could have raced another extend while we were not holding
         * the spinlock.
         */
        if (fdp->fd_nfiles >= nf) {
                spin_unlock(&fdp->fd_spin);
                kfree(newfiles, M_FILEDESC);
                spin_lock(&fdp->fd_spin);
                return;
        }
        /*
         * Copy the existing ofile and ofileflags arrays
         * and zero the new portion of each array.
         */
        extra = nf - fdp->fd_nfiles;
        bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
        bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));

        oldfiles = fdp->fd_files;
        fdp->fd_files = newfiles;
        fdp->fd_nfiles = nf;

        if (oldfiles != fdp->fd_builtin_files) {
                spin_unlock(&fdp->fd_spin);
                kfree(oldfiles, M_FILEDESC);
                spin_lock(&fdp->fd_spin);
        }
}

/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
        return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
        return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
        return ((n & (n + 1)) - 1);
}

/*
 * Traverse the in-place binary tree bottom-up adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * caller must be holding an exclusive spinlock on fdp
 */
static
void
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
{
        while (fd >= 0) {
                fdp->fd_files[fd].allocated += incr;
                KKASSERT(fdp->fd_files[fd].allocated >= 0);
                fd = left_ancestor(fd);
        }
}
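
/*
 * Worked example of the in-place binary tree arithmetic above, for a
 * 7-entry table (indices 0..6, root at index 3):
 *
 *	left_ancestor(n)  = (n & (n + 1)) - 1
 *	right_ancestor(n) = n | (n + 1)
 *
 *	n = 4:  left_ancestor(4)  = (4 & 5) - 1 = 3
 *	n = 3:  left_ancestor(3)  = (3 & 4) - 1 = -1  (no further parent)
 *	n = 2:  right_ancestor(2) = 2 | 3 = 3
 *
 * So reserving descriptor 4 bumps .allocated at node 4 and then at
 * node 3, after which any scan visiting node 3 knows its subtree holds
 * one more live descriptor.  right_subtree_size(3) = 3 ^ (3 | 4) = 4,
 * i.e. nodes {4, 5, 6} plus the root itself.
 */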

/*
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
 */
static
int
fdalloc_locked(struct proc *p, struct filedesc *fdp, int want, int *result)
{
        struct plimit *limit = readplimits(p);
        struct uidinfo *uip;
        int fd, rsize, rsum, node, lim;

        /*
         * Check dtable size limit
         */
        *result = -1;   /* avoid gcc warnings */
        if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
                lim = INT_MAX;
        else
                lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

        if (lim > maxfilesperproc)
                lim = maxfilesperproc;
        if (lim < minfilesperproc)
                lim = minfilesperproc;
        if (want >= lim)
                return (EMFILE);

        /*
         * Check that the user has not run out of descriptors (non-root only).
         * As a safety measure the dtable is allowed to have at least
         * minfilesperproc open fds regardless of the maxfilesperuser limit.
         *
         * This isn't as loose a spec as ui_posixlocks, so we use atomic
         * ops to force synchronize and recheck if we would otherwise
         * error.
         */
        if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
                uip = p->p_ucred->cr_uidinfo;
                if (uip->ui_openfiles > maxfilesperuser) {
                        int n;
                        int count;

                        count = 0;
                        for (n = 0; n < ncpus; ++n) {
                                count += atomic_swap_int(
                                            &uip->ui_pcpu[n].pu_openfiles, 0);
                        }
                        atomic_add_int(&uip->ui_openfiles, count);
                        if (uip->ui_openfiles > maxfilesperuser) {
                                krateprintf(&krate_uidinfo,
                                            "Warning: user %d pid %d (%s) "
                                            "ran out of file descriptors "
                                            "(%d/%d)\n",
                                            p->p_ucred->cr_uid, (int)p->p_pid,
                                            p->p_comm,
                                            uip->ui_openfiles,
                                            maxfilesperuser);
                                return(ENFILE);
                        }
                }
        }

        /*
         * Grow the dtable if necessary
         */
        if (want >= fdp->fd_nfiles)
                fdgrow_locked(fdp, want);

        /*
         * Search for a free descriptor starting at the higher
         * of want or fd_freefile.  If that fails, consider
         * expanding the ofile array.
         *
         * NOTE! the 'allocated' field is a cumulative recursive allocation
         * count.  If we happen to see a value of 0 then we can shortcut
         * our search.  Otherwise we run through the tree going
         * down branches we know have free descriptor(s) until we hit a
         * leaf node.  The leaf node will be free but will not necessarily
         * have an allocated field of 0.
         */
retry:
        /* move up the tree looking for a subtree with a free node */
        for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
             fd = right_ancestor(fd)) {
                if (fdp->fd_files[fd].allocated == 0)
                        goto found;

                rsize = right_subtree_size(fd);
                if (fdp->fd_files[fd].allocated == rsize)
                        continue;       /* right subtree full */

                /*
                 * Free fd is in the right subtree of the tree rooted at fd.
                 * Call that subtree R.  Look for the smallest (leftmost)
                 * subtree of R with an unallocated fd: continue moving
                 * down the left branch until encountering a full left
                 * subtree, then move to the right.
                 */
                for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
                        node = fd + rsize;
                        rsum += fdp->fd_files[node].allocated;
                        if (fdp->fd_files[fd].allocated == rsum + rsize) {
                                fd = node;      /* move to the right */
                                if (fdp->fd_files[node].allocated == 0)
                                        goto found;
                                rsum = 0;
                        }
                }
                goto found;
        }

        /*
         * No space in current array.  Expand?
         */
        if (fdp->fd_nfiles >= lim) {
                return (EMFILE);
        }
        fdgrow_locked(fdp, want);
        goto retry;

found:
        KKASSERT(fd < fdp->fd_nfiles);
        if (fd > fdp->fd_lastfile)
                fdp->fd_lastfile = fd;
        if (want <= fdp->fd_freefile)
                fdp->fd_freefile = fd;
        *result = fd;
        KKASSERT(fdp->fd_files[fd].fp == NULL);
        KKASSERT(fdp->fd_files[fd].reserved == 0);
        fdp->fd_files[fd].fileflags = 0;
        fdp->fd_files[fd].reserved = 1;
        fdreserve_locked(fdp, fd, 1);

        return (0);
}

int
fdalloc(struct proc *p, int want, int *result)
{
        struct filedesc *fdp = p->p_fd;
        int error;

        spin_lock(&fdp->fd_spin);
        error = fdalloc_locked(p, fdp, want, result);
        spin_unlock(&fdp->fd_spin);

        return error;
}

/*
 * Check to see whether n user file descriptors
 * are available to the process p.
 */
int
fdavail(struct proc *p, int n)
{
        struct plimit *limit = readplimits(p);
        struct filedesc *fdp = p->p_fd;
        struct fdnode *fdnode;
        int i, lim, last;

        if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
                lim = INT_MAX;
        else
                lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

        if (lim > maxfilesperproc)
                lim = maxfilesperproc;
        if (lim < minfilesperproc)
                lim = minfilesperproc;

        spin_lock(&fdp->fd_spin);
        if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
                spin_unlock(&fdp->fd_spin);
                return (1);
        }
        last = min(fdp->fd_nfiles, lim);
        fdnode = &fdp->fd_files[fdp->fd_freefile];
        for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
                if (fdnode->fp == NULL && --n <= 0) {
                        spin_unlock(&fdp->fd_spin);
                        return (1);
                }
        }
        spin_unlock(&fdp->fd_spin);
        return (0);
}
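
/*
 * Error convention used by the allocator above:
 *
 *	EMFILE - the process hit its own descriptor table limit
 *	         (RLIMIT_NOFILE clamped by maxfilesperproc).
 *	ENFILE - a non-root user exceeded maxfilesperuser across all of
 *	         that user's processes; the per-cpu pu_openfiles counts
 *	         are swapped to zero and folded into ui_openfiles first
 *	         so the check is exact before failing.
 */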

/*
 * Revoke open descriptors referencing (f_data, f_type)
 *
 * Any revoke executed within a prison is only able to
 * revoke descriptors for processes within that prison.
 *
 * Returns 0 on success or an error code.
 */
struct fdrevoke_info {
        void *data;
        short type;
        short unused;
        int found;
        struct ucred *cred;
        struct file *nfp;
};

static int fdrevoke_check_callback(struct file *fp, void *vinfo);
static int fdrevoke_proc_callback(struct proc *p, void *vinfo);

int
fdrevoke(void *f_data, short f_type, struct ucred *cred)
{
        struct fdrevoke_info info;
        int error;

        bzero(&info, sizeof(info));
        info.data = f_data;
        info.type = f_type;
        info.cred = cred;
        error = falloc(NULL, &info.nfp, NULL);
        if (error)
                return (error);

        /*
         * Scan the file pointer table once.  dups do not dup file pointers,
         * only descriptors, so there is no leak.  Set FREVOKED on the fps
         * being revoked.
         *
         * Any fps sent over unix-domain sockets will be revoked by the
         * socket code checking for FREVOKED when the fps are externalized.
         * revoke_token is used to make sure that fps marked FREVOKED and
         * externalized will be picked up by the following allproc_scan().
         */
        lwkt_gettoken(&revoke_token);
        allfiles_scan_exclusive(fdrevoke_check_callback, &info);
        lwkt_reltoken(&revoke_token);

        /*
         * If any fps were marked track down the related descriptors
         * and close them.  Any dup()s at this point will notice
         * the FREVOKED already set in the fp and do the right thing.
         */
        if (info.found)
                allproc_scan(fdrevoke_proc_callback, &info, 0);
        fdrop(info.nfp);
        return(0);
}

/*
 * Locate matching file pointers directly.
 *
 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
 */
static int
fdrevoke_check_callback(struct file *fp, void *vinfo)
{
        struct fdrevoke_info *info = vinfo;

        /*
         * File pointers already flagged for revocation are skipped.
         */
        if (fp->f_flag & FREVOKED)
                return(0);

        /*
         * If revoking from a prison file pointers created outside of
         * that prison, or file pointers without creds, cannot be revoked.
         */
        if (info->cred->cr_prison &&
            (fp->f_cred == NULL ||
             info->cred->cr_prison != fp->f_cred->cr_prison)) {
                return(0);
        }

        /*
         * If the file pointer matches then mark it for revocation.  The
         * flag is currently only used by unp_revoke_gc().
         *
         * info->found is a heuristic and can race in a SMP environment.
         */
        if (info->data == fp->f_data && info->type == fp->f_type) {
                atomic_set_int(&fp->f_flag, FREVOKED);
                info->found = 1;
        }
        return(0);
}

/*
 * Locate matching file pointers via process descriptor tables.
 */
static int
fdrevoke_proc_callback(struct proc *p, void *vinfo)
{
        struct fdrevoke_info *info = vinfo;
        struct filedesc *fdp;
        struct file *fp;
        int n;

        if (p->p_stat == SIDL || p->p_stat == SZOMB)
                return(0);
        if (info->cred->cr_prison &&
            info->cred->cr_prison != p->p_ucred->cr_prison) {
                return(0);
        }

        /*
         * If the controlling terminal of the process matches the
         * vnode being revoked we clear the controlling terminal.
         *
         * The normal spec_close() may not catch this because it
         * uses curproc instead of p.
         */
        if (p->p_session && info->type == DTYPE_VNODE &&
            info->data == p->p_session->s_ttyvp) {
                p->p_session->s_ttyvp = NULL;
                vrele(info->data);
        }

        /*
         * Softref the fdp to prevent it from being destroyed
         */
        spin_lock(&p->p_spin);
        if ((fdp = p->p_fd) == NULL) {
                spin_unlock(&p->p_spin);
                return(0);
        }
        atomic_add_int(&fdp->fd_softrefs, 1);
        spin_unlock(&p->p_spin);

        /*
         * Locate and close any matching file descriptors, replacing
         * them with info->nfp.
         */
        spin_lock(&fdp->fd_spin);
        for (n = 0; n < fdp->fd_nfiles; ++n) {
                if ((fp = fdp->fd_files[n].fp) == NULL)
                        continue;
                if (fp->f_flag & FREVOKED) {
                        ++fdp->fd_closedcounter;
                        fclearcache(&fdp->fd_files[n], NULL, 0);
                        ++fdp->fd_closedcounter;
                        fhold(info->nfp);
                        fdp->fd_files[n].fp = info->nfp;
                        spin_unlock(&fdp->fd_spin);
                        knote_fdclose(fp, fdp, n);      /* XXX */
                        closef(fp, p);
                        spin_lock(&fdp->fd_spin);
                }
        }
        spin_unlock(&fdp->fd_spin);
        atomic_subtract_int(&fdp->fd_softrefs, 1);
        return(0);
}

/*
 * falloc:
 *	Create a new open file structure and reserve a file descriptor
 *	for the process that refers to it.
 *
 *	Root creds are checked using lp, or assumed if lp is NULL.  If
 *	resultfd is non-NULL then lp must also be non-NULL.  No file
 *	descriptor is reserved (and no process context is needed) if
 *	resultfd is NULL.
 *
 *	A file pointer with a refcount of 1 is returned.  Note that the
 *	file pointer is NOT associated with the descriptor.  If falloc
 *	returns success, fsetfd() MUST be called to either associate the
 *	file pointer or clear the reservation.
 */
int
falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
{
        static struct timeval lastfail;
        static int curfail;
        struct filelist_head *head;
        struct file *fp;
        struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
        int error;

        fp = NULL;

        /*
         * Handle filetable full issues and root overfill.
         */
        if (nfiles >= maxfiles - maxfilesrootres &&
            (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
                if (ppsratecheck(&lastfail, &curfail, 1)) {
                        kprintf("kern.maxfiles limit exceeded by uid %d, "
                                "please see tuning(7).\n",
                                cred->cr_ruid);
                }
                error = ENFILE;
                goto done;
        }

        /*
         * Allocate a new file descriptor.
         */
        fp = objcache_get(file_objcache, M_WAITOK);
        bzero(fp, sizeof(*fp));
        spin_init(&fp->f_spin, "falloc");
        SLIST_INIT(&fp->f_klist);
        fp->f_count = 1;
        fp->f_ops = &badfileops;
        fp->f_seqcount = 1;
        fsetcred(fp, cred);
        atomic_add_int(&nfiles, 1);

        head = fp2filelist(fp);
        spin_lock(&head->spin);
        LIST_INSERT_HEAD(&head->list, fp, f_list);
        spin_unlock(&head->spin);

        if (resultfd) {
                if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
                        fdrop(fp);
                        fp = NULL;
                }
        } else {
                error = 0;
        }
done:
        *resultfp = fp;
        return (error);
}

/*
 * Check for races against a file descriptor by determining that the
 * file pointer is still associated with the specified file descriptor,
 * and a close is not currently in progress.
 */
int
checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp,
              int closedcounter)
{
        struct fdcache *fdc;
        int error;

        cpu_lfence();
        if (fdp->fd_closedcounter == closedcounter)
                return 0;

        if (td->td_proc && td->td_proc->p_fd == fdp) {
                for (fdc = &td->td_fdcache[0];
                     fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
                        if (fdc->fd == fd && fdc->fp == fp)
                                return 0;
                }
        }

        spin_lock_shared(&fdp->fd_spin);
        if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
                error = EBADF;
        else
                error = 0;
        spin_unlock_shared(&fdp->fd_spin);
        return (error);
}

/*
 * Associate a file pointer with a previously reserved file descriptor.
 * This function always succeeds.
 *
 * If fp is NULL, the file descriptor is returned to the pool.
 *
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static void
fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
{
        KKASSERT((unsigned)fd < fdp->fd_nfiles);
        KKASSERT(fdp->fd_files[fd].reserved != 0);
        if (fp) {
                fhold(fp);
                /* fclearcache(&fdp->fd_files[fd], NULL, 0); */
                fdp->fd_files[fd].fp = fp;
                fdp->fd_files[fd].reserved = 0;
        } else {
                fdp->fd_files[fd].reserved = 0;
                fdreserve_locked(fdp, fd, -1);
                fdfixup_locked(fdp, fd);
        }
}
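
/*
 * Typical falloc()/fsetfd() lifecycle, as required by the falloc()
 * comments above (minimal sketch; 'lp', 'fdp' and the setup failure
 * condition are placeholders for real initialization):
 *
 *	struct file *fp;
 *	int fd, error;
 *
 *	error = falloc(lp, &fp, &fd);	// fp refcount 1, fd only reserved
 *	if (error)
 *		return (error);
 *	if (setup_failed) {
 *		fsetfd(fdp, NULL, fd);	// back out the reservation
 *		fdrop(fp);
 *	} else {
 *		fsetfd(fdp, fp, fd);	// fd_files[] gains its own ref
 *		fdrop(fp);		// drop falloc()'s initial ref
 *	}
 */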

/*
 * fsetfd() acquires fdp->fd_spin itself, so the caller must NOT already
 * be holding the spinlock.
 */
void
fsetfd(struct filedesc *fdp, struct file *fp, int fd)
{
        spin_lock(&fdp->fd_spin);
        fsetfd_locked(fdp, fp, fd);
        spin_unlock(&fdp->fd_spin);
}

/*
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static
struct file *
funsetfd_locked(struct filedesc *fdp, int fd)
{
        struct file *fp;

        if ((unsigned)fd >= fdp->fd_nfiles)
                return (NULL);
        if ((fp = fdp->fd_files[fd].fp) == NULL)
                return (NULL);
        ++fdp->fd_closedcounter;
        fclearcache(&fdp->fd_files[fd], NULL, 0);
        fdp->fd_files[fd].fp = NULL;
        fdp->fd_files[fd].fileflags = 0;
        ++fdp->fd_closedcounter;

        fdreserve_locked(fdp, fd, -1);
        fdfixup_locked(fdp, fd);

        return(fp);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
{
        int error;

        spin_lock_shared(&fdp->fd_spin);
        if (((u_int)fd) >= fdp->fd_nfiles) {
                error = EBADF;
        } else if (fdp->fd_files[fd].fp == NULL) {
                error = EBADF;
        } else {
                *flagsp = fdp->fd_files[fd].fileflags;
                error = 0;
        }
        spin_unlock_shared(&fdp->fd_spin);

        return (error);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
{
        int error;

        spin_lock(&fdp->fd_spin);
        if (((u_int)fd) >= fdp->fd_nfiles) {
                error = EBADF;
        } else if (fdp->fd_files[fd].fp == NULL) {
                error = EBADF;
        } else {
                fdp->fd_files[fd].fileflags |= add_flags;
                error = 0;
        }
        spin_unlock(&fdp->fd_spin);

        return (error);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
{
        int error;

        spin_lock(&fdp->fd_spin);
        if (((u_int)fd) >= fdp->fd_nfiles) {
                error = EBADF;
        } else if (fdp->fd_files[fd].fp == NULL) {
                error = EBADF;
        } else {
                fdp->fd_files[fd].fileflags &= ~rem_flags;
                error = 0;
        }
        spin_unlock(&fdp->fd_spin);

        return (error);
}

/*
 * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
 */
void
fsetcred(struct file *fp, struct ucred *ncr)
{
        struct ucred *ocr;
        struct uidinfo *uip;
        struct uidcount *pup;
        int cpu = mycpuid;
        int count;

        ocr = fp->f_cred;
        if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
                if (ocr) {
                        uip = ocr->cr_uidinfo;
                        pup = &uip->ui_pcpu[cpu];
                        atomic_add_int(&pup->pu_openfiles, -1);
                        if (pup->pu_openfiles < -PUP_LIMIT ||
                            pup->pu_openfiles > PUP_LIMIT) {
                                count = atomic_swap_int(&pup->pu_openfiles, 0);
                                atomic_add_int(&uip->ui_openfiles, count);
                        }
                }
                if (ncr) {
                        uip = ncr->cr_uidinfo;
                        pup = &uip->ui_pcpu[cpu];
                        atomic_add_int(&pup->pu_openfiles, 1);
                        if (pup->pu_openfiles < -PUP_LIMIT ||
                            pup->pu_openfiles > PUP_LIMIT) {
                                count = atomic_swap_int(&pup->pu_openfiles, 0);
                                atomic_add_int(&uip->ui_openfiles, count);
                        }
                }
        }
        if (ncr)
                crhold(ncr);
        fp->f_cred = ncr;
        if (ocr)
                crfree(ocr);
}
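
/*
 * fsetcred() keeps the per-user open file count in per-cpu buckets
 * (ui_pcpu[cpu].pu_openfiles) and only folds a bucket into the global
 * ui_openfiles once its magnitude exceeds PUP_LIMIT.  This keeps the
 * hot open/close path from contending on a single counter; the trade
 * is that ui_openfiles can drift until fdalloc_locked() forces an
 * exact rollup before returning ENFILE.
 */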
 */
static
void
ffree(struct file *fp)
{
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	fsetcred(fp, NULL);
	if (fp->f_nchandle.ncp)
		cache_drop(&fp->f_nchandle);
	objcache_put(file_objcache, fp);
}

/*
 * Called from init_main to initialize filedesc0 for proc0.
 */
void
fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
{
	p0->p_fd = fdp0;
	p0->p_fdtol = NULL;
	fdp0->fd_refcnt = 1;
	fdp0->fd_cmask = cmask;
	fdp0->fd_files = fdp0->fd_builtin_files;
	fdp0->fd_nfiles = NDFILE;
	fdp0->fd_lastfile = -1;
	spin_init(&fdp0->fd_spin, "fdinitbootstrap");
}

/*
 * Build a new filedesc structure.
 */
struct filedesc *
fdinit(struct proc *p)
{
	struct filedesc *newfdp;
	struct filedesc *fdp = p->p_fd;

	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_cdir) {
		newfdp->fd_cdir = fdp->fd_cdir;
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}

	/*
	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
	 * proc0, but should unconditionally exist in other processes.
	 */
	if (fdp->fd_rdir) {
		newfdp->fd_rdir = fdp->fd_rdir;
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if (fdp->fd_jdir) {
		newfdp->fd_jdir = fdp->fd_jdir;
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	spin_unlock(&fdp->fd_spin);

	/* Create the file descriptor table. */
	newfdp->fd_refcnt = 1;
	newfdp->fd_cmask = cmask;
	newfdp->fd_files = newfdp->fd_builtin_files;
	newfdp->fd_nfiles = NDFILE;
	newfdp->fd_lastfile = -1;
	spin_init(&newfdp->fd_spin, "fdinit");

	return (newfdp);
}

/*
 * Share a filedesc structure.
 */
struct filedesc *
fdshare(struct proc *p)
{
	struct filedesc *fdp;

	fdp = p->p_fd;
	spin_lock(&fdp->fd_spin);
	fdp->fd_refcnt++;
	spin_unlock(&fdp->fd_spin);
	return (fdp);
}

/*
 * Copy a filedesc structure.
 */
int
fdcopy(struct proc *p, struct filedesc **fpp)
{
	struct filedesc *fdp = p->p_fd;
	struct filedesc *newfdp;
	struct fdnode *fdnode;
	int i;
	int ni;

	/*
	 * Certain daemons might not have file descriptors.
	 */
	if (fdp == NULL)
		return (0);

	/*
	 * Allocate the new filedesc and fd_files[] array.  This can race
	 * with operations by other threads on the fdp so we have to be
	 * careful.
	 */
	newfdp = kmalloc(sizeof(struct filedesc),
			 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK);
	if (newfdp == NULL) {
		*fpp = NULL;
		return (-1);
	}
again:
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_lastfile < NDFILE) {
		newfdp->fd_files = newfdp->fd_builtin_files;
		i = NDFILE;
	} else {
		/*
		 * We have to allocate (2^N-1) entries for our in-place
		 * binary tree.  Allow the table to shrink.
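		 *
		 * Worked example (illustrative): with fd_nfiles == 1023
		 * and fd_lastfile == 100, the loop below walks
		 * i = 1023 -> 511 -> 255 -> 127 and stops once ni == 63
		 * no longer covers fd_lastfile, so a 127-entry (2^7-1)
		 * table is allocated for the copy.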
		 */
		i = fdp->fd_nfiles;
		ni = (i - 1) / 2;
		while (ni > fdp->fd_lastfile && ni > NDFILE) {
			i = ni;
			ni = (i - 1) / 2;
		}
		spin_unlock(&fdp->fd_spin);
		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
					   M_FILEDESC, M_WAITOK | M_ZERO);

		/*
		 * Check for race, retry
		 */
		spin_lock(&fdp->fd_spin);
		if (i <= fdp->fd_lastfile) {
			spin_unlock(&fdp->fd_spin);
			kfree(newfdp->fd_files, M_FILEDESC);
			goto again;
		}
	}

	/*
	 * Dup the remaining fields.  vref() and cache_hold() can be
	 * safely called while holding the spinlock on fdp.
	 *
	 * The spinlock on fdp is still being held.
	 *
	 * NOTE: vref and cache_hold calls for the case where the vnode
	 * or cache entry already has at least one ref may be called
	 * while holding spin locks.
	 */
	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}
	/*
	 * We must check for fd_rdir here, at least for now because
	 * the init process is created before we have access to the
	 * rootvnode to take a reference to it.
	 */
	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	newfdp->fd_refcnt = 1;
	newfdp->fd_nfiles = i;
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_freefile = fdp->fd_freefile;
	newfdp->fd_cmask = fdp->fd_cmask;
	spin_init(&newfdp->fd_spin, "fdcopy");

	/*
	 * Copy the descriptor table through (i).  This also copies the
	 * allocation state.  Then go through and ref the file pointers
	 * and clean up any KQ descriptors.
	 *
	 * kq descriptors cannot be copied.  Since we haven't ref'd the
	 * copied files yet we can ignore the return value from funsetfd().
	 *
	 * The spinlock on fdp is still being held.
	 *
	 * Be sure to clean out fdnode->tdcache, otherwise bad things will
	 * happen.
	 */
	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
	for (i = 0; i < newfdp->fd_nfiles; ++i) {
		fdnode = &newfdp->fd_files[i];
		if (fdnode->reserved) {
			fdreserve_locked(newfdp, i, -1);
			fdnode->reserved = 0;
			fdfixup_locked(newfdp, i);
		} else if (fdnode->fp) {
			bzero(&fdnode->tdcache, sizeof(fdnode->tdcache));
			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
				(void)funsetfd_locked(newfdp, i);
			} else {
				fhold(fdnode->fp);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);
	*fpp = newfdp;
	return (0);
}

/*
 * Release a filedesc structure.
 *
 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
 */
void
fdfree(struct proc *p, struct filedesc *repl)
{
	struct filedesc *fdp;
	struct fdnode *fdnode;
	int i;
	struct filedesc_to_leader *fdtol;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;

	/*
	 * Before destroying or replacing p->p_fd we must be sure to
	 * clean out the cache of the last thread, which should be
	 * curthread.
	 */
	fexitcache(curthread);

	/*
	 * Certain daemons might not have file descriptors.
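	 *
	 * (A process that never went through fdinit()/fdcopy(), e.g. a
	 * pure kernel thread, can legitimately have p->p_fd == NULL
	 * here; in that case we just install the replacement table and
	 * return.)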
	 */
	fdp = p->p_fd;
	if (fdp == NULL) {
		p->p_fd = repl;
		return;
	}

	/*
	 * Severe messing around to follow.
	 */
	spin_lock(&fdp->fd_spin);

	/* Check for special need to clear POSIX style locks */
	fdtol = p->p_fdtol;
	if (fdtol != NULL) {
		KASSERT(fdtol->fdl_refcount > 0,
			("filedesc_to_refcount botch: fdl_refcount=%d",
			 fdtol->fdl_refcount));
		if (fdtol->fdl_refcount == 1 && p->p_leader->p_advlock_flag) {
			for (i = 0; i <= fdp->fd_lastfile; ++i) {
				fdnode = &fdp->fd_files[i];
				if (fdnode->fp == NULL ||
				    fdnode->fp->f_type != DTYPE_VNODE) {
					continue;
				}
				fp = fdnode->fp;
				fhold(fp);
				spin_unlock(&fdp->fd_spin);

				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdrop(fp);
				spin_lock(&fdp->fd_spin);
			}
		}
retry:
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    p->p_leader->p_advlock_flag) {
				/*
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				 */
				fdp->fd_holdleaderswakeup = 1;
				ssleep(&fdp->fd_holdleaderscount,
				       &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
			if (fdtol->fdl_holdcount > 0) {
				/*
				 * Ensure that fdtol->fdl_leader
				 * remains valid in closef().
				 */
				fdtol->fdl_wakeup = 1;
				ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else {
			fdtol = NULL;
		}
		p->p_fdtol = NULL;
		if (fdtol != NULL) {
			spin_unlock(&fdp->fd_spin);
			kfree(fdtol, M_FILEDESC_TO_LEADER);
			spin_lock(&fdp->fd_spin);
		}
	}
	if (--fdp->fd_refcnt > 0) {
		spin_unlock(&fdp->fd_spin);
		spin_lock(&p->p_spin);
		p->p_fd = repl;
		spin_unlock(&p->p_spin);
		return;
	}

	/*
	 * Even though we are the last reference to the structure, allproc
	 * scans may still reference the structure.  Maintain proper
	 * locks until we can replace p->p_fd.
	 *
	 * Also note that kqueue's closef still needs to reference the
	 * fdp via p->p_fd, so we have to close the descriptors before
	 * we replace p->p_fd.
	 */
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		if (fdp->fd_files[i].fp) {
			fp = funsetfd_locked(fdp, i);
			if (fp) {
				spin_unlock(&fdp->fd_spin);
				if (SLIST_FIRST(&fp->f_klist))
					knote_fdclose(fp, fdp, i);
				closef(fp, p);
				spin_lock(&fdp->fd_spin);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);

	/*
	 * Interlock against allproc scan operations (typically frevoke).
	 */
	spin_lock(&p->p_spin);
	p->p_fd = repl;
	spin_unlock(&p->p_spin);

	/*
	 * Wait for any softrefs to go away.  This race rarely occurs so
	 * we can use a non-critical-path style poll/sleep loop.  The
	 * race only occurs against allproc scans.
	 *
	 * No new softrefs can occur with the fdp disconnected from the
	 * process.
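	 *
	 * The acquire side (see sysctl_kern_file_callback() below)
	 * pairs with this poll loop roughly as follows (sketch):
	 *
	 *	spin_lock(&p->p_spin);
	 *	if ((fdp = p->p_fd) != NULL)
	 *		atomic_add_int(&fdp->fd_softrefs, 1);
	 *	spin_unlock(&p->p_spin);
	 *	... inspect fdp ...
	 *	atomic_subtract_int(&fdp->fd_softrefs, 1);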
	 */
	if (fdp->fd_softrefs) {
		kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
		while (fdp->fd_softrefs)
			tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
	}

	if (fdp->fd_files != fdp->fd_builtin_files)
		kfree(fdp->fd_files, M_FILEDESC);
	if (fdp->fd_cdir) {
		cache_drop(&fdp->fd_ncdir);
		vrele(fdp->fd_cdir);
	}
	if (fdp->fd_rdir) {
		cache_drop(&fdp->fd_nrdir);
		vrele(fdp->fd_rdir);
	}
	if (fdp->fd_jdir) {
		cache_drop(&fdp->fd_njdir);
		vrele(fdp->fd_jdir);
	}
	kfree(fdp, M_FILEDESC);
}

/*
 * Retrieve and reference the file pointer associated with a descriptor.
 *
 * td must be the current thread.
 */
struct file *
holdfp(thread_t td, int fd, int flag)
{
	struct file *fp;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if ((fp->f_flag & flag) == 0 && flag != -1) {
			fdrop(fp);
			fp = NULL;
		}
	}
	return fp;
}

/*
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
 *
 * td must be the current thread.
 */
int
holdsock(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	/*
	 * Lockless shortcut
	 */
	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_SOCKET) {
			fdrop(fp);
			fp = NULL;
			error = ENOTSOCK;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}

/*
 * Convert a user file descriptor to a held file pointer.
 *
 * td must be the current thread.
 */
int
holdvnode(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
			fdrop(fp);
			fp = NULL;
			error = EINVAL;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}

/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would
 * otherwise be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.  We
 * go ahead and just reject all procfs file system accesses as dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE &&
	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
		return (1);
	return (0);
}

/*
 * Make this setugid thing safe, if at all possible.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
setugidsafety(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * note: fdp->fd_files may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
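	 *
	 * Hence the loop below re-evaluates fdp->fd_files[i] on every
	 * iteration; caching the array pointer across a blocking
	 * closef(), e.g.
	 *
	 *	struct fdnode *fdn = fdp->fd_files;	(unsafe)
	 *
	 * could leave it dangling once the table is reallocated.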
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
			struct file *fp;

			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}

/*
 * Close all CLOEXEC files on exec.
 *
 * Only a single thread remains for the current process.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
fdcloseexec(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * We cannot cache fd_files since operations may block and rip
	 * them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_files[i].fp != NULL &&
		    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
			struct file *fp;

			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 *
			 * (funsetfd*() also clears the fd cache)
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}

/*
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 *
 * NOT MPSAFE - calls falloc, vn_open, etc
 */
int
fdcheckstd(struct lwp *lp)
{
	struct nlookupdata nd;
	struct filedesc *fdp;
	struct file *fp;
	int retval;
	int i, error, flags, devnull;

	fdp = lp->lwp_proc->p_fd;
	if (fdp == NULL)
		return (0);
	devnull = -1;
	error = 0;
	for (i = 0; i < 3; i++) {
		if (fdp->fd_files[i].fp != NULL)
			continue;
		if (devnull < 0) {
			if ((error = falloc(lp, &fp, &devnull)) != 0)
				break;

			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
					     NLC_FOLLOW|NLC_LOCKVP);
			flags = FREAD | FWRITE;
			if (error == 0)
				error = vn_open(&nd, fp, flags, 0);
			if (error == 0)
				fsetfd(fdp, fp, devnull);
			else
				fsetfd(fdp, NULL, devnull);
			fdrop(fp);
			nlookup_done(&nd);
			if (error)
				break;
			KKASSERT(i == devnull);
		} else {
			error = kern_dup(DUP_FIXED, devnull, i, &retval);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: p may be NULL when closing a file that was being passed in a
 * message.
 *
 * MPALMOSTSAFE - acquires mplock for VOP operations
 */
int
closef(struct file *fp, struct proc *p)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;

	if (fp == NULL)
		return (0);

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
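	 *
	 * (POSIX locks are keyed on the lock owner, here p->p_leader,
	 * and released with F_POSIX below; flock()-style locks are
	 * keyed on the struct file itself and are instead released via
	 * the FHASLOCK path in fdrop().)
	 *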
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p != NULL && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		if (p->p_leader->p_advlock_flag) {
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			vp = (struct vnode *)fp->f_data;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
				    &lf, F_POSIX);
		}
		fdtol = p->p_fdtol;
		if (fdtol != NULL) {
			lwkt_gettoken(&p->p_token);

			/*
			 * Handle special case where file descriptor table
			 * is shared between multiple process leaders.
			 */
			for (fdtol = fdtol->fdl_next;
			     fdtol != p->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if (fdtol->fdl_leader->p_advlock_flag == 0)
					continue;
				fdtol->fdl_holdcount++;
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
			lwkt_reltoken(&p->p_token);
		}
	}
	return (fdrop(fp));
}

/*
 * fhold() can only be called if f_count is already at least 1 (i.e. the
 * caller of fhold() already has a reference to the file pointer in some
 * manner or other).
 *
 * Atomic ops are used for incrementing and decrementing f_count before
 * the 1->0 transition.  The f_count 1->0 transition is special, see the
 * comment in fdrop().
 */
void
fhold(struct file *fp)
{
	/* 0->1 transition will never work */
	KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count));
	atomic_add_int(&fp->f_count, 1);
}

/*
 * fdrop() - drop a reference to a descriptor
 */
int
fdrop(struct file *fp)
{
	struct flock lf;
	struct vnode *vp;
	int error, do_free = 0;

	/*
	 * NOTE:
	 * Simple atomic_fetchadd_int(f_count, -1) here will cause use-
	 * after-free or double free (due to f_count 0->1 transition), if
	 * fhold() is called on the fps found through filehead iteration.
	 */
	for (;;) {
		int count = fp->f_count;

		cpu_ccfence();
		KASSERT(count > 0, ("fdrop: invalid f_count %d", count));
		if (count == 1) {
			struct filelist_head *head = fp2filelist(fp);

			/*
			 * About to drop the last reference, hold the
			 * filehead spin lock and drop it, so that no
			 * one could see this fp through filehead anymore,
			 * let alone fhold() this fp.
			 */
			spin_lock(&head->spin);
			if (atomic_cmpset_int(&fp->f_count, count, 0)) {
				LIST_REMOVE(fp, f_list);
				spin_unlock(&head->spin);
				atomic_subtract_int(&nfiles, 1);
				do_free = 1;	/* free this fp */
				break;
			}
			spin_unlock(&head->spin);
			/* retry */
		} else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) {
			break;
		}
		/* retry */
	}
	if (!do_free)
		return (0);

	KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL);

	/*
	 * The last reference has gone away, we own the fp structure free
	 * and clear.
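	 *
	 * (The fp was already unlinked from its filelist_head bucket
	 * under head->spin before f_count reached 0, so no allfiles
	 * scan can find it and fhold() it behind our back.)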
	 */
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
	}
	if (fp->f_ops != &badfileops)
		error = fo_close(fp);
	else
		error = 0;
	ffree(fp);
	return (error);
}

/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 *
 * MPALMOSTSAFE
 */
int
sys_flock(struct flock_args *uap)
{
	thread_t td = curthread;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int error;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_VNODE) {
		error = EOPNOTSUPP;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
		goto done;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done;
	}
	if (uap->how & LOCK_NB)
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
	else
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
	atomic_set_int(&fp->f_flag, FHASLOCK);	/* race ok */
done:
	fdrop(fp);
	return (error);
}

/*
 * File Descriptor pseudo-device driver (/dev/fd/).
 *
 * Opening minor device N dup()s the file (if any) connected to file
 * descriptor N belonging to the calling process.  Note that this driver
 * consists of only the ``open()'' routine, because all subsequent
 * references to this file will be direct to the other driver.
 */
static int
fdopen(struct dev_open_args *ap)
{
	thread_t td = curthread;

	KKASSERT(td->td_lwp != NULL);

	/*
	 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
	 * file descriptor being sought for duplication.  The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open.  Open will detect this special error and take the
	 * actions in dupfdopen below.  Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	 */
	td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
	return (ENODEV);
}

/*
 * The caller has reserved the file descriptor dfd for us.  On success we
 * must fsetfd() it.  On failure the caller will clean it up.
 */
int
dupfdopen(thread_t td, int dfd, int sfd, int mode, int error)
{
	struct filedesc *fdp;
	struct file *wfp;
	struct file *xfp;
	int werror;

	if ((wfp = holdfp(td, sfd, -1)) == NULL)
		return (EBADF);

	/*
	 * Close a revoke/dup race.  Duping a descriptor marked as revoked
	 * will dup a dummy descriptor instead of the real one.
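	 *
	 * (The dummy comes from falloc(NULL, &wfp, NULL) below: its
	 * f_ops remains &badfileops, so every subsequent operation on
	 * the duped descriptor simply fails with EBADF.)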
	 */
	if (wfp->f_flag & FREVOKED) {
		kprintf("Warning: attempt to dup() a revoked descriptor\n");
		fdrop(wfp);
		wfp = NULL;
		werror = falloc(NULL, &wfp, NULL);
		if (werror)
			return (werror);
	}

	fdp = td->td_proc->p_fd;

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup sfd to file descriptor dfd and return.
	 *
	 * For ENXIO steal away the file structure from sfd and store it
	 * in dfd.  sfd is effectively closed by this operation.
	 *
	 * Any other error code is just returned.
	 */
	switch (error) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
			error = EACCES;
			break;
		}
		spin_lock(&fdp->fd_spin);
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd_locked(fdp, wfp, dfd);
		spin_unlock(&fdp->fd_spin);
		error = 0;
		break;
	case ENXIO:
		/*
		 * Steal away the file pointer from sfd, and stuff it
		 * into dfd.
		 */
		spin_lock(&fdp->fd_spin);
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd_locked(fdp, wfp, dfd);
		if ((xfp = funsetfd_locked(fdp, sfd)) != NULL) {
			spin_unlock(&fdp->fd_spin);
			fdrop(xfp);
		} else {
			spin_unlock(&fdp->fd_spin);
		}
		error = 0;
		break;
	default:
		break;
	}
	fdrop(wfp);
	return (error);
}

/*
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
 */
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct proc *leader)
{
	struct filedesc_to_leader *fdtol;

	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
			M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
	} else {
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return fdtol;
}

/*
 * Scan all file pointers in the system.  The callback is made with
 * the master list spinlock held exclusively.
 */
void
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];
		struct file *fp;

		spin_lock(&head->spin);
		LIST_FOREACH(fp, &head->list, f_list) {
			int res;

			res = callback(fp, data);
			if (res < 0)
				break;
		}
		spin_unlock(&head->spin);
	}
}

/*
 * Get file structures.
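 *
 * Userland consumes this as the kern.file sysctl; a rough usage sketch
 * of the consumer side (hedged, not part of this file):
 *
 *	size_t len;
 *	if (sysctlbyname("kern.file", NULL, &len, NULL, 0) == 0) {
 *		struct kinfo_file *kf = malloc(len);
 *		if (sysctlbyname("kern.file", kf, &len, NULL, 0) == 0)
 *			n = len / sizeof(*kf);
 *	}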
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 */

struct sysctl_kern_file_info {
	int count;
	int error;
	struct sysctl_req *req;
};

static int sysctl_kern_file_callback(struct proc *p, void *data);

static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_kern_file_info info;

	/*
	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
	 *
	 * When just doing a count, note that we cannot just count
	 * the elements and add f_count via the filehead list because
	 * threaded processes share their descriptor table and f_count might
	 * still be '1' in that case.
	 *
	 * Since the SYSCTL op can block, we must hold the process to
	 * prevent it being ripped out from under us either in the
	 * file descriptor loop or in the greater LIST_FOREACH.  The
	 * process may be in varying states of disrepair.  If the process
	 * is in SZOMB we may have caught it just as it is being removed
	 * from the allproc list, we must skip it in that case to maintain
	 * an unbroken chain through the allproc list.
	 */
	info.count = 0;
	info.error = 0;
	info.req = req;
	allproc_scan(sysctl_kern_file_callback, &info, 0);

	/*
	 * When just calculating the size, overestimate a bit to try to
	 * prevent system activity from causing the buffer-fill call
	 * to fail later on.
	 */
	if (req->oldptr == NULL) {
		info.count = (info.count + 16) + (info.count / 10);
		info.error = SYSCTL_OUT(req, NULL,
					info.count * sizeof(struct kinfo_file));
	}
	return (info.error);
}

static int
sysctl_kern_file_callback(struct proc *p, void *data)
{
	struct sysctl_kern_file_info *info = data;
	struct kinfo_file kf;
	struct filedesc *fdp;
	struct file *fp;
	uid_t uid;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return (0);
	if (PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) == 0)
		return (0);

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return (0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);

	/*
	 * The fdp's own spinlock prevents the contents from being
	 * modified.
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (info->req->oldptr == NULL) {
			++info->count;
		} else {
			uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
			kcore_make_file(&kf, fp, p->p_pid, uid, n);
			spin_unlock_shared(&fdp->fd_spin);
			info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
			spin_lock_shared(&fdp->fd_spin);
			if (info->error)
				break;
		}
	}
	spin_unlock_shared(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	if (info->error)
		return (-1);
	return (0);
}

SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
	    0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW,
	   &minfilesperproc, 0, "Minimum files allowed open per process");
SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
	   &maxfilesperproc, 0, "Maximum files allowed open per process");
SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW,
	   &maxfilesperuser, 0, "Maximum files allowed open per user");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
	   &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
	   &maxfilesrootres, 0, "Descriptors reserved for root use");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
	   &nfiles, 0, "System-wide number of open files");

static void
fildesc_drvinit(void *unused)
{
	int fd;

	for (fd = 0; fd < NUMFDESC; fd++) {
		make_dev(&fildesc_ops, fd,
			 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
	}

	make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
	make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
	make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
}

struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
	.fo_shutdown = badfo_shutdown
};

int
badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *cred,
		int flags)
{
	return (EBADF);
}

int
badfo_ioctl(struct file *fp, u_long com, caddr_t data,
	    struct ucred *cred, struct sysmsg *msgv)
{
	return (EBADF);
}

/*
 * Must return an error to prevent registration, typically
 * due to a revoked descriptor (file_filtops assigned).
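 *
 * (falloc() installs &badfileops on every new fp until the open path
 * replaces it, so a half-initialized or revoked descriptor naturally
 * falls through to these handlers.)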
 */
int
badfo_kqfilter(struct file *fp, struct knote *kn)
{
	return (EOPNOTSUPP);
}

int
badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	return (EBADF);
}

int
badfo_close(struct file *fp)
{
	return (EBADF);
}

int
badfo_shutdown(struct file *fp, int how)
{
	return (EBADF);
}

int
nofo_shutdown(struct file *fp, int how)
{
	return (EOPNOTSUPP);
}

SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
	fildesc_drvinit, NULL);

static void
filelist_heads_init(void *arg __unused)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];

		spin_init(&head->spin, "filehead_spin");
		LIST_INIT(&head->list);
	}
}

SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY,
	filelist_heads_init, NULL);

static void
file_objcache_init(void *dummy __unused)
{
	file_objcache = objcache_create("file", maxfiles, maxfiles / 8,
					NULL, NULL, NULL, /* TODO: ctor/dtor */
					objcache_malloc_alloc,
					objcache_malloc_free,
					&file_malloc_args);
}
SYSINIT(fpobjcache, SI_BOOT2_POST_SMP, SI_ORDER_ANY, file_objcache_init, NULL);
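/*
 * Example (illustrative sketch, not part of the original file): counting
 * every open file in the system with allfiles_scan_exclusive().  The
 * callback runs with the per-bucket filelist spinlock held, so it must
 * not block; a negative return value stops the scan of that bucket.
 *
 *	static int
 *	count_file_cb(struct file *fp, void *data)
 *	{
 *		++*(int *)data;
 *		return (0);		continue scanning
 *	}
 *
 *	int count = 0;
 *	allfiles_scan_exclusive(count_file_cb, &count);
 */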