/*
 * Copyright (c) 2005-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysmsg.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/kern_syscall.h>
#include <sys/kcore.h>
#include <sys/kinfo.h>
#include <sys/un.h>
#include <sys/objcache.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>

static int fdalloc_locked(struct proc *p, struct filedesc *fdp,
			int want, int *result);
static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
static void fdreserve_locked(struct filedesc *fdp, int fd0, int incr);
static struct file *funsetfd_locked(struct filedesc *fdp, int fd);
static void ffree(struct file *fp);

static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
		"file desc to leader structures");
static MALLOC_DEFINE_OBJ(M_FILE, sizeof(struct file),
		"file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

static struct krate krate_uidinfo = { .freq = 1 };

static d_open_t fdopen;
#define NUMFDESC 64

#define CDEV_MAJOR 22
static struct dev_ops fildesc_ops = {
	{ "FD", 0, 0 },
	.d_open =	fdopen,
};

/*
 * Descriptor management.
 */
#ifndef NFILELIST_HEADS
#define NFILELIST_HEADS		257	/* prime number */
#endif

struct filelist_head {
	struct spinlock		spin;
	struct filelist		list;
} __cachealign;

static struct filelist_head	filelist_heads[NFILELIST_HEADS];

static int nfiles;		/* actual number of open files */
extern int cmask;

struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token);

/*
 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
 *
 * Must be called with fdp->fd_spin exclusively held.
 */
static __inline
void
fdfixup_locked(struct filedesc *fdp, int fd)
{
	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}
	while (fdp->fd_lastfile >= 0 &&
	       fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
	       fdp->fd_files[fdp->fd_lastfile].reserved == 0
	) {
		--fdp->fd_lastfile;
	}
}
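
/*
 * Quick reference (editorial summary, not normative) for the per-thread
 * fd cache locking protocol used throughout this file.  Each struct
 * fdcache slot has a 'locked' word with three states:
 *
 *	0	slot idle; any fp ref the slot holds is owned by the cache
 *	1	slot transiently locked by some thread
 *	2	slot holds a ref that has been borrowed by the owning thread
 *
 * Slots are always acquired with an atomic swap, e.g.:
 *
 *	status = atomic_swap_int(&fdc->locked, 1);
 *	if (status == 1)
 *		// someone else owns the slot, skip or spin
 *	else
 *		// we own the slot; its previous state was (status)
 */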

/*
 * Clear the fd thread caches for this fdnode.
 *
 * If match_fdc is NULL, all thread caches of fdn will be cleared.
 * The caller must hold fdp->fd_spin exclusively.  The threads caching
 * the descriptor do not have to be the current thread.  The (status)
 * argument is ignored.
 *
 * If match_fdc is not NULL, only the match_fdc's cache will be cleared.
 * The caller must hold fdp->fd_spin shared and match_fdc must match a
 * fdcache entry in curthread.  match_fdc has been locked by the caller
 * and had the specified (status).
 *
 * Since we are matching against a fp in the fdp (which must still be present
 * at this time), fp will have at least two refs on any match and we can
 * decrement the count trivially.
 */
static
void
fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status)
{
	struct fdcache *fdc;
	struct file *fp;
	int i;

	/*
	 * match_fdc == NULL	We are cleaning out all tdcache entries
	 *			for the fdn and hold fdp->fd_spin exclusively.
	 *			This can race against the target threads
	 *			cleaning out specific entries.
	 *
	 * match_fdc != NULL	We are cleaning out a specific tdcache
	 *			entry on behalf of the owning thread
	 *			and hold fdp->fd_spin shared.  The thread
	 *			has already locked the entry.  This cannot
	 *			race.
	 */
	fp = fdn->fp;
	for (i = 0; i < NTDCACHEFD; ++i) {
		if ((fdc = fdn->tdcache[i]) == NULL)
			continue;

		/*
		 * If match_fdc is non-NULL we are being asked to
		 * clear a specific fdc owned by curthread.  There must
		 * be exactly one match.  The caller has already locked
		 * the cache entry and will dispose of the lock after
		 * we return.
		 *
		 * Since we also have a shared lock on fdp, we
		 * can do this without atomic ops.
		 */
		if (match_fdc) {
			if (fdc != match_fdc)
				continue;
			fdn->tdcache[i] = NULL;
			KASSERT(fp == fdc->fp,
				("fclearcache(1): fp mismatch %p/%p\n",
				fp, fdc->fp));
			fdc->fp = NULL;
			fdc->fd = -1;

			/*
			 * status can be 0 or 2.  If 2 the ref is borrowed,
			 * if 0 the ref is not borrowed and we have to drop
			 * it.
			 */
			if (status == 0)
				atomic_add_int(&fp->f_count, -1);
			fdn->isfull = 0;	/* heuristic */
			return;
		}

		/*
		 * Otherwise we hold an exclusive spin-lock and can only
		 * race thread consumers borrowing cache entries.
		 *
		 * Acquire the lock and dispose of the entry.  We have to
		 * spin until we get the lock.
		 */
		for (;;) {
			status = atomic_swap_int(&fdc->locked, 1);
			if (status == 1) {	/* foreign lock, retry */
				cpu_pause();
				continue;
			}
			fdn->tdcache[i] = NULL;
			KASSERT(fp == fdc->fp,
				("fclearcache(2): fp mismatch %p/%p\n",
				fp, fdc->fp));
			fdc->fp = NULL;
			fdc->fd = -1;
			if (status == 0)
				atomic_add_int(&fp->f_count, -1);
			fdn->isfull = 0;	/* heuristic */
			atomic_swap_int(&fdc->locked, 0);
			break;
		}
	}
	KKASSERT(match_fdc == NULL);
}

/*
 * Retrieve the fp for the specified fd given the specified file descriptor
 * table.  The fdp does not have to be owned by the current process.
 * If flag != -1, fp->f_flag must contain at least one of the flags.
 *
 * This function is not able to cache the fp.
 */
struct file *
holdfp_fdp(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	spin_lock_shared(&fdp->fd_spin);
	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1) {
				fp = NULL;
			} else {
				fhold(fp);
			}
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}
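
/*
 * Illustrative sketch (not part of the original code): a typical
 * consumer of holdfp_fdp() validates an access mode and releases the
 * ref with fdrop() when done.  The fdp may belong to another process
 * because only the table spinlock is relied upon:
 *
 *	struct file *fp;
 *
 *	fp = holdfp_fdp(fdp, fd, FREAD);	// NULL if fd bad or !FREAD
 *	if (fp) {
 *		// ... use fp ...
 *		fdrop(fp);
 *	}
 */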

struct file *
holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1) {
				fp = NULL;
			} else {
				fhold(fp);
			}
		}
	} else {
		fp = NULL;
	}
	return fp;
}

/*
 * Acquire the fp for the specified file descriptor, using the thread
 * cache if possible and caching it if possible.
 *
 * td must be the current thread.
 */
static
struct file *
_holdfp_cache(thread_t td, int fd)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	struct fdcache *best;
	struct fdnode *fdn;
	struct file *fp;
	int status;
	int delta;
	int i;

	/*
	 * Fast path: check the per-thread cache first.
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fd != fd || fdc->fp == NULL)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);

		/*
		 * If someone else has locked our cache entry they are in
		 * the middle of clearing it, skip the entry.
		 */
		if (status == 1)
			continue;

		/*
		 * We have locked the entry, but if it no longer matches
		 * restore the previous state (0 or 2) and skip the entry.
		 */
		if (fdc->fd != fd || fdc->fp == NULL) {
			atomic_swap_int(&fdc->locked, status);
			continue;
		}

		/*
		 * We have locked a valid entry.  We can borrow the ref
		 * for a mode 0 entry.  We can get a valid fp for a mode
		 * 2 entry but not borrow the ref.
		 */
		if (status == 0) {
			fp = fdc->fp;
			fdc->lru = ++td->td_fdcache_lru;
			atomic_swap_int(&fdc->locked, 2);

			return fp;
		}
		if (status == 2) {
			fp = fdc->fp;
			fhold(fp);
			fdc->lru = ++td->td_fdcache_lru;
			atomic_swap_int(&fdc->locked, 2);

			return fp;
		}
		KKASSERT(0);
	}

	/*
	 * Lookup the descriptor the slow way.  This can contend against
	 * modifying operations in a multi-threaded environment and cause
	 * cache line ping ponging otherwise.
	 */
	fdp = td->td_proc->p_fd;
	spin_lock_shared(&fdp->fd_spin);

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			fhold(fp);
			if (fdp->fd_files[fd].isfull == 0)
				goto enter;
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;

	/*
	 * We found a valid fp and held it, fdp is still shared locked.
	 * Enter the fp into the per-thread cache.  Find the oldest entry
	 * via lru, or an empty entry.
	 *
	 * Because fdp's spinlock is held (shared is fine), no other
	 * thread should be in the middle of clearing our selected entry.
	 */
enter:
	best = &td->td_fdcache[0];
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp == NULL) {
			best = fdc;
			break;
		}
		delta = fdc->lru - best->lru;
		if (delta < 0)
			best = fdc;
	}

	/*
	 * Replace best
	 *
	 * Don't enter into the cache if we cannot get the lock.
	 */
	status = atomic_swap_int(&best->locked, 1);
	if (status == 1)
		goto done;

	/*
	 * Clear the previous cache entry if present
	 */
	if (best->fp) {
		KKASSERT(best->fd >= 0);
		fclearcache(&fdp->fd_files[best->fd], best, status);
	}

	/*
	 * Create our new cache entry.  This entry is 'safe' until we tie
	 * into the fdnode.  If we cannot tie in, we will clear the entry.
	 */
	best->fd = fd;
	best->fp = fp;
	best->lru = ++td->td_fdcache_lru;
	best->locked = 2;			/* borrowed ref */

	fdn = &fdp->fd_files[fd];
	for (i = 0; i < NTDCACHEFD; ++i) {
		if (fdn->tdcache[i] == NULL &&
		    atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) {
			goto done;
		}
	}
	fdn->isfull = 1;			/* no space */
	best->fd = -1;
	best->fp = NULL;
	best->locked = 0;
done:
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}
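
/*
 * Illustrative sketch (not part of the original code): the cache fast
 * path is designed to be paired with dropfp() below so that a borrowed
 * ref (mode 2) can be returned without touching fp->f_count at all:
 *
 *	fp = _holdfp_cache(curthread, fd);	// may borrow a cached ref
 *	if (fp) {
 *		// ... fast file operation ...
 *		dropfp(curthread, fd, fp);	// returns the borrow,
 *						// else fdrop()s
 *	}
 */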

/*
 * holdfp(), bypassing the cache in order to also be able to return
 * the descriptor flags.  A bit of a hack.
 */
static
struct file *
_holdfp2(thread_t td, int fd, char *fflagsp)
{
	struct filedesc *fdp;
	struct file *fp;

	/*
	 * Lookup the descriptor the slow way.  This can contend against
	 * modifying operations in a multi-threaded environment and cause
	 * cache line ping ponging otherwise.
	 */
	fdp = td->td_proc->p_fd;
	spin_lock_shared(&fdp->fd_spin);

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			*fflagsp = fdp->fd_files[fd].fileflags;
			fhold(fp);
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}

/*
 * Drop the file pointer and return to the thread cache if possible.
 *
 * Caller must not hold fdp's spin lock.
 * td must be the current thread.
 */
void
dropfp(thread_t td, int fd, struct file *fp)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;

	fdp = td->td_proc->p_fd;

	/*
	 * If our placeholder is still present we can re-cache the ref.
	 *
	 * Note that we can race an fclearcache().
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp != fp || fdc->fd != fd)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);
		switch(status) {
		case 0:
			/*
			 * Not in mode 2, fdrop fp without caching.
			 */
			atomic_swap_int(&fdc->locked, 0);
			break;
		case 1:
			/*
			 * Not in mode 2, locked by someone else.
			 * fdrop fp without caching.
			 */
			break;
		case 2:
			/*
			 * Intact borrowed ref, return to mode 0
			 * indicating that we have returned the ref.
			 *
			 * Return the borrowed ref (2->1->0)
			 */
			if (fdc->fp == fp && fdc->fd == fd) {
				atomic_swap_int(&fdc->locked, 0);
				return;
			}
			atomic_swap_int(&fdc->locked, 2);
			break;
		}
	}

	/*
	 * Failed to re-cache, drop the fp without caching.
	 */
	fdrop(fp);
}

/*
 * Clear all descriptors cached in the per-thread fd cache for
 * the specified thread.
 *
 * Caller must not hold p_fd->spin.  This function will temporarily
 * obtain a shared spin lock.
 */
void
fexitcache(thread_t td)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;
	int i;

	if (td->td_proc == NULL)
		return;
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/*
	 * A shared lock is sufficient as the caller controls td and we
	 * are only clearing td's cache.
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (i = 0; i < NFDCACHE; ++i) {
		fdc = &td->td_fdcache[i];
		if (fdc->fp) {
			status = atomic_swap_int(&fdc->locked, 1);
			if (status == 1) {
				cpu_pause();
				--i;
				continue;
			}
			if (fdc->fp) {
				KKASSERT(fdc->fd >= 0);
				fclearcache(&fdp->fd_files[fdc->fd], fdc,
					    status);
			}
			atomic_swap_int(&fdc->locked, 0);
		}
	}
	spin_unlock_shared(&fdp->fd_spin);
}

static __inline struct filelist_head *
fp2filelist(const struct file *fp)
{
	u_int i;

	i = (u_int)(uintptr_t)fp % NFILELIST_HEADS;
	return &filelist_heads[i];
}

static __inline
struct plimit *
readplimits(struct proc *p)
{
	thread_t td = curthread;
	struct plimit *limit;

	limit = td->td_limit;
	if (limit != p->p_limit) {
		spin_lock_shared(&p->p_spin);
		limit = p->p_limit;
		atomic_add_int(&limit->p_refcnt, 1);
		spin_unlock_shared(&p->p_spin);
		if (td->td_limit)
			plimit_free(td->td_limit);
		td->td_limit = limit;
	}
	return limit;
}

/*
 * System calls on descriptors.
 */
int
sys_getdtablesize(struct sysmsg *sysmsg, const struct getdtablesize_args *uap)
{
	struct proc *p = curproc;
	struct plimit *limit = readplimits(p);
	int dtsize;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;
	if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
		dtsize = maxfilesperuser;
	sysmsg->sysmsg_result = dtsize;
	return (0);
}

/*
 * Duplicate a file descriptor to a particular value.
 *
 * NOTE: keep in mind that a potential race condition exists when closing
 *	 descriptors from a shared descriptor table (via rfork).
 */
int
sys_dup2(struct sysmsg *sysmsg, const struct dup2_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
	sysmsg->sysmsg_fds[0] = fd;

	return (error);
}

/*
 * Duplicate a file descriptor.
 */
int
sys_dup(struct sysmsg *sysmsg, const struct dup_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
	sysmsg->sysmsg_fds[0] = fd;

	return (error);
}
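
/*
 * Illustrative summary (not part of the original code): the user-visible
 * entry points map onto kern_dup() flags as follows.
 *
 *	dup(fd)			kern_dup(DUP_VARIABLE, fd, 0, &res)
 *	dup2(from, to)		kern_dup(DUP_FIXED, from, to, &res)
 *	fcntl(fd, F_DUPFD)	kern_dup(DUP_VARIABLE | DUP_FCNTL, ...)
 *	fcntl(fd, F_DUPFD_CLOEXEC)
 *				kern_dup(DUP_VARIABLE | DUP_CLOEXEC |
 *					 DUP_FCNTL, ...)
 */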

/*
 * MPALMOSTSAFE - acquires mplock for fp operations
 */
int
kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp;
	u_int newmin;
	u_int oflags;
	u_int nflags;
	int closedcounter;
	int tmp, error, flg = F_POSIX;

	KKASSERT(p);

	/*
	 * Operations on file descriptors that do not require a file pointer.
	 */
	switch (cmd) {
	case F_GETFD:
		error = fgetfdflags(p->p_fd, fd, &tmp);
		if (error == 0)
			dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
		return (error);

	case F_SETFD:
		if (dat->fc_cloexec & FD_CLOEXEC)
			error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
		else
			error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		return (error);
	case F_DUPFD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin,
				 &dat->fc_fd);
		return (error);
	case F_DUPFD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL,
				 fd, newmin, &dat->fc_fd);
		return (error);
	case F_DUP2FD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd);
		return (error);
	case F_DUP2FD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin,
				 &dat->fc_fd);
		return (error);
	default:
		break;
	}

	/*
	 * Operations on file pointers
	 */
	closedcounter = p->p_fd->fd_closedcounter;
	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);

	switch (cmd) {
	case F_GETFL:
		dat->fc_flags = OFLAGS(fp->f_flag);
		error = 0;
		break;

	case F_SETFL:
		oflags = fp->f_flag;
		nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
		nflags |= oflags & ~FCNTLFLAGS;

		error = 0;
		if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
			error = EINVAL;
		if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
			tmp = nflags & FASYNC;
			error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
					 cred, NULL);
		}

		/*
		 * If no error, must be atomically set.
		 */
		while (error == 0) {
			oflags = fp->f_flag;
			cpu_ccfence();
			nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS);
			if (atomic_cmpset_int(&fp->f_flag, oflags, nflags))
				break;
			cpu_pause();
		}
		break;

	case F_GETOWN:
		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETOWN:
		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETLKW:
		flg |= F_WAIT;
		/* Fall into F_SETLK */

	case F_SETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;

		switch (dat->fc_flock.l_type) {
		case F_RDLCK:
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_WRLCK:
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_UNLCK:
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
					    &dat->fc_flock, F_POSIX);
			break;
		default:
			error = EINVAL;
			break;
		}

		/*
		 * It is possible to race a close() on the descriptor while
		 * we were blocked getting the lock.  If this occurs the
		 * close might not have caught the lock.
		 */
		if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) {
			dat->fc_flock.l_whence = SEEK_SET;
			dat->fc_flock.l_start = 0;
			dat->fc_flock.l_len = 0;
			dat->fc_flock.l_type = F_UNLCK;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
				    F_UNLCK, &dat->fc_flock, F_POSIX);
		}
		break;

	case F_GETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;
		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_type != F_RDLCK &&
		    dat->fc_flock.l_type != F_WRLCK &&
		    dat->fc_flock.l_type != F_UNLCK) {
			error = EINVAL;
			break;
		}
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
				    &dat->fc_flock, F_POSIX);
		break;
	default:
		error = EINVAL;
		break;
	}

	fdrop(fp);
	return (error);
}
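
/*
 * Illustrative sketch (not part of the original code): the close-race
 * pattern used by F_SETLK above generalizes to any operation that can
 * block after holding a descriptor.  Snapshot fd_closedcounter before
 * the blocking call, then re-validate afterwards:
 *
 *	closedcounter = p->p_fd->fd_closedcounter;
 *	fp = holdfp(td, fd, -1);
 *	// ... blocking operation ...
 *	if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter))
 *		// descriptor was closed out from under us; undo any
 *		// side effects (here: the advisory lock)
 */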

/*
 * The file control system call.
 */
int
sys_fcntl(struct sysmsg *sysmsg, const struct fcntl_args *uap)
{
	union fcntl_dat dat;
	int error;

	switch (uap->cmd) {
	case F_DUPFD:
	case F_DUP2FD:
	case F_DUPFD_CLOEXEC:
	case F_DUP2FD_CLOEXEC:
		dat.fc_fd = uap->arg;
		break;
	case F_SETFD:
		dat.fc_cloexec = uap->arg;
		break;
	case F_SETFL:
		dat.fc_flags = uap->arg;
		break;
	case F_SETOWN:
		dat.fc_owner = uap->arg;
		break;
	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
			       sizeof(struct flock));
		if (error)
			return (error);
		break;
	}

	error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);

	if (error == 0) {
		switch (uap->cmd) {
		case F_DUPFD:
		case F_DUP2FD:
		case F_DUPFD_CLOEXEC:
		case F_DUP2FD_CLOEXEC:
			sysmsg->sysmsg_result = dat.fc_fd;
			break;
		case F_GETFD:
			sysmsg->sysmsg_result = dat.fc_cloexec;
			break;
		case F_GETFL:
			sysmsg->sysmsg_result = dat.fc_flags;
			break;
		case F_GETOWN:
			sysmsg->sysmsg_result = dat.fc_owner;
			break;
		case F_GETLK:
			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
					sizeof(struct flock));
			break;
		}
	}

	return (error);
}

/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and
 * DUP_CLOEXEC.
 *
 * DUP_FCNTL is for handling EINVAL vs. EBADF differences between
 * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX).
 * The next two flags are mutually exclusive, and the fourth is optional.
 * DUP_FIXED tells kern_dup() to destructively dup over an existing file
 * descriptor if "new" is already open.  DUP_VARIABLE tells kern_dup()
 * to find the lowest unused file descriptor that is greater than or
 * equal to "new".  DUP_CLOEXEC, which works with either of the first
 * two flags, sets the close-on-exec flag on the "new" file descriptor.
 */
int
kern_dup(int flags, int old, int new, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct file *delfp;
	int oldflags;
	int holdleaders;
	int dtsize;
	int error, newfd;

	/*
	 * Verify that we have a valid descriptor to dup from and
	 * possibly to dup to.  When the new descriptor is out of
	 * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must
	 * return EINVAL, while dup2() returns EBADF in
	 * this case.
	 *
	 * NOTE: maxfilesperuser is not applicable to dup()
	 */
retry:
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;

	if (new < 0 || new >= dtsize)
		return (flags & DUP_FCNTL ? EINVAL : EBADF);

	spin_lock(&fdp->fd_spin);
	if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	if ((flags & DUP_FIXED) && old == new) {
		*res = new;
		if (flags & DUP_CLOEXEC)
			fdp->fd_files[new].fileflags |= UF_EXCLOSE;
		spin_unlock(&fdp->fd_spin);
		return (0);
	}
	fp = fdp->fd_files[old].fp;
	oldflags = fdp->fd_files[old].fileflags;
	fhold(fp);

	/*
	 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
	 * if the requested descriptor is beyond the current table size.
	 *
	 * This can block.  Retry if the source descriptor no longer matches
	 * or if our expectation in the expansion case races.
	 *
	 * If we are not expanding or allocating a new descriptor, then reset
	 * the target descriptor to a reserved state so we have a uniform
	 * setup for the next code block.
	 */
	if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) {
		error = fdalloc_locked(p, fdp, new, &newfd);
		if (error) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			return (error);
		}
		/*
		 * Check for ripout
		 */
		if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for expansion race
		 */
		if ((flags & DUP_VARIABLE) == 0 && new != newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for ripout, newfd reused old (this case probably
		 * can't occur).
		 */
		if (old == newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		new = newfd;
		delfp = NULL;
	} else {
		if (fdp->fd_files[new].reserved) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			kprintf("Warning: dup(): target descriptor %d is "
				"reserved, waiting for it to be resolved\n",
				new);
			tsleep(fdp, 0, "fdres", hz);
			goto retry;
		}

		/*
		 * If the target descriptor was never allocated we have
		 * to allocate it.  If it was we have to clean out the
		 * old descriptor.  delfp inherits the ref from the
		 * descriptor table.
		 */
		++fdp->fd_closedcounter;
		fclearcache(&fdp->fd_files[new], NULL, 0);
		++fdp->fd_closedcounter;
		delfp = fdp->fd_files[new].fp;
		fdp->fd_files[new].fp = NULL;
		fdp->fd_files[new].reserved = 1;
		if (delfp == NULL) {
			fdreserve_locked(fdp, new, 1);
			if (new > fdp->fd_lastfile)
				fdp->fd_lastfile = new;
		}
	}

	/*
	 * NOTE: still holding an exclusive spinlock
	 */

	/*
	 * If a descriptor is being overwritten we may have to tell
	 * fdfree() to sleep to ensure that all relevant process
	 * leaders can be traversed in closef().
	 */
	if (delfp != NULL && p->p_fdtol != NULL) {
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	} else {
		holdleaders = 0;
	}
	KASSERT(delfp == NULL || (flags & DUP_FIXED),
		("dup() picked an open file"));

	/*
	 * Duplicate the source descriptor, update lastfile.  If the new
	 * descriptor was not allocated and we aren't replacing an existing
	 * descriptor we have to mark the descriptor as being in use.
	 *
	 * The fd_files[] array inherits fp's hold reference.
	 */
	fsetfd_locked(fdp, fp, new);
	if ((flags & DUP_CLOEXEC) != 0)
		fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE;
	else
		fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
	spin_unlock(&fdp->fd_spin);
	fdrop(fp);
	*res = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 */
	if (delfp) {
		if (SLIST_FIRST(&delfp->f_klist))
			knote_fdclose(delfp, fdp, new);
		closef(delfp, p);
		if (holdleaders) {
			spin_lock(&fdp->fd_spin);
			fdp->fd_holdleaderscount--;
			if (fdp->fd_holdleaderscount == 0 &&
			    fdp->fd_holdleaderswakeup != 0) {
				fdp->fd_holdleaderswakeup = 0;
				spin_unlock(&fdp->fd_spin);
				wakeup(&fdp->fd_holdleaderscount);
			} else {
				spin_unlock(&fdp->fd_spin);
			}
		}
	}
	return (0);
}
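
/*
 * Worked example (illustrative, not part of the original code): with
 * RLIMIT_NOFILE at 1024, duplicating onto descriptor 5000 fails, but
 * the error depends on the entry point because of DUP_FCNTL:
 *
 *	dup2(0, 5000)			-> EBADF
 *	fcntl(0, F_DUPFD, 5000)		-> EINVAL (POSIX requirement)
 */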

/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(struct sigio **sigiop)
{
	struct pgrp *pgrp;
	struct proc *p;
	struct sigio *sigio;

	if ((sigio = *sigiop) != NULL) {
		lwkt_gettoken(&sigio_token);	/* protect sigio */
		KKASSERT(sigiop == sigio->sio_myref);
		sigio = *sigiop;
		*sigiop = NULL;
		lwkt_reltoken(&sigio_token);
	}
	if (sigio == NULL)
		return;

	if (sigio->sio_pgid < 0) {
		pgrp = sigio->sio_pgrp;
		sigio->sio_pgrp = NULL;
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&pgrp->pg_token);
		pgrel(pgrp);
	} else /* if ((*sigiop)->sio_pgid > 0) */ {
		p = sigio->sio_proc;
		sigio->sio_proc = NULL;
		PHOLD(p);
		lwkt_gettoken(&p->p_token);
		SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	crfree(sigio->sio_ucred);
	sigio->sio_ucred = NULL;
	kfree(sigio, M_SIGIO);
}

/*
 * Free a list of sigio structures.  Caller is responsible for ensuring
 * that the list is MPSAFE.
 */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct sigio *sigio;

	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
		funsetown(sigio->sio_myref);
}

/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc = NULL;
	struct pgrp *pgrp = NULL;
	struct sigio *sigio;
	int error;

	if (pgid == 0) {
		funsetown(sigiop);
		return (0);
	}

	if (pgid > 0) {
		proc = pfind(pgid);
		if (proc == NULL) {
			error = ESRCH;
			goto done;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (proc->p_session != curproc->p_session) {
			error = EPERM;
			goto done;
		}
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			error = ESRCH;
			goto done;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curproc->p_session) {
			error = EPERM;
			goto done;
		}
	}
	sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
	if (pgid > 0) {
		KKASSERT(pgrp == NULL);
		lwkt_gettoken(&proc->p_token);
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		lwkt_reltoken(&proc->p_token);
	} else {
		KKASSERT(proc == NULL);
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
		lwkt_reltoken(&pgrp->pg_token);
		pgrp = NULL;
	}
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curthread->td_ucred);
	/* It would be convenient if p_ruid was in ucred. */
	sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
	sigio->sio_myref = sigiop;

	lwkt_gettoken(&sigio_token);
	while (*sigiop)
		funsetown(sigiop);
	*sigiop = sigio;
	lwkt_reltoken(&sigio_token);
	error = 0;
done:
	if (pgrp)
		pgrel(pgrp);
	if (proc)
		PRELE(proc);
	return (error);
}

/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(struct sigio **sigiop)
{
	struct sigio *sigio;
	pid_t own;

	lwkt_gettoken_shared(&sigio_token);
	sigio = *sigiop;
	own = (sigio != NULL ? sigio->sio_pgid : 0);
	lwkt_reltoken(&sigio_token);

	return (own);
}
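
/*
 * Illustrative sketch (not part of the original code): a driver or
 * socket layer typically keeps a 'struct sigio *' per object and wires
 * it through these helpers from its FIOSETOWN/FIOGETOWN ioctl handler.
 * 'obj' below is a hypothetical per-object softc:
 *
 *	case FIOSETOWN:
 *		error = fsetown(*(int *)data, &obj->sigio);
 *		break;
 *	case FIOGETOWN:
 *		*(int *)data = fgetown(&obj->sigio);
 *		break;
 *
 * and on final close:
 *
 *	funsetown(&obj->sigio);
 */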

/*
 * Close many file descriptors.
 */
int
sys_closefrom(struct sysmsg *sysmsg, const struct closefrom_args *uap)
{
	return(kern_closefrom(uap->fd));
}

/*
 * Close all file descriptors greater than or equal to fd
 */
int
kern_closefrom(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	int error;
	int e2;

	KKASSERT(p);
	fdp = p->p_fd;

	if (fd < 0)
		return (EINVAL);

	/*
	 * NOTE: This function will skip unassociated descriptors and
	 *	 reserved descriptors that have not yet been assigned.
	 *	 fd_lastfile can change as a side effect of kern_close().
	 *
	 * NOTE: We accumulate EINTR errors and return EINTR if any
	 *	 close() returned EINTR.  However, the descriptor is
	 *	 still closed and we do not break out of the loop.
	 */
	error = 0;
	spin_lock(&fdp->fd_spin);
	while (fd <= fdp->fd_lastfile) {
		if (fdp->fd_files[fd].fp != NULL) {
			spin_unlock(&fdp->fd_spin);
			/* ok if this races another close */
			e2 = kern_close(fd);
			if (e2 == EINTR)
				error = EINTR;
			spin_lock(&fdp->fd_spin);
		}
		++fd;
	}
	spin_unlock(&fdp->fd_spin);

	return error;
}
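
/*
 * Usage note (illustrative, not part of the original code): daemons
 * commonly call closefrom(3) from userland to shed every descriptor
 * inherited above the standard three, which lands in kern_closefrom(3)
 * here and sweeps up to fd_lastfile.
 */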

/*
 * Close a file descriptor.
 */
int
sys_close(struct sysmsg *sysmsg, const struct close_args *uap)
{
	return(kern_close(uap->fd));
}

/*
 * close() helper
 */
int
kern_close(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *fp;
	int error;
	int holdleaders;

	KKASSERT(p);
	fdp = p->p_fd;

	/*
	 * funsetfd*() also clears the fd cache
	 */
	spin_lock(&fdp->fd_spin);
	if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	holdleaders = 0;
	if (p->p_fdtol != NULL) {
		/*
		 * Ask fdfree() to sleep to ensure that all relevant
		 * process leaders can be traversed in closef().
		 */
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	}

	/*
	 * We now hold the fp reference that used to be owned by the
	 * descriptor array.
	 */
	spin_unlock(&fdp->fd_spin);
	if (SLIST_FIRST(&fp->f_klist))
		knote_fdclose(fp, fdp, fd);
	error = closef(fp, p);
	if (holdleaders) {
		spin_lock(&fdp->fd_spin);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			spin_unlock(&fdp->fd_spin);
			wakeup(&fdp->fd_holdleaderscount);
		} else {
			spin_unlock(&fdp->fd_spin);
		}
	}
	return (error);
}

/*
 * shutdown_args(int fd, int how)
 */
int
kern_shutdown(int fd, int how)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_shutdown(fp, how);
	fdrop(fp);

	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_shutdown(struct sysmsg *sysmsg, const struct shutdown_args *uap)
{
	int error;

	error = kern_shutdown(uap->s, uap->how);

	return (error);
}

/*
 * fstat() helper
 */
int
kern_fstat(int fd, struct stat *ub)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_stat(fp, ub, td->td_ucred);
	fdrop(fp);

	return (error);
}

/*
 * Return status information about a file descriptor.
 */
int
sys_fstat(struct sysmsg *sysmsg, const struct fstat_args *uap)
{
	struct stat st;
	int error;

	error = kern_fstat(uap->fd, &st);

	if (error == 0)
		error = copyout(&st, uap->sb, sizeof(st));
	return (error);
}

/*
 * Return pathconf information about a file descriptor.
 *
 * MPALMOSTSAFE
 */
int
sys_fpathconf(struct sysmsg *sysmsg, const struct fpathconf_args *uap)
{
	struct thread *td = curthread;
	struct file *fp;
	struct vnode *vp;
	int error = 0;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);

	switch (fp->f_type) {
	case DTYPE_PIPE:
	case DTYPE_SOCKET:
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
		} else {
			sysmsg->sysmsg_result = PIPE_BUF;
			error = 0;
		}
		break;
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		vp = (struct vnode *)fp->f_data;
		error = VOP_PATHCONF(vp, uap->name, &sysmsg->sysmsg_reg);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	fdrop(fp);
	return(error);
}

/*
 * Grow the file table so it can hold through descriptor (want).
 *
 * The fdp's spinlock must be held exclusively on entry and will be held
 * on return.  The spinlock may be cycled by the routine.
 */
static void
fdgrow_locked(struct filedesc *fdp, int want)
{
	struct fdnode *newfiles;
	struct fdnode *oldfiles;
	int nf, extra;

	nf = fdp->fd_nfiles;
	do {
		/* nf has to be of the form 2^n - 1 */
		nf = 2 * nf + 1;
	} while (nf <= want);

	spin_unlock(&fdp->fd_spin);
	newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
	spin_lock(&fdp->fd_spin);

	/*
	 * We could have raced another extend while we were not holding
	 * the spinlock.
	 */
	if (fdp->fd_nfiles >= nf) {
		spin_unlock(&fdp->fd_spin);
		kfree(newfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
		return;
	}
	/*
	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	 */
	extra = nf - fdp->fd_nfiles;
	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));

	oldfiles = fdp->fd_files;
	fdp->fd_files = newfiles;
	fdp->fd_nfiles = nf;

	if (oldfiles != fdp->fd_builtin_files) {
		spin_unlock(&fdp->fd_spin);
		kfree(oldfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
	}
}
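
/*
 * Worked example (illustrative, not part of the original code): the
 * recurrence nf = 2 * nf + 1 preserves the 2^n - 1 form required by
 * the in-place binary tree.  Growing from, say, nf = 15 to hold
 * descriptor 40 proceeds 15 -> 31 -> 63, so fd_nfiles becomes 63.
 */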

/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}

/*
 * Traverse the in-place binary tree bottom-up adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * Caller must be holding an exclusive spinlock on fdp
 */
static
void
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
{
	while (fd >= 0) {
		fdp->fd_files[fd].allocated += incr;
		KKASSERT(fdp->fd_files[fd].allocated >= 0);
		fd = left_ancestor(fd);
	}
}
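
/*
 * Worked example (illustrative, not part of the original code) of the
 * in-place binary tree index math above, for node n = 5 (binary 101):
 *
 *	right_subtree_size(5) = 5 ^ (5 | 6)  = 5 ^ 7 = 2
 *	right_ancestor(5)     = 5 | 6        = 7
 *	left_ancestor(5)      = (5 & 6) - 1  = 3
 *
 * So fdreserve_locked(fdp, 5, 1) bumps the allocated count on nodes
 * 5 and 3, then stops because left_ancestor(3) = (3 & 4) - 1 = -1.
 */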

/*
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
 */
static
int
fdalloc_locked(struct proc *p, struct filedesc *fdp, int want, int *result)
{
	struct plimit *limit = readplimits(p);
	struct uidinfo *uip;
	int fd, rsize, rsum, node, lim;

	/*
	 * Check dtable size limit
	 */
	*result = -1;	/* avoid gcc warnings */
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;
	if (want >= lim)
		return (EINVAL);

	/*
	 * Check that the user has not run out of descriptors (non-root only).
	 * As a safety measure the dtable is allowed to have at least
	 * minfilesperproc open fds regardless of the maxfilesperuser limit.
	 *
	 * This isn't as loose a spec as ui_posixlocks, so we use atomic
	 * ops to force synchronize and recheck if we would otherwise
	 * error.
	 */
	if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
		uip = p->p_ucred->cr_uidinfo;
		if (uip->ui_openfiles > maxfilesperuser) {
			int n;
			int count;

			count = 0;
			for (n = 0; n < ncpus; ++n) {
				count += atomic_swap_int(
					    &uip->ui_pcpu[n].pu_openfiles, 0);
			}
			atomic_add_int(&uip->ui_openfiles, count);
			if (uip->ui_openfiles > maxfilesperuser) {
				krateprintf(&krate_uidinfo,
					    "Warning: user %d pid %d (%s) "
					    "ran out of file descriptors "
					    "(%d/%d)\n",
					    p->p_ucred->cr_uid, (int)p->p_pid,
					    p->p_comm,
					    uip->ui_openfiles,
					    maxfilesperuser);
				return(ENFILE);
			}
		}
	}

	/*
	 * Grow the dtable if necessary
	 */
	if (want >= fdp->fd_nfiles)
		fdgrow_locked(fdp, want);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 * count.  If we happen to see a value of 0 then we can shortcut
	 * our search.  Otherwise we run through the tree going
	 * down branches we know have free descriptor(s) until we hit a
	 * leaf node.  The leaf node will be free but will not necessarily
	 * have an allocated field of 0.
	 */
retry:
	/* move up the tree looking for a subtree with a free node */
	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
	     fd = right_ancestor(fd)) {
		if (fdp->fd_files[fd].allocated == 0)
			goto found;

		rsize = right_subtree_size(fd);
		if (fdp->fd_files[fd].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * Free fd is in the right subtree of the tree rooted at fd.
		 * Call that subtree R.  Look for the smallest (leftmost)
		 * subtree of R with an unallocated fd: continue moving
		 * down the left branch until encountering a full left
		 * subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = fd + rsize;
			rsum += fdp->fd_files[node].allocated;
			if (fdp->fd_files[fd].allocated == rsum + rsize) {
				fd = node;	/* move to the right */
				if (fdp->fd_files[node].allocated == 0)
					goto found;
				rsum = 0;
			}
		}
		goto found;
	}

	/*
	 * No space in current array.  Expand?
	 */
	if (fdp->fd_nfiles >= lim) {
		return (EMFILE);
	}
	fdgrow_locked(fdp, want);
	goto retry;

found:
	KKASSERT(fd < fdp->fd_nfiles);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (want <= fdp->fd_freefile)
		fdp->fd_freefile = fd;
	*result = fd;
	KKASSERT(fdp->fd_files[fd].fp == NULL);
	KKASSERT(fdp->fd_files[fd].reserved == 0);
	fdp->fd_files[fd].fileflags = 0;
	fdp->fd_files[fd].reserved = 1;
	fdreserve_locked(fdp, fd, 1);

	return (0);
}

int
fdalloc(struct proc *p, int want, int *result)
{
	struct filedesc *fdp = p->p_fd;
	int error;

	spin_lock(&fdp->fd_spin);
	error = fdalloc_locked(p, fdp, want, result);
	spin_unlock(&fdp->fd_spin);

	return error;
}
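
/*
 * Illustrative sketch (not part of the original code): every successful
 * fdalloc() reservation must be resolved, either by installing a file
 * pointer or by explicitly backing it out:
 *
 *	error = fdalloc(p, 0, &fd);
 *	if (error == 0) {
 *		if (setup_succeeded)
 *			fsetfd(fdp, fp, fd);	// install, resolves reservation
 *		else
 *			fsetfd(fdp, NULL, fd);	// back the reservation out
 *	}
 */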

/*
 * Check to see whether n user file descriptors
 * are available to the process p.
 */
int
fdavail(struct proc *p, int n)
{
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct fdnode *fdnode;
	int i, lim, last;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;

	spin_lock(&fdp->fd_spin);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
		spin_unlock(&fdp->fd_spin);
		return (1);
	}
	last = min(fdp->fd_nfiles, lim);
	fdnode = &fdp->fd_files[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
		if (fdnode->fp == NULL && --n <= 0) {
			spin_unlock(&fdp->fd_spin);
			return (1);
		}
	}
	spin_unlock(&fdp->fd_spin);
	return (0);
}

/*
 * Revoke open descriptors referencing (f_data, f_type)
 *
 * Any revoke executed within a prison is only able to
 * revoke descriptors for processes within that prison.
 *
 * Returns 0 on success or an error code.
 */
struct fdrevoke_info {
	void *data;
	short type;
	short unused;
	int found;
	struct ucred *cred;
	struct file *nfp;
};

static int fdrevoke_check_callback(struct file *fp, void *vinfo);
static int fdrevoke_proc_callback(struct proc *p, void *vinfo);

int
fdrevoke(void *f_data, short f_type, struct ucred *cred)
{
	struct fdrevoke_info info;
	int error;

	bzero(&info, sizeof(info));
	info.data = f_data;
	info.type = f_type;
	info.cred = cred;
	error = falloc(NULL, &info.nfp, NULL);
	if (error)
		return (error);

	/*
	 * Scan the file pointer table once.  dups do not dup file pointers,
	 * only descriptors, so there is no leak.  Set FREVOKED on the fps
	 * being revoked.
	 *
	 * Any fps sent over unix-domain sockets will be revoked by the
	 * socket code checking for FREVOKED when the fps are externalized.
	 * revoke_token is used to make sure that fps marked FREVOKED and
	 * externalized will be picked up by the following allproc_scan().
	 */
	lwkt_gettoken(&revoke_token);
	allfiles_scan_exclusive(fdrevoke_check_callback, &info);
	lwkt_reltoken(&revoke_token);

	/*
	 * If any fps were marked track down the related descriptors
	 * and close them.  Any dup()s at this point will notice
	 * the FREVOKED already set in the fp and do the right thing.
	 */
	if (info.found)
		allproc_scan(fdrevoke_proc_callback, &info, 0);
	fdrop(info.nfp);
	return(0);
}

/*
 * Locate matching file pointers directly.
 *
 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
 */
static int
fdrevoke_check_callback(struct file *fp, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;

	/*
	 * File pointers already flagged for revocation are skipped.
	 */
	if (fp->f_flag & FREVOKED)
		return(0);

	/*
	 * If revoking from a prison, file pointers created outside of
	 * that prison, or file pointers without creds, cannot be revoked.
	 */
	if (info->cred->cr_prison &&
	    (fp->f_cred == NULL ||
	     info->cred->cr_prison != fp->f_cred->cr_prison)) {
		return(0);
	}

	/*
	 * If the file pointer matches then mark it for revocation.  The
	 * flag is currently only used by unp_revoke_gc().
	 *
	 * info->found is a heuristic and can race in a SMP environment.
	 */
	if (info->data == fp->f_data && info->type == fp->f_type) {
		atomic_set_int(&fp->f_flag, FREVOKED);
		info->found = 1;
	}
	return(0);
}

/*
 * Locate matching file pointers via process descriptor tables.
 */
static int
fdrevoke_proc_callback(struct proc *p, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;
	struct filedesc *fdp;
	struct file *fp;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (info->cred->cr_prison &&
	    info->cred->cr_prison != p->p_ucred->cr_prison) {
		return(0);
	}

	/*
	 * If the controlling terminal of the process matches the
	 * vnode being revoked we clear the controlling terminal.
	 *
	 * The normal spec_close() may not catch this because it
	 * uses curproc instead of p.
	 */
	if (p->p_session && info->type == DTYPE_VNODE &&
	    info->data == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(info->data);
	}

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return(0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);

	/*
	 * Locate and close any matching file descriptors, replacing
	 * them with info->nfp.
	 */
	spin_lock(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (fp->f_flag & FREVOKED) {
			++fdp->fd_closedcounter;
			fclearcache(&fdp->fd_files[n], NULL, 0);
			++fdp->fd_closedcounter;
			fhold(info->nfp);
			fdp->fd_files[n].fp = info->nfp;
			spin_unlock(&fdp->fd_spin);
			knote_fdclose(fp, fdp, n);	/* XXX */
			closef(fp, p);
			spin_lock(&fdp->fd_spin);
		}
	}
	spin_unlock(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	return(0);
}
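
/*
 * Editorial summary (illustrative, not part of the original code):
 * revocation is a two-pass algorithm.  Pass 1 (fdrevoke_check_callback)
 * only sets FREVOKED on matching file pointers.  Pass 2
 * (fdrevoke_proc_callback) walks every process and replaces any
 * descriptor slot whose fp is marked with the pre-allocated placeholder
 * info->nfp, closing the old fp.  The placeholder guarantees the slot
 * never becomes NULL out from under concurrent users of the descriptor.
 */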

/*
 * falloc:
 *	Create a new open file structure and reserve a file descriptor
 *	for the process that refers to it.
 *
 *	Root creds are checked using lp, or assumed if lp is NULL.  If
 *	resultfd is non-NULL then lp must also be non-NULL.  No file
 *	descriptor is reserved (and no process context is needed) if
 *	resultfd is NULL.
 *
 *	A file pointer with a refcount of 1 is returned.  Note that the
 *	file pointer is NOT associated with the descriptor.  If falloc
 *	returns success, fsetfd() MUST be called to either associate the
 *	file pointer or clear the reservation.
 */
int
falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
{
	static struct timeval lastfail;
	static int curfail;
	struct filelist_head *head;
	struct file *fp;
	struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
	int error;

	fp = NULL;

	/*
	 * Handle filetable full issues and root overfill.
	 */
	if (nfiles >= maxfiles - maxfilesrootres &&
	    (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("kern.maxfiles limit exceeded by uid %d, "
				"please see tuning(7).\n",
				cred->cr_ruid);
		}
		error = ENFILE;
		goto done;
	}

	/*
	 * Allocate a new file descriptor.
	 */
	fp = kmalloc_obj(sizeof(*fp), M_FILE, M_WAITOK|M_ZERO);
	spin_init(&fp->f_spin, "falloc");
	SLIST_INIT(&fp->f_klist);
	fp->f_count = 1;
	fp->f_ops = &badfileops;
	fp->f_seqcount = 1;
	fsetcred(fp, cred);
	atomic_add_int(&nfiles, 1);

	head = fp2filelist(fp);
	spin_lock(&head->spin);
	LIST_INSERT_HEAD(&head->list, fp, f_list);
	spin_unlock(&head->spin);

	if (resultfd) {
		if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
			fdrop(fp);
			fp = NULL;
		}
	} else {
		error = 0;
	}
done:
	*resultfp = fp;
	return (error);
}

/*
 * Check for races against a file descriptor by determining that the
 * file pointer is still associated with the specified file descriptor,
 * and a close is not currently in progress.
 */
int
checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp,
	      int closedcounter)
{
	struct fdcache *fdc;
	int error;

	cpu_lfence();
	if (fdp->fd_closedcounter == closedcounter)
		return 0;

	if (td->td_proc && td->td_proc->p_fd == fdp) {
		for (fdc = &td->td_fdcache[0];
		     fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
			if (fdc->fd == fd && fdc->fp == fp)
				return 0;
		}
	}

	spin_lock_shared(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
		error = EBADF;
	else
		error = 0;
	spin_unlock_shared(&fdp->fd_spin);
	return (error);
}

/*
 * Associate a file pointer with a previously reserved file descriptor.
 * This function always succeeds.
 *
 * If fp is NULL, the file descriptor is returned to the pool.
 *
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static void
fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
{
	KKASSERT((unsigned)fd < fdp->fd_nfiles);
	KKASSERT(fdp->fd_files[fd].reserved != 0);
	if (fp) {
		fhold(fp);
		/* fclearcache(&fdp->fd_files[fd], NULL, 0); */
		fdp->fd_files[fd].fp = fp;
		fdp->fd_files[fd].reserved = 0;
	} else {
		fdp->fd_files[fd].reserved = 0;
		fdreserve_locked(fdp, fd, -1);
		fdfixup_locked(fdp, fd);
	}
}

/*
 * Spinlock-protected wrapper for fsetfd_locked().  Acquires fdp->fd_spin
 * itself; the caller must not already hold it.
 */
void
fsetfd(struct filedesc *fdp, struct file *fp, int fd)
{
	spin_lock(&fdp->fd_spin);
	fsetfd_locked(fdp, fp, fd);
	spin_unlock(&fdp->fd_spin);
}

/*
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static
struct file *
funsetfd_locked(struct filedesc *fdp, int fd)
{
	struct file *fp;

	if ((unsigned)fd >= fdp->fd_nfiles)
		return (NULL);
	if ((fp = fdp->fd_files[fd].fp) == NULL)
		return (NULL);
	++fdp->fd_closedcounter;
	fclearcache(&fdp->fd_files[fd], NULL, 0);
	fdp->fd_files[fd].fp = NULL;
	fdp->fd_files[fd].fileflags = 0;
	++fdp->fd_closedcounter;

	fdreserve_locked(fdp, fd, -1);
	fdfixup_locked(fdp, fd);

	return(fp);
}
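
/*
 * Illustrative sketch (not part of the original code): the canonical
 * open()-style lifecycle built from the primitives above:
 *
 *	error = falloc(lp, &fp, &fd);	// new fp (refcount 1) + reserved fd
 *	if (error == 0) {
 *		// ... initialize fp (f_ops, f_data, f_flag) ...
 *		fsetfd(fdp, fp, fd);	// the table takes its own ref
 *		fdrop(fp);		// drop falloc's ref
 *	}
 */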
 */
int
fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
{
	int error;

	spin_lock_shared(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		*flagsp = fdp->fd_files[fd].fileflags;
		error = 0;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return (error);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags |= add_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags &= ~rem_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}

/*
 * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
 */
void
fsetcred(struct file *fp, struct ucred *ncr)
{
	struct ucred *ocr;
	struct uidinfo *uip;
	struct uidcount *pup;
	int cpu = mycpuid;
	int count;

	ocr = fp->f_cred;
	if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
		if (ocr) {
			uip = ocr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, -1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
		if (ncr) {
			uip = ncr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, 1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
	}
	if (ncr)
		crhold(ncr);
	fp->f_cred = ncr;
	if (ocr)
		crfree(ocr);
}

/*
 * Free a file structure.
 */
static
void
ffree(struct file *fp)
{
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	fsetcred(fp, NULL);
	if (fp->f_nchandle.ncp)
		cache_drop(&fp->f_nchandle);
	kfree_obj(fp, M_FILE);
}

/*
 * Called from init_main to initialize filedesc0 for proc0.
 */
void
fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
{
	p0->p_fd = fdp0;
	p0->p_fdtol = NULL;
	fdp0->fd_refcnt = 1;
	fdp0->fd_cmask = cmask;
	fdp0->fd_files = fdp0->fd_builtin_files;
	fdp0->fd_nfiles = NDFILE;
	fdp0->fd_lastfile = -1;
	spin_init(&fdp0->fd_spin, "fdinitbootstrap");
}

/*
 * Build a new filedesc structure.
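 *
 * The result is an empty table which inherits only the current, root
 * and jail directories.  Contrast with fdshare() and fdcopy() below;
 * a fork-style caller picks one of the three (illustrative sketch
 * only, error handling elided):
 *
 *	fdp = fdinit(p);		fresh, empty table
 *	fdp = fdshare(p);		share p's table by reference
 *	error = fdcopy(p, &fdp);	private duplicate of p's table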
 */
struct filedesc *
fdinit(struct proc *p)
{
	struct filedesc *newfdp;
	struct filedesc *fdp = p->p_fd;

	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_cdir) {
		newfdp->fd_cdir = fdp->fd_cdir;
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}

	/*
	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
	 * proc0, but should unconditionally exist in other processes.
	 */
	if (fdp->fd_rdir) {
		newfdp->fd_rdir = fdp->fd_rdir;
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if (fdp->fd_jdir) {
		newfdp->fd_jdir = fdp->fd_jdir;
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	spin_unlock(&fdp->fd_spin);

	/* Create the file descriptor table. */
	newfdp->fd_refcnt = 1;
	newfdp->fd_cmask = cmask;
	newfdp->fd_files = newfdp->fd_builtin_files;
	newfdp->fd_nfiles = NDFILE;
	newfdp->fd_lastfile = -1;
	spin_init(&newfdp->fd_spin, "fdinit");

	return (newfdp);
}

/*
 * Share a filedesc structure.
 */
struct filedesc *
fdshare(struct proc *p)
{
	struct filedesc *fdp;

	fdp = p->p_fd;
	spin_lock(&fdp->fd_spin);
	fdp->fd_refcnt++;
	spin_unlock(&fdp->fd_spin);
	return (fdp);
}

/*
 * Copy a filedesc structure.
 */
int
fdcopy(struct proc *p, struct filedesc **fpp)
{
	struct filedesc *fdp = p->p_fd;
	struct filedesc *newfdp;
	struct fdnode *fdnode;
	int i;
	int ni;

	/*
	 * Certain daemons might not have file descriptors.
	 */
	if (fdp == NULL)
		return (0);

	/*
	 * Allocate the new filedesc and fd_files[] array.  This can race
	 * with operations by other threads on the fdp so we have to be
	 * careful.
	 */
	newfdp = kmalloc(sizeof(struct filedesc),
			 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK);
	if (newfdp == NULL) {
		*fpp = NULL;
		return (-1);
	}
again:
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_lastfile < NDFILE) {
		newfdp->fd_files = newfdp->fd_builtin_files;
		i = NDFILE;
	} else {
		/*
		 * We have to allocate (2^N-1) entries for our in-place
		 * binary tree.  Allow the table to shrink.
		 */
		i = fdp->fd_nfiles;
		ni = (i - 1) / 2;
		while (ni > fdp->fd_lastfile && ni > NDFILE) {
			i = ni;
			ni = (i - 1) / 2;
		}
		spin_unlock(&fdp->fd_spin);
		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
					   M_FILEDESC, M_WAITOK | M_ZERO);

		/*
		 * Check for race, retry
		 */
		spin_lock(&fdp->fd_spin);
		if (i <= fdp->fd_lastfile) {
			spin_unlock(&fdp->fd_spin);
			kfree(newfdp->fd_files, M_FILEDESC);
			goto again;
		}
	}

	/*
	 * Dup the remaining fields.  vref() and cache_hold() can be
	 * safely called while holding the read spinlock on fdp.
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * NOTE: vref and cache_hold calls for the case where the vnode
	 *	 or cache entry already has at least one ref may be called
	 *	 while holding spin locks.
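	 *
	 *	 This works because bumping a ref that is already
	 *	 nonzero is a simple atomic increment which never
	 *	 blocks or reenters the allocator.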
	 */
	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}
	/*
	 * We must check for fd_rdir here, at least for now because
	 * the init process is created before we have access to the
	 * root vnode to take a reference to it.
	 */
	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	newfdp->fd_refcnt = 1;
	newfdp->fd_nfiles = i;
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_freefile = fdp->fd_freefile;
	newfdp->fd_cmask = fdp->fd_cmask;
	spin_init(&newfdp->fd_spin, "fdcopy");

	/*
	 * Copy the descriptor table through (i).  This also copies the
	 * allocation state.  Then go through and ref the file pointers
	 * and clean up any KQ descriptors.
	 *
	 * kq descriptors cannot be copied.  Since we haven't ref'd the
	 * copied files yet we can ignore the return value from funsetfd().
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * Be sure to clean out fdnode->tdcache, otherwise bad things will
	 * happen.
	 */
	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
	for (i = 0; i < newfdp->fd_nfiles; ++i) {
		fdnode = &newfdp->fd_files[i];
		if (fdnode->reserved) {
			fdreserve_locked(newfdp, i, -1);
			fdnode->reserved = 0;
			fdfixup_locked(newfdp, i);
		} else if (fdnode->fp) {
			bzero(&fdnode->tdcache, sizeof(fdnode->tdcache));
			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
				(void)funsetfd_locked(newfdp, i);
			} else {
				fhold(fdnode->fp);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);
	*fpp = newfdp;
	return (0);
}

/*
 * Release a filedesc structure.
 *
 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
 */
void
fdfree(struct proc *p, struct filedesc *repl)
{
	struct filedesc *fdp;
	struct fdnode *fdnode;
	int i;
	struct filedesc_to_leader *fdtol;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;

	/*
	 * Before destroying or replacing p->p_fd we must be sure to
	 * clean out the cache of the last thread, which should be
	 * curthread.
	 */
	fexitcache(curthread);

	/*
	 * Certain daemons might not have file descriptors.
	 */
	fdp = p->p_fd;
	if (fdp == NULL) {
		p->p_fd = repl;
		return;
	}

	/*
	 * Severe messing around to follow.
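	 *
	 * In order: release any POSIX-style advisory locks held by the
	 * process leader and retire p->p_fdtol, then drop fd_refcnt.
	 * If that was the last reference we also close every remaining
	 * descriptor, replace p->p_fd, wait out transient softrefs,
	 * and finally free the fdp itself.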
	 */
	spin_lock(&fdp->fd_spin);

	/* Check for special need to clear POSIX style locks */
	fdtol = p->p_fdtol;
	if (fdtol != NULL) {
		KASSERT(fdtol->fdl_refcount > 0,
			("filedesc_to_refcount botch: fdl_refcount=%d",
			 fdtol->fdl_refcount));
		if (fdtol->fdl_refcount == 1 && p->p_leader->p_advlock_flag) {
			for (i = 0; i <= fdp->fd_lastfile; ++i) {
				fdnode = &fdp->fd_files[i];
				if (fdnode->fp == NULL ||
				    fdnode->fp->f_type != DTYPE_VNODE) {
					continue;
				}
				fp = fdnode->fp;
				fhold(fp);
				spin_unlock(&fdp->fd_spin);

				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdrop(fp);
				spin_lock(&fdp->fd_spin);
			}
		}
	retry:
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    p->p_leader->p_advlock_flag) {
				/*
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				 */
				fdp->fd_holdleaderswakeup = 1;
				ssleep(&fdp->fd_holdleaderscount,
				       &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
			if (fdtol->fdl_holdcount > 0) {
				/*
				 * Ensure that fdtol->fdl_leader
				 * remains valid in closef().
				 */
				fdtol->fdl_wakeup = 1;
				ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else {
			fdtol = NULL;
		}
		p->p_fdtol = NULL;
		if (fdtol != NULL) {
			spin_unlock(&fdp->fd_spin);
			kfree(fdtol, M_FILEDESC_TO_LEADER);
			spin_lock(&fdp->fd_spin);
		}
	}
	if (--fdp->fd_refcnt > 0) {
		spin_unlock(&fdp->fd_spin);
		spin_lock(&p->p_spin);
		p->p_fd = repl;
		spin_unlock(&p->p_spin);
		return;
	}

	/*
	 * Even though we are the last reference to the structure, allproc
	 * scans may still reference the structure.  Maintain proper
	 * locks until we can replace p->p_fd.
	 *
	 * Also note that kqueue's closef still needs to reference the
	 * fdp via p->p_fd, so we have to close the descriptors before
	 * we replace p->p_fd.
	 */
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		if (fdp->fd_files[i].fp) {
			fp = funsetfd_locked(fdp, i);
			if (fp) {
				spin_unlock(&fdp->fd_spin);
				if (SLIST_FIRST(&fp->f_klist))
					knote_fdclose(fp, fdp, i);
				closef(fp, p);
				spin_lock(&fdp->fd_spin);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);

	/*
	 * Interlock against allproc scan operations (typically frevoke).
	 */
	spin_lock(&p->p_spin);
	p->p_fd = repl;
	spin_unlock(&p->p_spin);

	/*
	 * Wait for any softrefs to go away.  This race rarely occurs so
	 * we can use a non-critical-path style poll/sleep loop.  The
	 * race only occurs against allproc scans.
	 *
	 * No new softrefs can occur with the fdp disconnected from the
	 * process.
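	 *
	 * The softref hold protocol, as used by fdrevoke_proc_callback()
	 * and sysctl_kern_file_callback() in this file:
	 *
	 *	spin_lock(&p->p_spin);
	 *	if ((fdp = p->p_fd) != NULL)
	 *		atomic_add_int(&fdp->fd_softrefs, 1);
	 *	spin_unlock(&p->p_spin);
	 *	...use fdp...
	 *	atomic_subtract_int(&fdp->fd_softrefs, 1);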
	 */
	if (fdp->fd_softrefs) {
		kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
		while (fdp->fd_softrefs)
			tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
	}

	if (fdp->fd_files != fdp->fd_builtin_files)
		kfree(fdp->fd_files, M_FILEDESC);
	if (fdp->fd_cdir) {
		cache_drop(&fdp->fd_ncdir);
		vrele(fdp->fd_cdir);
	}
	if (fdp->fd_rdir) {
		cache_drop(&fdp->fd_nrdir);
		vrele(fdp->fd_rdir);
	}
	if (fdp->fd_jdir) {
		cache_drop(&fdp->fd_njdir);
		vrele(fdp->fd_jdir);
	}
	kfree(fdp, M_FILEDESC);
}

/*
 * Retrieve and reference the file pointer associated with a descriptor.
 *
 * td must be the current thread.
 */
struct file *
holdfp(thread_t td, int fd, int flag)
{
	struct file *fp;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if ((fp->f_flag & flag) == 0 && flag != -1) {
			fdrop(fp);
			fp = NULL;
		}
	}
	return fp;
}

/*
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
 *
 * td must be the current thread.
 */
int
holdsock(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	/*
	 * Lockless shortcut
	 */
	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_SOCKET) {
			fdrop(fp);
			fp = NULL;
			error = ENOTSOCK;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}

/*
 * Convert a user file descriptor to a held file pointer.
 *
 * td must be the current thread.
 */
int
holdvnode(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
			fdrop(fp);
			fp = NULL;
			error = EINVAL;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}

/*
 * Convert a user file descriptor to a held file pointer.
 *
 * td must be the current thread.
 */
int
holdvnode2(thread_t td, int fd, struct file **fpp, char *fflagsp)
{
	struct file *fp;
	int error;

	fp = _holdfp2(td, fd, fflagsp);
	if (fp) {
		if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
			fdrop(fp);
			fp = NULL;
			error = EINVAL;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}

/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file that would
 * otherwise be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.  We
 * go ahead and just reject all procfs file systems accesses as dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE &&
	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
		return (1);
	return (0);
}

/*
 * Make this setugid thing safe, if at all possible.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
setugidsafety(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * note: fdp->fd_files may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
			struct file *fp;

			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}

/*
 * Close all CLOEXEC files on exec.
 *
 * Only a single thread remains for the current process.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
fdcloseexec(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * We cannot cache fd_files since operations may block and rip
	 * them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_files[i].fp != NULL &&
		    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
			struct file *fp;

			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 *
			 * (funsetfd*() also clears the fd cache)
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}

/*
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 *
 * NOT MPSAFE - calls falloc, vn_open, etc
 */
int
fdcheckstd(struct lwp *lp)
{
	struct nlookupdata nd;
	struct filedesc *fdp;
	struct file *fp;
	int retval;
	int i, error, flags, devnull;

	fdp = lp->lwp_proc->p_fd;
	if (fdp == NULL)
		return (0);
	devnull = -1;
	error = 0;
	for (i = 0; i < 3; i++) {
		if (fdp->fd_files[i].fp != NULL)
			continue;
		if (devnull < 0) {
			if ((error = falloc(lp, &fp, &devnull)) != 0)
				break;

			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
					     NLC_FOLLOW|NLC_LOCKVP);
			flags = FREAD | FWRITE;
			if (error == 0)
				error = vn_open(&nd, &fp, flags, 0);
			if (error == 0)
				fsetfd(fdp, fp, devnull);
			else
				fsetfd(fdp, NULL, devnull);
			fdrop(fp);
			nlookup_done(&nd);
			if (error)
				break;
			KKASSERT(i == devnull);
		} else {
			error = kern_dup(DUP_FIXED, devnull, i, &retval);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: td and/or p may be NULL when closing a file
 * that was being passed in a message.
 *
 * MPALMOSTSAFE - acquires mplock for VOP operations
 */
int
closef(struct file *fp, struct proc *p)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;

	if (fp == NULL)
		return (0);

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p != NULL && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		if (p->p_leader->p_advlock_flag) {
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			vp = (struct vnode *)fp->f_data;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
				    &lf, F_POSIX);
		}
		fdtol = p->p_fdtol;
		if (fdtol != NULL) {
			lwkt_gettoken(&p->p_token);

			/*
			 * Handle special case where file descriptor table
			 * is shared between multiple process leaders.
			 */
			for (fdtol = fdtol->fdl_next;
			     fdtol != p->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if (fdtol->fdl_leader->p_advlock_flag == 0)
					continue;
				fdtol->fdl_holdcount++;
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
			lwkt_reltoken(&p->p_token);
		}
	}
	return (fdrop(fp));
}

/*
 * fhold() can only be called if f_count is already at least 1 (i.e. the
 * caller of fhold() already has a reference to the file pointer in some
 * manner or other).
 *
 * Atomic ops are used for incrementing and decrementing f_count before
 * the 1->0 transition.  f_count 1->0 transition is special, see the
 * comment in fdrop().
 */
void
fhold(struct file *fp)
{
	/* 0->1 transition will never work */
	KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count));
	atomic_add_int(&fp->f_count, 1);
}

/*
 * fdrop() - drop a reference to a descriptor
 */
int
fdrop(struct file *fp)
{
	struct flock lf;
	struct vnode *vp;
	int error, do_free = 0;

	/*
	 * NOTE:
	 * Simple atomic_fetchadd_int(f_count, -1) here will cause use-
	 * after-free or double free (due to f_count 0->1 transition), if
	 * fhold() is called on the fps found through filehead iteration.
	 */
	for (;;) {
		int count = fp->f_count;

		cpu_ccfence();
		KASSERT(count > 0, ("fdrop: invalid f_count %d", count));
		if (count == 1) {
			struct filelist_head *head = fp2filelist(fp);

			/*
			 * About to drop the last reference, hold the
			 * filehead spin lock and drop it, so that no
			 * one could see this fp through filehead anymore,
			 * let alone fhold() this fp.
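			 *
			 * The cmpset below is done while holding
			 * head->spin so that the 1->0 transition and
			 * the LIST_REMOVE() appear atomic to
			 * allfiles_scan_exclusive() callbacks, which
			 * could otherwise fhold() a half-destroyed fp.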
			 */
			spin_lock(&head->spin);
			if (atomic_cmpset_int(&fp->f_count, count, 0)) {
				LIST_REMOVE(fp, f_list);
				spin_unlock(&head->spin);
				atomic_subtract_int(&nfiles, 1);
				do_free = 1;	/* free this fp */
				break;
			}
			spin_unlock(&head->spin);
			/* retry */
		} else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) {
			break;
		}
		/* retry */
	}
	if (!do_free)
		return (0);

	KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL);

	/*
	 * The last reference has gone away, we own the fp structure free
	 * and clear.
	 */
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
	}
	if (fp->f_ops != &badfileops)
		error = fo_close(fp);
	else
		error = 0;
	ffree(fp);
	return (error);
}

/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 *
 * MPALMOSTSAFE
 */
int
sys_flock(struct sysmsg *sysmsg, const struct flock_args *uap)
{
	thread_t td = curthread;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int error;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_VNODE) {
		error = EOPNOTSUPP;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
		goto done;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done;
	}
	if (uap->how & LOCK_NB)
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
	else
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
	atomic_set_int(&fp->f_flag, FHASLOCK);	/* race ok */
done:
	fdrop(fp);
	return (error);
}

/*
 * File Descriptor pseudo-device driver ( /dev/fd/N ).
 *
 * This interface is now a bit more Linux-compatible and attempts to not
 * share seek positions by not sharing the fp of the descriptor when
 * possible.
 *
 * Probably a good idea anyhow, but now particularly important for
 * fexecve() which uses /dev/fd/N.
 *
 * The original interface effectively dup()d the descriptor.
 */
static int
fdopen(struct dev_open_args *ap)
{
	struct file *wfp;
	thread_t td;
	int error;
	int sfd;

	td = curthread;
	KKASSERT(td->td_lwp != NULL);

	/*
	 * Get the fp for /dev/fd/N
	 */
	sfd = minor(ap->a_head.a_dev);
	if ((wfp = holdfp(td, sfd, -1)) == NULL)
		return (EBADF);

	/*
	 * Close a revoke/dup race.  Duping a descriptor marked as revoked
	 * will dup a dummy descriptor instead of the real one.
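	 *
	 * The dummy comes from falloc(NULL, &wfp, NULL) below: a fresh
	 * fp still bound to badfileops, so operations on the dup'd
	 * descriptor simply fail with EBADF.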
	 */
	if (wfp->f_flag & FREVOKED) {
		kprintf("Warning: attempt to dup() a revoked descriptor\n");
		fdrop(wfp);
		wfp = NULL;
		error = falloc(NULL, &wfp, NULL);
		if (error)
			return (error);
	}

	/*
	 * Check that the mode the file is being opened for is a
	 * subset of the mode of the existing descriptor.
	 */
	if (ap->a_fpp == NULL) {
		fdrop(wfp);
		return EINVAL;
	}
	if (((ap->a_oflags & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
		fdrop(wfp);
		return EACCES;
	}
	if (wfp->f_type == DTYPE_VNODE && wfp->f_data) {
		/*
		 * If wfp is a vnode create a new fp so things like the
		 * seek position (etc) are not shared with the original.
		 *
		 * Don't try to call VOP_OPEN().  Adjust the open-count
		 * ourselves.
		 */
		struct vnode *vp;
		struct file *fp;

		vp = wfp->f_data;
		fp = *ap->a_fpp;

		/*
		 * Yah... this wouldn't be good.
		 */
		if ((ap->a_oflags & (FWRITE|O_TRUNC)) && vp->v_type == VDIR) {
			fdrop(wfp);
			return EISDIR;
		}

		/*
		 * Setup the new fp and simulate an open(), but for now do
		 * not actually call VOP_OPEN() though we probably could.
		 */
		fp->f_type = DTYPE_VNODE;
		/* retain flags not to be copied */
		fp->f_flag = (fp->f_flag & ~FMASK) | (ap->a_oflags & FMASK);
		fp->f_ops = &vnode_fileops;
		fp->f_data = vp;
		vref(vp);

		if (ap->a_oflags & FWRITE)
			atomic_add_int(&vp->v_writecount, 1);
		KKASSERT(vp->v_opencount >= 0 && vp->v_opencount != INT_MAX);
		atomic_add_int(&vp->v_opencount, 1);
		fdrop(wfp);
	} else {
		/*
		 * If wfp is not a vnode we have to share it directly.
		 */
		fdrop(*ap->a_fpp);
		*ap->a_fpp = wfp;	/* transfer hold count */
	}
	return EALREADY;
}

/*
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
 */
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct proc *leader)
{
	struct filedesc_to_leader *fdtol;

	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
			M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
	} else {
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return fdtol;
}

/*
 * Scan all file pointers in the system.  The file list is hashed over
 * NFILELIST_HEADS buckets; the callback is made with the relevant
 * bucket's list spinlock held exclusively.
 */
void
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];
		struct file *fp;

		spin_lock(&head->spin);
		LIST_FOREACH(fp, &head->list, f_list) {
			int res;

			res = callback(fp, data);
			if (res < 0)
				break;
		}
		spin_unlock(&head->spin);
	}
}

/*
 * Get file structures.
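 *
 * The sysctl below iterates processes via allproc_scan() rather than
 * the global file list; allfiles_scan_exclusive() above is the
 * file-list analogue.  An illustrative callback for the latter
 * (sketch only, count_sockets() is hypothetical):
 *
 *	static int
 *	count_sockets(struct file *fp, void *data)
 *	{
 *		if (fp->f_type == DTYPE_SOCKET)
 *			++*(int *)data;
 *		return (0);
 *	}
 *
 *	allfiles_scan_exclusive(count_sockets, &count);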
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 */

struct sysctl_kern_file_info {
	int count;
	int error;
	struct sysctl_req *req;
};

static int sysctl_kern_file_callback(struct proc *p, void *data);

static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_kern_file_info info;

	/*
	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
	 *
	 * When just doing a count, note that we cannot just count
	 * the elements and add f_count via the filehead list because
	 * threaded processes share their descriptor table and f_count might
	 * still be '1' in that case.
	 *
	 * Since the SYSCTL op can block, we must hold the process to
	 * prevent it being ripped out from under us either in the
	 * file descriptor loop or in the greater LIST_FOREACH.  The
	 * process may be in varying states of disrepair.  If the process
	 * is in SZOMB we may have caught it just as it is being removed
	 * from the allproc list, we must skip it in that case to maintain
	 * an unbroken chain through the allproc list.
	 */
	info.count = 0;
	info.error = 0;
	info.req = req;
	allproc_scan(sysctl_kern_file_callback, &info, 0);

	/*
	 * When just calculating the size, overestimate a bit to try to
	 * prevent system activity from causing the buffer-fill call
	 * to fail later on.
	 */
	if (req->oldptr == NULL) {
		info.count = (info.count + 16) + (info.count / 10);
		info.error = SYSCTL_OUT(req, NULL,
					info.count * sizeof(struct kinfo_file));
	}
	return (info.error);
}

static int
sysctl_kern_file_callback(struct proc *p, void *data)
{
	struct sysctl_kern_file_info *info = data;
	struct kinfo_file kf;
	struct filedesc *fdp;
	struct file *fp;
	uid_t uid;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) == 0)
		return(0);

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return(0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);

	/*
	 * The fdp's own spinlock prevents the contents from being
	 * modified.
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (info->req->oldptr == NULL) {
			++info->count;
		} else {
			uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
			kcore_make_file(&kf, fp, p->p_pid, uid, n);
			spin_unlock_shared(&fdp->fd_spin);
			info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
			spin_lock_shared(&fdp->fd_spin);
			if (info->error)
				break;
		}
	}
	spin_unlock_shared(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	if (info->error)
		return(-1);
	return(0);
}

SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
	    0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW,
	   &minfilesperproc, 0, "Minimum files allowed open per process");
SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
	   &maxfilesperproc, 0, "Maximum files allowed open per process");
SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW,
	   &maxfilesperuser, 0, "Maximum files allowed open per user");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
	   &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
	   &maxfilesrootres, 0, "Descriptors reserved for root use");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
	   &nfiles, 0, "System-wide number of open files");

static void
fildesc_drvinit(void *unused)
{
	int fd;

	for (fd = 0; fd < NUMFDESC; fd++) {
		make_dev(&fildesc_ops, fd,
			 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
	}

	make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
	make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
	make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
}

struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
	.fo_shutdown = badfo_shutdown
};

int
badfo_readwrite(
	struct file *fp,
	struct uio *uio,
	struct ucred *cred,
	int flags
) {
	return (EBADF);
}

int
badfo_ioctl(struct file *fp, u_long com, caddr_t data,
	    struct ucred *cred, struct sysmsg *msgv)
{
	return (EBADF);
}

/*
 * Must return an error to prevent registration, typically
 * due to a revoked descriptor (file_filtops assigned).
 */
int
badfo_kqfilter(struct file *fp, struct knote *kn)
{
	return (EOPNOTSUPP);
}

int
badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	return (EBADF);
}

int
badfo_close(struct file *fp)
{
	return (EBADF);
}

int
badfo_shutdown(struct file *fp, int how)
{
	return (EBADF);
}

int
nofo_shutdown(struct file *fp, int how)
{
	return (EOPNOTSUPP);
}

SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
	fildesc_drvinit, NULL);

static void
filelist_heads_init(void *arg __unused)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];

		spin_init(&head->spin, "filehead_spin");
		LIST_INIT(&head->list);
	}
}

SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY, filelist_heads_init, NULL);