1 /* 2 * Copyright (c) 2005-2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Jeffrey Hsu and Matthew Dillon. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * 35 * Copyright (c) 1982, 1986, 1989, 1991, 1993 36 * The Regents of the University of California. All rights reserved. 37 * (c) UNIX System Laboratories, Inc. 38 * All or some portions of this file are derived from material licensed 39 * to the University of California by American Telephone and Telegraph 40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 41 * the permission of UNIX System Laboratories, Inc. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. Neither the name of the University nor the names of its contributors 52 * may be used to endorse or promote products derived from this software 53 * without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 65 * SUCH DAMAGE. 66 * 67 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94 68 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $ 69 */ 70 71 #include <sys/param.h> 72 #include <sys/systm.h> 73 #include <sys/malloc.h> 74 #include <sys/sysmsg.h> 75 #include <sys/conf.h> 76 #include <sys/device.h> 77 #include <sys/file.h> 78 #include <sys/filedesc.h> 79 #include <sys/kernel.h> 80 #include <sys/sysctl.h> 81 #include <sys/vnode.h> 82 #include <sys/proc.h> 83 #include <sys/nlookup.h> 84 #include <sys/stat.h> 85 #include <sys/filio.h> 86 #include <sys/fcntl.h> 87 #include <sys/unistd.h> 88 #include <sys/resourcevar.h> 89 #include <sys/event.h> 90 #include <sys/kern_syscall.h> 91 #include <sys/kcore.h> 92 #include <sys/kinfo.h> 93 #include <sys/un.h> 94 #include <sys/objcache.h> 95 96 #include <vm/vm.h> 97 #include <vm/vm_extern.h> 98 99 #include <sys/file2.h> 100 #include <sys/spinlock2.h> 101 102 static int fdalloc_locked(struct proc *p, struct filedesc *fdp, 103 int want, int *result); 104 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd); 105 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr); 106 static struct file *funsetfd_locked (struct filedesc *fdp, int fd); 107 static void ffree(struct file *fp); 108 109 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table"); 110 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader", 111 "file desc to leader structures"); 112 MALLOC_DEFINE(M_FILE, "file", "Open file structure"); 113 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures"); 114 115 static struct krate krate_uidinfo = { .freq = 1 }; 116 117 static d_open_t fdopen; 118 #define NUMFDESC 64 119 120 #define CDEV_MAJOR 22 121 static struct dev_ops fildesc_ops = { 122 { "FD", 0, 0 }, 123 .d_open = fdopen, 124 }; 125 126 /* 127 * Descriptor management. 128 */ 129 #ifndef NFILELIST_HEADS 130 #define NFILELIST_HEADS 257 /* primary number */ 131 #endif 132 133 struct filelist_head { 134 struct spinlock spin; 135 struct filelist list; 136 } __cachealign; 137 138 static struct filelist_head filelist_heads[NFILELIST_HEADS]; 139 140 static int nfiles; /* actual number of open files */ 141 extern int cmask; 142 143 struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token); 144 145 static struct objcache *file_objcache; 146 147 static struct objcache_malloc_args file_malloc_args = { 148 .objsize = sizeof(struct file), 149 .mtype = M_FILE 150 }; 151 152 /* 153 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared. 154 * 155 * must be called with fdp->fd_spin exclusively held 156 */ 157 static __inline 158 void 159 fdfixup_locked(struct filedesc *fdp, int fd) 160 { 161 if (fd < fdp->fd_freefile) { 162 fdp->fd_freefile = fd; 163 } 164 while (fdp->fd_lastfile >= 0 && 165 fdp->fd_files[fdp->fd_lastfile].fp == NULL && 166 fdp->fd_files[fdp->fd_lastfile].reserved == 0 167 ) { 168 --fdp->fd_lastfile; 169 } 170 } 171 172 /* 173 * Clear the fd thread caches for this fdnode. 174 * 175 * If match_fdc is NULL, all thread caches of fdn will be cleared. 176 * The caller must hold fdp->fd_spin exclusively. The threads caching 177 * the descriptor do not have to be the current thread. The (status) 178 * argument is ignored. 179 * 180 * If match_fdc is not NULL, only the match_fdc's cache will be cleared. 181 * The caller must hold fdp->fd_spin shared and match_fdc must match a 182 * fdcache entry in curthread. match_fdc has been locked by the caller 183 * and had the specified (status). 184 * 185 * Since we are matching against a fp in the fdp (which must still be present 186 * at this time), fp will have at least two refs on any match and we can 187 * decrement the count trivially. 188 */ 189 static 190 void 191 fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status) 192 { 193 struct fdcache *fdc; 194 struct file *fp; 195 int i; 196 197 /* 198 * match_fdc == NULL We are cleaning out all tdcache entries 199 * for the fdn and hold fdp->fd_spin exclusively. 200 * This can race against the target threads 201 * cleaning out specific entries. 202 * 203 * match_fdc != NULL We are cleaning out a specific tdcache 204 * entry on behalf of the owning thread 205 * and hold fdp->fd_spin shared. The thread 206 * has already locked the entry. This cannot 207 * race. 208 */ 209 fp = fdn->fp; 210 for (i = 0; i < NTDCACHEFD; ++i) { 211 if ((fdc = fdn->tdcache[i]) == NULL) 212 continue; 213 214 /* 215 * If match_fdc is non-NULL we are being asked to 216 * clear a specific fdc owned by curthread. There must 217 * be exactly one match. The caller has already locked 218 * the cache entry and will dispose of the lock after 219 * we return. 220 * 221 * Since we also have a shared lock on fdp, we 222 * can do this without atomic ops. 223 */ 224 if (match_fdc) { 225 if (fdc != match_fdc) 226 continue; 227 fdn->tdcache[i] = NULL; 228 KASSERT(fp == fdc->fp, 229 ("fclearcache(1): fp mismatch %p/%p\n", 230 fp, fdc->fp)); 231 fdc->fp = NULL; 232 fdc->fd = -1; 233 234 /* 235 * status can be 0 or 2. If 2 the ref is borrowed, 236 * if 0 the ref is not borrowed and we have to drop 237 * it. 238 */ 239 if (status == 0) 240 atomic_add_int(&fp->f_count, -1); 241 fdn->isfull = 0; /* heuristic */ 242 return; 243 } 244 245 /* 246 * Otherwise we hold an exclusive spin-lock and can only 247 * race thread consumers borrowing cache entries. 248 * 249 * Acquire the lock and dispose of the entry. We have to 250 * spin until we get the lock. 251 */ 252 for (;;) { 253 status = atomic_swap_int(&fdc->locked, 1); 254 if (status == 1) { /* foreign lock, retry */ 255 cpu_pause(); 256 continue; 257 } 258 fdn->tdcache[i] = NULL; 259 KASSERT(fp == fdc->fp, 260 ("fclearcache(2): fp mismatch %p/%p\n", 261 fp, fdc->fp)); 262 fdc->fp = NULL; 263 fdc->fd = -1; 264 if (status == 0) 265 atomic_add_int(&fp->f_count, -1); 266 fdn->isfull = 0; /* heuristic */ 267 atomic_swap_int(&fdc->locked, 0); 268 break; 269 } 270 } 271 KKASSERT(match_fdc == NULL); 272 } 273 274 /* 275 * Retrieve the fp for the specified fd given the specified file descriptor 276 * table. The fdp does not have to be owned by the current process. 277 * If flags != -1, fp->f_flag must contain at least one of the flags. 278 * 279 * This function is not able to cache the fp. 280 */ 281 struct file * 282 holdfp_fdp(struct filedesc *fdp, int fd, int flag) 283 { 284 struct file *fp; 285 286 spin_lock_shared(&fdp->fd_spin); 287 if (((u_int)fd) < fdp->fd_nfiles) { 288 fp = fdp->fd_files[fd].fp; /* can be NULL */ 289 if (fp) { 290 if ((fp->f_flag & flag) == 0 && flag != -1) { 291 fp = NULL; 292 } else { 293 fhold(fp); 294 } 295 } 296 } else { 297 fp = NULL; 298 } 299 spin_unlock_shared(&fdp->fd_spin); 300 301 return fp; 302 } 303 304 struct file * 305 holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag) 306 { 307 struct file *fp; 308 309 if (((u_int)fd) < fdp->fd_nfiles) { 310 fp = fdp->fd_files[fd].fp; /* can be NULL */ 311 if (fp) { 312 if ((fp->f_flag & flag) == 0 && flag != -1) { 313 fp = NULL; 314 } else { 315 fhold(fp); 316 } 317 } 318 } else { 319 fp = NULL; 320 } 321 return fp; 322 } 323 324 /* 325 * Acquire the fp for the specified file descriptor, using the thread 326 * cache if possible and caching it if possible. 327 * 328 * td must be the curren thread. 329 */ 330 static 331 struct file * 332 _holdfp_cache(thread_t td, int fd) 333 { 334 struct filedesc *fdp; 335 struct fdcache *fdc; 336 struct fdcache *best; 337 struct fdnode *fdn; 338 struct file *fp; 339 int status; 340 int delta; 341 int i; 342 343 /* 344 * Fast 345 */ 346 for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) { 347 if (fdc->fd != fd || fdc->fp == NULL) 348 continue; 349 status = atomic_swap_int(&fdc->locked, 1); 350 351 /* 352 * If someone else has locked our cache entry they are in 353 * the middle of clearing it, skip the entry. 354 */ 355 if (status == 1) 356 continue; 357 358 /* 359 * We have locked the entry, but if it no longer matches 360 * restore the previous state (0 or 2) and skip the entry. 361 */ 362 if (fdc->fd != fd || fdc->fp == NULL) { 363 atomic_swap_int(&fdc->locked, status); 364 continue; 365 } 366 367 /* 368 * We have locked a valid entry. We can borrow the ref 369 * for a mode 0 entry. We can get a valid fp for a mode 370 * 2 entry but not borrow the ref. 371 */ 372 if (status == 0) { 373 fp = fdc->fp; 374 fdc->lru = ++td->td_fdcache_lru; 375 atomic_swap_int(&fdc->locked, 2); 376 377 return fp; 378 } 379 if (status == 2) { 380 fp = fdc->fp; 381 fhold(fp); 382 fdc->lru = ++td->td_fdcache_lru; 383 atomic_swap_int(&fdc->locked, 2); 384 385 return fp; 386 } 387 KKASSERT(0); 388 } 389 390 /* 391 * Lookup the descriptor the slow way. This can contend against 392 * modifying operations in a multi-threaded environment and cause 393 * cache line ping ponging otherwise. 394 */ 395 fdp = td->td_proc->p_fd; 396 spin_lock_shared(&fdp->fd_spin); 397 398 if (((u_int)fd) < fdp->fd_nfiles) { 399 fp = fdp->fd_files[fd].fp; /* can be NULL */ 400 if (fp) { 401 fhold(fp); 402 if (fdp->fd_files[fd].isfull == 0) 403 goto enter; 404 } 405 } else { 406 fp = NULL; 407 } 408 spin_unlock_shared(&fdp->fd_spin); 409 410 return fp; 411 412 /* 413 * We found a valid fp and held it, fdp is still shared locked. 414 * Enter the fp into the per-thread cache. Find the oldest entry 415 * via lru, or an empty entry. 416 * 417 * Because fdp's spinlock is held (shared is fine), no other 418 * thread should be in the middle of clearing our selected entry. 419 */ 420 enter: 421 best = &td->td_fdcache[0]; 422 for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) { 423 if (fdc->fp == NULL) { 424 best = fdc; 425 break; 426 } 427 delta = fdc->lru - best->lru; 428 if (delta < 0) 429 best = fdc; 430 } 431 432 /* 433 * Replace best 434 * 435 * Don't enter into the cache if we cannot get the lock. 436 */ 437 status = atomic_swap_int(&best->locked, 1); 438 if (status == 1) 439 goto done; 440 441 /* 442 * Clear the previous cache entry if present 443 */ 444 if (best->fp) { 445 KKASSERT(best->fd >= 0); 446 fclearcache(&fdp->fd_files[best->fd], best, status); 447 } 448 449 /* 450 * Create our new cache entry. This entry is 'safe' until we tie 451 * into the fdnode. If we cannot tie in, we will clear the entry. 452 */ 453 best->fd = fd; 454 best->fp = fp; 455 best->lru = ++td->td_fdcache_lru; 456 best->locked = 2; /* borrowed ref */ 457 458 fdn = &fdp->fd_files[fd]; 459 for (i = 0; i < NTDCACHEFD; ++i) { 460 if (fdn->tdcache[i] == NULL && 461 atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) { 462 goto done; 463 } 464 } 465 fdn->isfull = 1; /* no space */ 466 best->fd = -1; 467 best->fp = NULL; 468 best->locked = 0; 469 done: 470 spin_unlock_shared(&fdp->fd_spin); 471 472 return fp; 473 } 474 475 /* 476 * holdfp(), bypassing the cache in order to also be able to return 477 * the descriptor flags. A bit of a hack. 478 */ 479 static 480 struct file * 481 _holdfp2(thread_t td, int fd, char *fflagsp) 482 { 483 struct filedesc *fdp; 484 struct file *fp; 485 486 /* 487 * Lookup the descriptor the slow way. This can contend against 488 * modifying operations in a multi-threaded environment and cause 489 * cache line ping ponging otherwise. 490 */ 491 fdp = td->td_proc->p_fd; 492 spin_lock_shared(&fdp->fd_spin); 493 494 if (((u_int)fd) < fdp->fd_nfiles) { 495 fp = fdp->fd_files[fd].fp; /* can be NULL */ 496 if (fp) { 497 *fflagsp = fdp->fd_files[fd].fileflags; 498 fhold(fp); 499 } 500 } else { 501 fp = NULL; 502 } 503 spin_unlock_shared(&fdp->fd_spin); 504 505 return fp; 506 } 507 508 509 /* 510 * Drop the file pointer and return to the thread cache if possible. 511 * 512 * Caller must not hold fdp's spin lock. 513 * td must be the current thread. 514 */ 515 void 516 dropfp(thread_t td, int fd, struct file *fp) 517 { 518 struct filedesc *fdp; 519 struct fdcache *fdc; 520 int status; 521 522 fdp = td->td_proc->p_fd; 523 524 /* 525 * If our placeholder is still present we can re-cache the ref. 526 * 527 * Note that we can race an fclearcache(). 528 */ 529 for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) { 530 if (fdc->fp != fp || fdc->fd != fd) 531 continue; 532 status = atomic_swap_int(&fdc->locked, 1); 533 switch(status) { 534 case 0: 535 /* 536 * Not in mode 2, fdrop fp without caching. 537 */ 538 atomic_swap_int(&fdc->locked, 0); 539 break; 540 case 1: 541 /* 542 * Not in mode 2, locked by someone else. 543 * fdrop fp without caching. 544 */ 545 break; 546 case 2: 547 /* 548 * Intact borrowed ref, return to mode 0 549 * indicating that we have returned the ref. 550 * 551 * Return the borrowed ref (2->1->0) 552 */ 553 if (fdc->fp == fp && fdc->fd == fd) { 554 atomic_swap_int(&fdc->locked, 0); 555 return; 556 } 557 atomic_swap_int(&fdc->locked, 2); 558 break; 559 } 560 } 561 562 /* 563 * Failed to re-cache, drop the fp without caching. 564 */ 565 fdrop(fp); 566 } 567 568 /* 569 * Clear all descriptors cached in the per-thread fd cache for 570 * the specified thread. 571 * 572 * Caller must not hold p_fd->spin. This function will temporarily 573 * obtain a shared spin lock. 574 */ 575 void 576 fexitcache(thread_t td) 577 { 578 struct filedesc *fdp; 579 struct fdcache *fdc; 580 int status; 581 int i; 582 583 if (td->td_proc == NULL) 584 return; 585 fdp = td->td_proc->p_fd; 586 if (fdp == NULL) 587 return; 588 589 /* 590 * A shared lock is sufficient as the caller controls td and we 591 * are only clearing td's cache. 592 */ 593 spin_lock_shared(&fdp->fd_spin); 594 for (i = 0; i < NFDCACHE; ++i) { 595 fdc = &td->td_fdcache[i]; 596 if (fdc->fp) { 597 status = atomic_swap_int(&fdc->locked, 1); 598 if (status == 1) { 599 cpu_pause(); 600 --i; 601 continue; 602 } 603 if (fdc->fp) { 604 KKASSERT(fdc->fd >= 0); 605 fclearcache(&fdp->fd_files[fdc->fd], fdc, 606 status); 607 } 608 atomic_swap_int(&fdc->locked, 0); 609 } 610 } 611 spin_unlock_shared(&fdp->fd_spin); 612 } 613 614 static __inline struct filelist_head * 615 fp2filelist(const struct file *fp) 616 { 617 u_int i; 618 619 i = (u_int)(uintptr_t)fp % NFILELIST_HEADS; 620 return &filelist_heads[i]; 621 } 622 623 static __inline 624 struct plimit * 625 readplimits(struct proc *p) 626 { 627 thread_t td = curthread; 628 struct plimit *limit; 629 630 limit = td->td_limit; 631 if (limit != p->p_limit) { 632 spin_lock_shared(&p->p_spin); 633 limit = p->p_limit; 634 atomic_add_int(&limit->p_refcnt, 1); 635 spin_unlock_shared(&p->p_spin); 636 if (td->td_limit) 637 plimit_free(td->td_limit); 638 td->td_limit = limit; 639 } 640 return limit; 641 } 642 643 /* 644 * System calls on descriptors. 645 */ 646 int 647 sys_getdtablesize(struct sysmsg *sysmsg, const struct getdtablesize_args *uap) 648 { 649 struct proc *p = curproc; 650 struct plimit *limit = readplimits(p); 651 int dtsize; 652 653 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX) 654 dtsize = INT_MAX; 655 else 656 dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur; 657 658 if (dtsize > maxfilesperproc) 659 dtsize = maxfilesperproc; 660 if (dtsize < minfilesperproc) 661 dtsize = minfilesperproc; 662 if (p->p_ucred->cr_uid && dtsize > maxfilesperuser) 663 dtsize = maxfilesperuser; 664 sysmsg->sysmsg_result = dtsize; 665 return (0); 666 } 667 668 /* 669 * Duplicate a file descriptor to a particular value. 670 * 671 * note: keep in mind that a potential race condition exists when closing 672 * descriptors from a shared descriptor table (via rfork). 673 */ 674 int 675 sys_dup2(struct sysmsg *sysmsg, const struct dup2_args *uap) 676 { 677 int error; 678 int fd = 0; 679 680 error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd); 681 sysmsg->sysmsg_fds[0] = fd; 682 683 return (error); 684 } 685 686 /* 687 * Duplicate a file descriptor. 688 */ 689 int 690 sys_dup(struct sysmsg *sysmsg, const struct dup_args *uap) 691 { 692 int error; 693 int fd = 0; 694 695 error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd); 696 sysmsg->sysmsg_fds[0] = fd; 697 698 return (error); 699 } 700 701 /* 702 * MPALMOSTSAFE - acquires mplock for fp operations 703 */ 704 int 705 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred) 706 { 707 struct thread *td = curthread; 708 struct proc *p = td->td_proc; 709 struct file *fp; 710 struct vnode *vp; 711 u_int newmin; 712 u_int oflags; 713 u_int nflags; 714 int closedcounter; 715 int tmp, error, flg = F_POSIX; 716 717 KKASSERT(p); 718 719 /* 720 * Operations on file descriptors that do not require a file pointer. 721 */ 722 switch (cmd) { 723 case F_GETFD: 724 error = fgetfdflags(p->p_fd, fd, &tmp); 725 if (error == 0) 726 dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0; 727 return (error); 728 729 case F_SETFD: 730 if (dat->fc_cloexec & FD_CLOEXEC) 731 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE); 732 else 733 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE); 734 return (error); 735 case F_DUPFD: 736 newmin = dat->fc_fd; 737 error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin, 738 &dat->fc_fd); 739 return (error); 740 case F_DUPFD_CLOEXEC: 741 newmin = dat->fc_fd; 742 error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL, 743 fd, newmin, &dat->fc_fd); 744 return (error); 745 case F_DUP2FD: 746 newmin = dat->fc_fd; 747 error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd); 748 return (error); 749 case F_DUP2FD_CLOEXEC: 750 newmin = dat->fc_fd; 751 error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin, 752 &dat->fc_fd); 753 return (error); 754 default: 755 break; 756 } 757 758 /* 759 * Operations on file pointers 760 */ 761 closedcounter = p->p_fd->fd_closedcounter; 762 if ((fp = holdfp(td, fd, -1)) == NULL) 763 return (EBADF); 764 765 switch (cmd) { 766 case F_GETFL: 767 dat->fc_flags = OFLAGS(fp->f_flag); 768 error = 0; 769 break; 770 771 case F_SETFL: 772 oflags = fp->f_flag; 773 nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS; 774 nflags |= oflags & ~FCNTLFLAGS; 775 776 error = 0; 777 if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY)) 778 error = EINVAL; 779 if (error == 0 && ((nflags ^ oflags) & FASYNC)) { 780 tmp = nflags & FASYNC; 781 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, 782 cred, NULL); 783 } 784 785 /* 786 * If no error, must be atomically set. 787 */ 788 while (error == 0) { 789 oflags = fp->f_flag; 790 cpu_ccfence(); 791 nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS); 792 if (atomic_cmpset_int(&fp->f_flag, oflags, nflags)) 793 break; 794 cpu_pause(); 795 } 796 break; 797 798 case F_GETOWN: 799 error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, 800 cred, NULL); 801 break; 802 803 case F_SETOWN: 804 error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, 805 cred, NULL); 806 break; 807 808 case F_SETLKW: 809 flg |= F_WAIT; 810 /* Fall into F_SETLK */ 811 812 case F_SETLK: 813 if (fp->f_type != DTYPE_VNODE) { 814 error = EBADF; 815 break; 816 } 817 vp = (struct vnode *)fp->f_data; 818 819 /* 820 * copyin/lockop may block 821 */ 822 if (dat->fc_flock.l_whence == SEEK_CUR) 823 dat->fc_flock.l_start += fp->f_offset; 824 825 switch (dat->fc_flock.l_type) { 826 case F_RDLCK: 827 if ((fp->f_flag & FREAD) == 0) { 828 error = EBADF; 829 break; 830 } 831 if (p->p_leader->p_advlock_flag == 0) 832 p->p_leader->p_advlock_flag = 1; 833 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 834 &dat->fc_flock, flg); 835 break; 836 case F_WRLCK: 837 if ((fp->f_flag & FWRITE) == 0) { 838 error = EBADF; 839 break; 840 } 841 if (p->p_leader->p_advlock_flag == 0) 842 p->p_leader->p_advlock_flag = 1; 843 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 844 &dat->fc_flock, flg); 845 break; 846 case F_UNLCK: 847 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 848 &dat->fc_flock, F_POSIX); 849 break; 850 default: 851 error = EINVAL; 852 break; 853 } 854 855 /* 856 * It is possible to race a close() on the descriptor while 857 * we were blocked getting the lock. If this occurs the 858 * close might not have caught the lock. 859 */ 860 if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) { 861 dat->fc_flock.l_whence = SEEK_SET; 862 dat->fc_flock.l_start = 0; 863 dat->fc_flock.l_len = 0; 864 dat->fc_flock.l_type = F_UNLCK; 865 VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 866 F_UNLCK, &dat->fc_flock, F_POSIX); 867 } 868 break; 869 870 case F_GETLK: 871 if (fp->f_type != DTYPE_VNODE) { 872 error = EBADF; 873 break; 874 } 875 vp = (struct vnode *)fp->f_data; 876 /* 877 * copyin/lockop may block 878 */ 879 if (dat->fc_flock.l_type != F_RDLCK && 880 dat->fc_flock.l_type != F_WRLCK && 881 dat->fc_flock.l_type != F_UNLCK) { 882 error = EINVAL; 883 break; 884 } 885 if (dat->fc_flock.l_whence == SEEK_CUR) 886 dat->fc_flock.l_start += fp->f_offset; 887 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, 888 &dat->fc_flock, F_POSIX); 889 break; 890 default: 891 error = EINVAL; 892 break; 893 } 894 895 fdrop(fp); 896 return (error); 897 } 898 899 /* 900 * The file control system call. 901 */ 902 int 903 sys_fcntl(struct sysmsg *sysmsg, const struct fcntl_args *uap) 904 { 905 union fcntl_dat dat; 906 int error; 907 908 switch (uap->cmd) { 909 case F_DUPFD: 910 case F_DUP2FD: 911 case F_DUPFD_CLOEXEC: 912 case F_DUP2FD_CLOEXEC: 913 dat.fc_fd = uap->arg; 914 break; 915 case F_SETFD: 916 dat.fc_cloexec = uap->arg; 917 break; 918 case F_SETFL: 919 dat.fc_flags = uap->arg; 920 break; 921 case F_SETOWN: 922 dat.fc_owner = uap->arg; 923 break; 924 case F_SETLKW: 925 case F_SETLK: 926 case F_GETLK: 927 error = copyin((caddr_t)uap->arg, &dat.fc_flock, 928 sizeof(struct flock)); 929 if (error) 930 return (error); 931 break; 932 } 933 934 error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred); 935 936 if (error == 0) { 937 switch (uap->cmd) { 938 case F_DUPFD: 939 case F_DUP2FD: 940 case F_DUPFD_CLOEXEC: 941 case F_DUP2FD_CLOEXEC: 942 sysmsg->sysmsg_result = dat.fc_fd; 943 break; 944 case F_GETFD: 945 sysmsg->sysmsg_result = dat.fc_cloexec; 946 break; 947 case F_GETFL: 948 sysmsg->sysmsg_result = dat.fc_flags; 949 break; 950 case F_GETOWN: 951 sysmsg->sysmsg_result = dat.fc_owner; 952 break; 953 case F_GETLK: 954 error = copyout(&dat.fc_flock, (caddr_t)uap->arg, 955 sizeof(struct flock)); 956 break; 957 } 958 } 959 960 return (error); 961 } 962 963 /* 964 * Common code for dup, dup2, and fcntl(F_DUPFD). 965 * 966 * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and 967 * DUP_CLOEXEC. 968 * 969 * DUP_FCNTL is for handling EINVAL vs. EBADF differences between 970 * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX). 971 * The next two flags are mutually exclusive, and the fourth is optional. 972 * DUP_FIXED tells kern_dup() to destructively dup over an existing file 973 * descriptor if "new" is already open. DUP_VARIABLE tells kern_dup() 974 * to find the lowest unused file descriptor that is greater than or 975 * equal to "new". DUP_CLOEXEC, which works with either of the first 976 * two flags, sets the close-on-exec flag on the "new" file descriptor. 977 */ 978 int 979 kern_dup(int flags, int old, int new, int *res) 980 { 981 struct thread *td = curthread; 982 struct proc *p = td->td_proc; 983 struct plimit *limit = readplimits(p); 984 struct filedesc *fdp = p->p_fd; 985 struct file *fp; 986 struct file *delfp; 987 int oldflags; 988 int holdleaders; 989 int dtsize; 990 int error, newfd; 991 992 /* 993 * Verify that we have a valid descriptor to dup from and 994 * possibly to dup to. When the new descriptor is out of 995 * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must 996 * return EINVAL, while dup2() returns EBADF in 997 * this case. 998 * 999 * NOTE: maxfilesperuser is not applicable to dup() 1000 */ 1001 retry: 1002 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX) 1003 dtsize = INT_MAX; 1004 else 1005 dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur; 1006 if (dtsize > maxfilesperproc) 1007 dtsize = maxfilesperproc; 1008 if (dtsize < minfilesperproc) 1009 dtsize = minfilesperproc; 1010 1011 if (new < 0 || new >= dtsize) 1012 return (flags & DUP_FCNTL ? EINVAL : EBADF); 1013 1014 spin_lock(&fdp->fd_spin); 1015 if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) { 1016 spin_unlock(&fdp->fd_spin); 1017 return (EBADF); 1018 } 1019 if ((flags & DUP_FIXED) && old == new) { 1020 *res = new; 1021 if (flags & DUP_CLOEXEC) 1022 fdp->fd_files[new].fileflags |= UF_EXCLOSE; 1023 spin_unlock(&fdp->fd_spin); 1024 return (0); 1025 } 1026 fp = fdp->fd_files[old].fp; 1027 oldflags = fdp->fd_files[old].fileflags; 1028 fhold(fp); 1029 1030 /* 1031 * Allocate a new descriptor if DUP_VARIABLE, or expand the table 1032 * if the requested descriptor is beyond the current table size. 1033 * 1034 * This can block. Retry if the source descriptor no longer matches 1035 * or if our expectation in the expansion case races. 1036 * 1037 * If we are not expanding or allocating a new decriptor, then reset 1038 * the target descriptor to a reserved state so we have a uniform 1039 * setup for the next code block. 1040 */ 1041 if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) { 1042 error = fdalloc_locked(p, fdp, new, &newfd); 1043 if (error) { 1044 spin_unlock(&fdp->fd_spin); 1045 fdrop(fp); 1046 return (error); 1047 } 1048 /* 1049 * Check for ripout 1050 */ 1051 if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) { 1052 fsetfd_locked(fdp, NULL, newfd); 1053 spin_unlock(&fdp->fd_spin); 1054 fdrop(fp); 1055 goto retry; 1056 } 1057 /* 1058 * Check for expansion race 1059 */ 1060 if ((flags & DUP_VARIABLE) == 0 && new != newfd) { 1061 fsetfd_locked(fdp, NULL, newfd); 1062 spin_unlock(&fdp->fd_spin); 1063 fdrop(fp); 1064 goto retry; 1065 } 1066 /* 1067 * Check for ripout, newfd reused old (this case probably 1068 * can't occur). 1069 */ 1070 if (old == newfd) { 1071 fsetfd_locked(fdp, NULL, newfd); 1072 spin_unlock(&fdp->fd_spin); 1073 fdrop(fp); 1074 goto retry; 1075 } 1076 new = newfd; 1077 delfp = NULL; 1078 } else { 1079 if (fdp->fd_files[new].reserved) { 1080 spin_unlock(&fdp->fd_spin); 1081 fdrop(fp); 1082 kprintf("Warning: dup(): target descriptor %d is " 1083 "reserved, waiting for it to be resolved\n", 1084 new); 1085 tsleep(fdp, 0, "fdres", hz); 1086 goto retry; 1087 } 1088 1089 /* 1090 * If the target descriptor was never allocated we have 1091 * to allocate it. If it was we have to clean out the 1092 * old descriptor. delfp inherits the ref from the 1093 * descriptor table. 1094 */ 1095 ++fdp->fd_closedcounter; 1096 fclearcache(&fdp->fd_files[new], NULL, 0); 1097 ++fdp->fd_closedcounter; 1098 delfp = fdp->fd_files[new].fp; 1099 fdp->fd_files[new].fp = NULL; 1100 fdp->fd_files[new].reserved = 1; 1101 if (delfp == NULL) { 1102 fdreserve_locked(fdp, new, 1); 1103 if (new > fdp->fd_lastfile) 1104 fdp->fd_lastfile = new; 1105 } 1106 1107 } 1108 1109 /* 1110 * NOTE: still holding an exclusive spinlock 1111 */ 1112 1113 /* 1114 * If a descriptor is being overwritten we may hve to tell 1115 * fdfree() to sleep to ensure that all relevant process 1116 * leaders can be traversed in closef(). 1117 */ 1118 if (delfp != NULL && p->p_fdtol != NULL) { 1119 fdp->fd_holdleaderscount++; 1120 holdleaders = 1; 1121 } else { 1122 holdleaders = 0; 1123 } 1124 KASSERT(delfp == NULL || (flags & DUP_FIXED), 1125 ("dup() picked an open file")); 1126 1127 /* 1128 * Duplicate the source descriptor, update lastfile. If the new 1129 * descriptor was not allocated and we aren't replacing an existing 1130 * descriptor we have to mark the descriptor as being in use. 1131 * 1132 * The fd_files[] array inherits fp's hold reference. 1133 */ 1134 fsetfd_locked(fdp, fp, new); 1135 if ((flags & DUP_CLOEXEC) != 0) 1136 fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE; 1137 else 1138 fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE; 1139 spin_unlock(&fdp->fd_spin); 1140 fdrop(fp); 1141 *res = new; 1142 1143 /* 1144 * If we dup'd over a valid file, we now own the reference to it 1145 * and must dispose of it using closef() semantics (as if a 1146 * close() were performed on it). 1147 */ 1148 if (delfp) { 1149 if (SLIST_FIRST(&delfp->f_klist)) 1150 knote_fdclose(delfp, fdp, new); 1151 closef(delfp, p); 1152 if (holdleaders) { 1153 spin_lock(&fdp->fd_spin); 1154 fdp->fd_holdleaderscount--; 1155 if (fdp->fd_holdleaderscount == 0 && 1156 fdp->fd_holdleaderswakeup != 0) { 1157 fdp->fd_holdleaderswakeup = 0; 1158 spin_unlock(&fdp->fd_spin); 1159 wakeup(&fdp->fd_holdleaderscount); 1160 } else { 1161 spin_unlock(&fdp->fd_spin); 1162 } 1163 } 1164 } 1165 return (0); 1166 } 1167 1168 /* 1169 * If sigio is on the list associated with a process or process group, 1170 * disable signalling from the device, remove sigio from the list and 1171 * free sigio. 1172 */ 1173 void 1174 funsetown(struct sigio **sigiop) 1175 { 1176 struct pgrp *pgrp; 1177 struct proc *p; 1178 struct sigio *sigio; 1179 1180 if ((sigio = *sigiop) != NULL) { 1181 lwkt_gettoken(&sigio_token); /* protect sigio */ 1182 KKASSERT(sigiop == sigio->sio_myref); 1183 sigio = *sigiop; 1184 *sigiop = NULL; 1185 lwkt_reltoken(&sigio_token); 1186 } 1187 if (sigio == NULL) 1188 return; 1189 1190 if (sigio->sio_pgid < 0) { 1191 pgrp = sigio->sio_pgrp; 1192 sigio->sio_pgrp = NULL; 1193 lwkt_gettoken(&pgrp->pg_token); 1194 SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio); 1195 lwkt_reltoken(&pgrp->pg_token); 1196 pgrel(pgrp); 1197 } else /* if ((*sigiop)->sio_pgid > 0) */ { 1198 p = sigio->sio_proc; 1199 sigio->sio_proc = NULL; 1200 PHOLD(p); 1201 lwkt_gettoken(&p->p_token); 1202 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio); 1203 lwkt_reltoken(&p->p_token); 1204 PRELE(p); 1205 } 1206 crfree(sigio->sio_ucred); 1207 sigio->sio_ucred = NULL; 1208 kfree(sigio, M_SIGIO); 1209 } 1210 1211 /* 1212 * Free a list of sigio structures. Caller is responsible for ensuring 1213 * that the list is MPSAFE. 1214 */ 1215 void 1216 funsetownlst(struct sigiolst *sigiolst) 1217 { 1218 struct sigio *sigio; 1219 1220 while ((sigio = SLIST_FIRST(sigiolst)) != NULL) 1221 funsetown(sigio->sio_myref); 1222 } 1223 1224 /* 1225 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg). 1226 * 1227 * After permission checking, add a sigio structure to the sigio list for 1228 * the process or process group. 1229 */ 1230 int 1231 fsetown(pid_t pgid, struct sigio **sigiop) 1232 { 1233 struct proc *proc = NULL; 1234 struct pgrp *pgrp = NULL; 1235 struct sigio *sigio; 1236 int error; 1237 1238 if (pgid == 0) { 1239 funsetown(sigiop); 1240 return (0); 1241 } 1242 1243 if (pgid > 0) { 1244 proc = pfind(pgid); 1245 if (proc == NULL) { 1246 error = ESRCH; 1247 goto done; 1248 } 1249 1250 /* 1251 * Policy - Don't allow a process to FSETOWN a process 1252 * in another session. 1253 * 1254 * Remove this test to allow maximum flexibility or 1255 * restrict FSETOWN to the current process or process 1256 * group for maximum safety. 1257 */ 1258 if (proc->p_session != curproc->p_session) { 1259 error = EPERM; 1260 goto done; 1261 } 1262 } else /* if (pgid < 0) */ { 1263 pgrp = pgfind(-pgid); 1264 if (pgrp == NULL) { 1265 error = ESRCH; 1266 goto done; 1267 } 1268 1269 /* 1270 * Policy - Don't allow a process to FSETOWN a process 1271 * in another session. 1272 * 1273 * Remove this test to allow maximum flexibility or 1274 * restrict FSETOWN to the current process or process 1275 * group for maximum safety. 1276 */ 1277 if (pgrp->pg_session != curproc->p_session) { 1278 error = EPERM; 1279 goto done; 1280 } 1281 } 1282 sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO); 1283 if (pgid > 0) { 1284 KKASSERT(pgrp == NULL); 1285 lwkt_gettoken(&proc->p_token); 1286 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio); 1287 sigio->sio_proc = proc; 1288 lwkt_reltoken(&proc->p_token); 1289 } else { 1290 KKASSERT(proc == NULL); 1291 lwkt_gettoken(&pgrp->pg_token); 1292 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio); 1293 sigio->sio_pgrp = pgrp; 1294 lwkt_reltoken(&pgrp->pg_token); 1295 pgrp = NULL; 1296 } 1297 sigio->sio_pgid = pgid; 1298 sigio->sio_ucred = crhold(curthread->td_ucred); 1299 /* It would be convenient if p_ruid was in ucred. */ 1300 sigio->sio_ruid = sigio->sio_ucred->cr_ruid; 1301 sigio->sio_myref = sigiop; 1302 1303 lwkt_gettoken(&sigio_token); 1304 while (*sigiop) 1305 funsetown(sigiop); 1306 *sigiop = sigio; 1307 lwkt_reltoken(&sigio_token); 1308 error = 0; 1309 done: 1310 if (pgrp) 1311 pgrel(pgrp); 1312 if (proc) 1313 PRELE(proc); 1314 return (error); 1315 } 1316 1317 /* 1318 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg). 1319 */ 1320 pid_t 1321 fgetown(struct sigio **sigiop) 1322 { 1323 struct sigio *sigio; 1324 pid_t own; 1325 1326 lwkt_gettoken_shared(&sigio_token); 1327 sigio = *sigiop; 1328 own = (sigio != NULL ? sigio->sio_pgid : 0); 1329 lwkt_reltoken(&sigio_token); 1330 1331 return (own); 1332 } 1333 1334 /* 1335 * Close many file descriptors. 1336 */ 1337 int 1338 sys_closefrom(struct sysmsg *sysmsg, const struct closefrom_args *uap) 1339 { 1340 return(kern_closefrom(uap->fd)); 1341 } 1342 1343 /* 1344 * Close all file descriptors greater then or equal to fd 1345 */ 1346 int 1347 kern_closefrom(int fd) 1348 { 1349 struct thread *td = curthread; 1350 struct proc *p = td->td_proc; 1351 struct filedesc *fdp; 1352 int error; 1353 int e2; 1354 1355 KKASSERT(p); 1356 fdp = p->p_fd; 1357 1358 if (fd < 0) 1359 return (EINVAL); 1360 1361 /* 1362 * NOTE: This function will skip unassociated descriptors and 1363 * reserved descriptors that have not yet been assigned. 1364 * fd_lastfile can change as a side effect of kern_close(). 1365 * 1366 * NOTE: We accumulate EINTR errors and return EINTR if any 1367 * close() returned EINTR. However, the descriptor is 1368 * still closed and we do not break out of the loop. 1369 */ 1370 error = 0; 1371 spin_lock(&fdp->fd_spin); 1372 while (fd <= fdp->fd_lastfile) { 1373 if (fdp->fd_files[fd].fp != NULL) { 1374 spin_unlock(&fdp->fd_spin); 1375 /* ok if this races another close */ 1376 e2 = kern_close(fd); 1377 if (e2 == EINTR) 1378 error = EINTR; 1379 spin_lock(&fdp->fd_spin); 1380 } 1381 ++fd; 1382 } 1383 spin_unlock(&fdp->fd_spin); 1384 1385 return error; 1386 } 1387 1388 /* 1389 * Close a file descriptor. 1390 */ 1391 int 1392 sys_close(struct sysmsg *sysmsg, const struct close_args *uap) 1393 { 1394 return(kern_close(uap->fd)); 1395 } 1396 1397 /* 1398 * close() helper 1399 */ 1400 int 1401 kern_close(int fd) 1402 { 1403 struct thread *td = curthread; 1404 struct proc *p = td->td_proc; 1405 struct filedesc *fdp; 1406 struct file *fp; 1407 int error; 1408 int holdleaders; 1409 1410 KKASSERT(p); 1411 fdp = p->p_fd; 1412 1413 /* 1414 * funsetfd*() also clears the fd cache 1415 */ 1416 spin_lock(&fdp->fd_spin); 1417 if ((fp = funsetfd_locked(fdp, fd)) == NULL) { 1418 spin_unlock(&fdp->fd_spin); 1419 return (EBADF); 1420 } 1421 holdleaders = 0; 1422 if (p->p_fdtol != NULL) { 1423 /* 1424 * Ask fdfree() to sleep to ensure that all relevant 1425 * process leaders can be traversed in closef(). 1426 */ 1427 fdp->fd_holdleaderscount++; 1428 holdleaders = 1; 1429 } 1430 1431 /* 1432 * we now hold the fp reference that used to be owned by the descriptor 1433 * array. 1434 */ 1435 spin_unlock(&fdp->fd_spin); 1436 if (SLIST_FIRST(&fp->f_klist)) 1437 knote_fdclose(fp, fdp, fd); 1438 error = closef(fp, p); 1439 if (holdleaders) { 1440 spin_lock(&fdp->fd_spin); 1441 fdp->fd_holdleaderscount--; 1442 if (fdp->fd_holdleaderscount == 0 && 1443 fdp->fd_holdleaderswakeup != 0) { 1444 fdp->fd_holdleaderswakeup = 0; 1445 spin_unlock(&fdp->fd_spin); 1446 wakeup(&fdp->fd_holdleaderscount); 1447 } else { 1448 spin_unlock(&fdp->fd_spin); 1449 } 1450 } 1451 return (error); 1452 } 1453 1454 /* 1455 * shutdown_args(int fd, int how) 1456 */ 1457 int 1458 kern_shutdown(int fd, int how) 1459 { 1460 struct thread *td = curthread; 1461 struct file *fp; 1462 int error; 1463 1464 if ((fp = holdfp(td, fd, -1)) == NULL) 1465 return (EBADF); 1466 error = fo_shutdown(fp, how); 1467 fdrop(fp); 1468 1469 return (error); 1470 } 1471 1472 /* 1473 * MPALMOSTSAFE 1474 */ 1475 int 1476 sys_shutdown(struct sysmsg *sysmsg, const struct shutdown_args *uap) 1477 { 1478 int error; 1479 1480 error = kern_shutdown(uap->s, uap->how); 1481 1482 return (error); 1483 } 1484 1485 /* 1486 * fstat() helper 1487 */ 1488 int 1489 kern_fstat(int fd, struct stat *ub) 1490 { 1491 struct thread *td = curthread; 1492 struct file *fp; 1493 int error; 1494 1495 if ((fp = holdfp(td, fd, -1)) == NULL) 1496 return (EBADF); 1497 error = fo_stat(fp, ub, td->td_ucred); 1498 fdrop(fp); 1499 1500 return (error); 1501 } 1502 1503 /* 1504 * Return status information about a file descriptor. 1505 */ 1506 int 1507 sys_fstat(struct sysmsg *sysmsg, const struct fstat_args *uap) 1508 { 1509 struct stat st; 1510 int error; 1511 1512 error = kern_fstat(uap->fd, &st); 1513 1514 if (error == 0) 1515 error = copyout(&st, uap->sb, sizeof(st)); 1516 return (error); 1517 } 1518 1519 /* 1520 * Return pathconf information about a file descriptor. 1521 * 1522 * MPALMOSTSAFE 1523 */ 1524 int 1525 sys_fpathconf(struct sysmsg *sysmsg, const struct fpathconf_args *uap) 1526 { 1527 struct thread *td = curthread; 1528 struct file *fp; 1529 struct vnode *vp; 1530 int error = 0; 1531 1532 if ((fp = holdfp(td, uap->fd, -1)) == NULL) 1533 return (EBADF); 1534 1535 switch (fp->f_type) { 1536 case DTYPE_PIPE: 1537 case DTYPE_SOCKET: 1538 if (uap->name != _PC_PIPE_BUF) { 1539 error = EINVAL; 1540 } else { 1541 sysmsg->sysmsg_result = PIPE_BUF; 1542 error = 0; 1543 } 1544 break; 1545 case DTYPE_FIFO: 1546 case DTYPE_VNODE: 1547 vp = (struct vnode *)fp->f_data; 1548 error = VOP_PATHCONF(vp, uap->name, &sysmsg->sysmsg_reg); 1549 break; 1550 default: 1551 error = EOPNOTSUPP; 1552 break; 1553 } 1554 fdrop(fp); 1555 return(error); 1556 } 1557 1558 /* 1559 * Grow the file table so it can hold through descriptor (want). 1560 * 1561 * The fdp's spinlock must be held exclusively on entry and may be held 1562 * exclusively on return. The spinlock may be cycled by the routine. 1563 */ 1564 static void 1565 fdgrow_locked(struct filedesc *fdp, int want) 1566 { 1567 struct fdnode *newfiles; 1568 struct fdnode *oldfiles; 1569 int nf, extra; 1570 1571 nf = fdp->fd_nfiles; 1572 do { 1573 /* nf has to be of the form 2^n - 1 */ 1574 nf = 2 * nf + 1; 1575 } while (nf <= want); 1576 1577 spin_unlock(&fdp->fd_spin); 1578 newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK); 1579 spin_lock(&fdp->fd_spin); 1580 1581 /* 1582 * We could have raced another extend while we were not holding 1583 * the spinlock. 1584 */ 1585 if (fdp->fd_nfiles >= nf) { 1586 spin_unlock(&fdp->fd_spin); 1587 kfree(newfiles, M_FILEDESC); 1588 spin_lock(&fdp->fd_spin); 1589 return; 1590 } 1591 /* 1592 * Copy the existing ofile and ofileflags arrays 1593 * and zero the new portion of each array. 1594 */ 1595 extra = nf - fdp->fd_nfiles; 1596 bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode)); 1597 bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode)); 1598 1599 oldfiles = fdp->fd_files; 1600 fdp->fd_files = newfiles; 1601 fdp->fd_nfiles = nf; 1602 1603 if (oldfiles != fdp->fd_builtin_files) { 1604 spin_unlock(&fdp->fd_spin); 1605 kfree(oldfiles, M_FILEDESC); 1606 spin_lock(&fdp->fd_spin); 1607 } 1608 } 1609 1610 /* 1611 * Number of nodes in right subtree, including the root. 1612 */ 1613 static __inline int 1614 right_subtree_size(int n) 1615 { 1616 return (n ^ (n | (n + 1))); 1617 } 1618 1619 /* 1620 * Bigger ancestor. 1621 */ 1622 static __inline int 1623 right_ancestor(int n) 1624 { 1625 return (n | (n + 1)); 1626 } 1627 1628 /* 1629 * Smaller ancestor. 1630 */ 1631 static __inline int 1632 left_ancestor(int n) 1633 { 1634 return ((n & (n + 1)) - 1); 1635 } 1636 1637 /* 1638 * Traverse the in-place binary tree buttom-up adjusting the allocation 1639 * count so scans can determine where free descriptors are located. 1640 * 1641 * caller must be holding an exclusive spinlock on fdp 1642 */ 1643 static 1644 void 1645 fdreserve_locked(struct filedesc *fdp, int fd, int incr) 1646 { 1647 while (fd >= 0) { 1648 fdp->fd_files[fd].allocated += incr; 1649 KKASSERT(fdp->fd_files[fd].allocated >= 0); 1650 fd = left_ancestor(fd); 1651 } 1652 } 1653 1654 /* 1655 * Reserve a file descriptor for the process. If no error occurs, the 1656 * caller MUST at some point call fsetfd() or assign a file pointer 1657 * or dispose of the reservation. 1658 */ 1659 static 1660 int 1661 fdalloc_locked(struct proc *p, struct filedesc *fdp, int want, int *result) 1662 { 1663 struct plimit *limit = readplimits(p); 1664 struct uidinfo *uip; 1665 int fd, rsize, rsum, node, lim; 1666 1667 /* 1668 * Check dtable size limit 1669 */ 1670 *result = -1; /* avoid gcc warnings */ 1671 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX) 1672 lim = INT_MAX; 1673 else 1674 lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur; 1675 1676 if (lim > maxfilesperproc) 1677 lim = maxfilesperproc; 1678 if (lim < minfilesperproc) 1679 lim = minfilesperproc; 1680 if (want >= lim) 1681 return (EINVAL); 1682 1683 /* 1684 * Check that the user has not run out of descriptors (non-root only). 1685 * As a safety measure the dtable is allowed to have at least 1686 * minfilesperproc open fds regardless of the maxfilesperuser limit. 1687 * 1688 * This isn't as loose a spec as ui_posixlocks, so we use atomic 1689 * ops to force synchronize and recheck if we would otherwise 1690 * error. 1691 */ 1692 if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) { 1693 uip = p->p_ucred->cr_uidinfo; 1694 if (uip->ui_openfiles > maxfilesperuser) { 1695 int n; 1696 int count; 1697 1698 count = 0; 1699 for (n = 0; n < ncpus; ++n) { 1700 count += atomic_swap_int( 1701 &uip->ui_pcpu[n].pu_openfiles, 0); 1702 } 1703 atomic_add_int(&uip->ui_openfiles, count); 1704 if (uip->ui_openfiles > maxfilesperuser) { 1705 krateprintf(&krate_uidinfo, 1706 "Warning: user %d pid %d (%s) " 1707 "ran out of file descriptors " 1708 "(%d/%d)\n", 1709 p->p_ucred->cr_uid, (int)p->p_pid, 1710 p->p_comm, 1711 uip->ui_openfiles, maxfilesperuser); 1712 return(ENFILE); 1713 } 1714 } 1715 } 1716 1717 /* 1718 * Grow the dtable if necessary 1719 */ 1720 if (want >= fdp->fd_nfiles) 1721 fdgrow_locked(fdp, want); 1722 1723 /* 1724 * Search for a free descriptor starting at the higher 1725 * of want or fd_freefile. If that fails, consider 1726 * expanding the ofile array. 1727 * 1728 * NOTE! the 'allocated' field is a cumulative recursive allocation 1729 * count. If we happen to see a value of 0 then we can shortcut 1730 * our search. Otherwise we run through through the tree going 1731 * down branches we know have free descriptor(s) until we hit a 1732 * leaf node. The leaf node will be free but will not necessarily 1733 * have an allocated field of 0. 1734 */ 1735 retry: 1736 /* move up the tree looking for a subtree with a free node */ 1737 for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim); 1738 fd = right_ancestor(fd)) { 1739 if (fdp->fd_files[fd].allocated == 0) 1740 goto found; 1741 1742 rsize = right_subtree_size(fd); 1743 if (fdp->fd_files[fd].allocated == rsize) 1744 continue; /* right subtree full */ 1745 1746 /* 1747 * Free fd is in the right subtree of the tree rooted at fd. 1748 * Call that subtree R. Look for the smallest (leftmost) 1749 * subtree of R with an unallocated fd: continue moving 1750 * down the left branch until encountering a full left 1751 * subtree, then move to the right. 1752 */ 1753 for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) { 1754 node = fd + rsize; 1755 rsum += fdp->fd_files[node].allocated; 1756 if (fdp->fd_files[fd].allocated == rsum + rsize) { 1757 fd = node; /* move to the right */ 1758 if (fdp->fd_files[node].allocated == 0) 1759 goto found; 1760 rsum = 0; 1761 } 1762 } 1763 goto found; 1764 } 1765 1766 /* 1767 * No space in current array. Expand? 1768 */ 1769 if (fdp->fd_nfiles >= lim) { 1770 return (EMFILE); 1771 } 1772 fdgrow_locked(fdp, want); 1773 goto retry; 1774 1775 found: 1776 KKASSERT(fd < fdp->fd_nfiles); 1777 if (fd > fdp->fd_lastfile) 1778 fdp->fd_lastfile = fd; 1779 if (want <= fdp->fd_freefile) 1780 fdp->fd_freefile = fd; 1781 *result = fd; 1782 KKASSERT(fdp->fd_files[fd].fp == NULL); 1783 KKASSERT(fdp->fd_files[fd].reserved == 0); 1784 fdp->fd_files[fd].fileflags = 0; 1785 fdp->fd_files[fd].reserved = 1; 1786 fdreserve_locked(fdp, fd, 1); 1787 1788 return (0); 1789 } 1790 1791 int 1792 fdalloc(struct proc *p, int want, int *result) 1793 { 1794 struct filedesc *fdp = p->p_fd; 1795 int error; 1796 1797 spin_lock(&fdp->fd_spin); 1798 error = fdalloc_locked(p, fdp, want, result); 1799 spin_unlock(&fdp->fd_spin); 1800 1801 return error; 1802 } 1803 1804 /* 1805 * Check to see whether n user file descriptors 1806 * are available to the process p. 1807 */ 1808 int 1809 fdavail(struct proc *p, int n) 1810 { 1811 struct plimit *limit = readplimits(p); 1812 struct filedesc *fdp = p->p_fd; 1813 struct fdnode *fdnode; 1814 int i, lim, last; 1815 1816 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX) 1817 lim = INT_MAX; 1818 else 1819 lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur; 1820 1821 if (lim > maxfilesperproc) 1822 lim = maxfilesperproc; 1823 if (lim < minfilesperproc) 1824 lim = minfilesperproc; 1825 1826 spin_lock(&fdp->fd_spin); 1827 if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) { 1828 spin_unlock(&fdp->fd_spin); 1829 return (1); 1830 } 1831 last = min(fdp->fd_nfiles, lim); 1832 fdnode = &fdp->fd_files[fdp->fd_freefile]; 1833 for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) { 1834 if (fdnode->fp == NULL && --n <= 0) { 1835 spin_unlock(&fdp->fd_spin); 1836 return (1); 1837 } 1838 } 1839 spin_unlock(&fdp->fd_spin); 1840 return (0); 1841 } 1842 1843 /* 1844 * Revoke open descriptors referencing (f_data, f_type) 1845 * 1846 * Any revoke executed within a prison is only able to 1847 * revoke descriptors for processes within that prison. 1848 * 1849 * Returns 0 on success or an error code. 1850 */ 1851 struct fdrevoke_info { 1852 void *data; 1853 short type; 1854 short unused; 1855 int found; 1856 struct ucred *cred; 1857 struct file *nfp; 1858 }; 1859 1860 static int fdrevoke_check_callback(struct file *fp, void *vinfo); 1861 static int fdrevoke_proc_callback(struct proc *p, void *vinfo); 1862 1863 int 1864 fdrevoke(void *f_data, short f_type, struct ucred *cred) 1865 { 1866 struct fdrevoke_info info; 1867 int error; 1868 1869 bzero(&info, sizeof(info)); 1870 info.data = f_data; 1871 info.type = f_type; 1872 info.cred = cred; 1873 error = falloc(NULL, &info.nfp, NULL); 1874 if (error) 1875 return (error); 1876 1877 /* 1878 * Scan the file pointer table once. dups do not dup file pointers, 1879 * only descriptors, so there is no leak. Set FREVOKED on the fps 1880 * being revoked. 1881 * 1882 * Any fps sent over unix-domain sockets will be revoked by the 1883 * socket code checking for FREVOKED when the fps are externialized. 1884 * revoke_token is used to make sure that fps marked FREVOKED and 1885 * externalized will be picked up by the following allproc_scan(). 1886 */ 1887 lwkt_gettoken(&revoke_token); 1888 allfiles_scan_exclusive(fdrevoke_check_callback, &info); 1889 lwkt_reltoken(&revoke_token); 1890 1891 /* 1892 * If any fps were marked track down the related descriptors 1893 * and close them. Any dup()s at this point will notice 1894 * the FREVOKED already set in the fp and do the right thing. 1895 */ 1896 if (info.found) 1897 allproc_scan(fdrevoke_proc_callback, &info, 0); 1898 fdrop(info.nfp); 1899 return(0); 1900 } 1901 1902 /* 1903 * Locate matching file pointers directly. 1904 * 1905 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls! 1906 */ 1907 static int 1908 fdrevoke_check_callback(struct file *fp, void *vinfo) 1909 { 1910 struct fdrevoke_info *info = vinfo; 1911 1912 /* 1913 * File pointers already flagged for revokation are skipped. 1914 */ 1915 if (fp->f_flag & FREVOKED) 1916 return(0); 1917 1918 /* 1919 * If revoking from a prison file pointers created outside of 1920 * that prison, or file pointers without creds, cannot be revoked. 1921 */ 1922 if (info->cred->cr_prison && 1923 (fp->f_cred == NULL || 1924 info->cred->cr_prison != fp->f_cred->cr_prison)) { 1925 return(0); 1926 } 1927 1928 /* 1929 * If the file pointer matches then mark it for revocation. The 1930 * flag is currently only used by unp_revoke_gc(). 1931 * 1932 * info->found is a heuristic and can race in a SMP environment. 1933 */ 1934 if (info->data == fp->f_data && info->type == fp->f_type) { 1935 atomic_set_int(&fp->f_flag, FREVOKED); 1936 info->found = 1; 1937 } 1938 return(0); 1939 } 1940 1941 /* 1942 * Locate matching file pointers via process descriptor tables. 1943 */ 1944 static int 1945 fdrevoke_proc_callback(struct proc *p, void *vinfo) 1946 { 1947 struct fdrevoke_info *info = vinfo; 1948 struct filedesc *fdp; 1949 struct file *fp; 1950 int n; 1951 1952 if (p->p_stat == SIDL || p->p_stat == SZOMB) 1953 return(0); 1954 if (info->cred->cr_prison && 1955 info->cred->cr_prison != p->p_ucred->cr_prison) { 1956 return(0); 1957 } 1958 1959 /* 1960 * If the controlling terminal of the process matches the 1961 * vnode being revoked we clear the controlling terminal. 1962 * 1963 * The normal spec_close() may not catch this because it 1964 * uses curproc instead of p. 1965 */ 1966 if (p->p_session && info->type == DTYPE_VNODE && 1967 info->data == p->p_session->s_ttyvp) { 1968 p->p_session->s_ttyvp = NULL; 1969 vrele(info->data); 1970 } 1971 1972 /* 1973 * Softref the fdp to prevent it from being destroyed 1974 */ 1975 spin_lock(&p->p_spin); 1976 if ((fdp = p->p_fd) == NULL) { 1977 spin_unlock(&p->p_spin); 1978 return(0); 1979 } 1980 atomic_add_int(&fdp->fd_softrefs, 1); 1981 spin_unlock(&p->p_spin); 1982 1983 /* 1984 * Locate and close any matching file descriptors, replacing 1985 * them with info->nfp. 1986 */ 1987 spin_lock(&fdp->fd_spin); 1988 for (n = 0; n < fdp->fd_nfiles; ++n) { 1989 if ((fp = fdp->fd_files[n].fp) == NULL) 1990 continue; 1991 if (fp->f_flag & FREVOKED) { 1992 ++fdp->fd_closedcounter; 1993 fclearcache(&fdp->fd_files[n], NULL, 0); 1994 ++fdp->fd_closedcounter; 1995 fhold(info->nfp); 1996 fdp->fd_files[n].fp = info->nfp; 1997 spin_unlock(&fdp->fd_spin); 1998 knote_fdclose(fp, fdp, n); /* XXX */ 1999 closef(fp, p); 2000 spin_lock(&fdp->fd_spin); 2001 } 2002 } 2003 spin_unlock(&fdp->fd_spin); 2004 atomic_subtract_int(&fdp->fd_softrefs, 1); 2005 return(0); 2006 } 2007 2008 /* 2009 * falloc: 2010 * Create a new open file structure and reserve a file decriptor 2011 * for the process that refers to it. 2012 * 2013 * Root creds are checked using lp, or assumed if lp is NULL. If 2014 * resultfd is non-NULL then lp must also be non-NULL. No file 2015 * descriptor is reserved (and no process context is needed) if 2016 * resultfd is NULL. 2017 * 2018 * A file pointer with a refcount of 1 is returned. Note that the 2019 * file pointer is NOT associated with the descriptor. If falloc 2020 * returns success, fsetfd() MUST be called to either associate the 2021 * file pointer or clear the reservation. 2022 */ 2023 int 2024 falloc(struct lwp *lp, struct file **resultfp, int *resultfd) 2025 { 2026 static struct timeval lastfail; 2027 static int curfail; 2028 struct filelist_head *head; 2029 struct file *fp; 2030 struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred; 2031 int error; 2032 2033 fp = NULL; 2034 2035 /* 2036 * Handle filetable full issues and root overfill. 2037 */ 2038 if (nfiles >= maxfiles - maxfilesrootres && 2039 (cred->cr_ruid != 0 || nfiles >= maxfiles)) { 2040 if (ppsratecheck(&lastfail, &curfail, 1)) { 2041 kprintf("kern.maxfiles limit exceeded by uid %d, " 2042 "please see tuning(7).\n", 2043 cred->cr_ruid); 2044 } 2045 error = ENFILE; 2046 goto done; 2047 } 2048 2049 /* 2050 * Allocate a new file descriptor. 2051 */ 2052 fp = objcache_get(file_objcache, M_WAITOK); 2053 bzero(fp, sizeof(*fp)); 2054 spin_init(&fp->f_spin, "falloc"); 2055 SLIST_INIT(&fp->f_klist); 2056 fp->f_count = 1; 2057 fp->f_ops = &badfileops; 2058 fp->f_seqcount = 1; 2059 fsetcred(fp, cred); 2060 atomic_add_int(&nfiles, 1); 2061 2062 head = fp2filelist(fp); 2063 spin_lock(&head->spin); 2064 LIST_INSERT_HEAD(&head->list, fp, f_list); 2065 spin_unlock(&head->spin); 2066 2067 if (resultfd) { 2068 if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) { 2069 fdrop(fp); 2070 fp = NULL; 2071 } 2072 } else { 2073 error = 0; 2074 } 2075 done: 2076 *resultfp = fp; 2077 return (error); 2078 } 2079 2080 /* 2081 * Check for races against a file descriptor by determining that the 2082 * file pointer is still associated with the specified file descriptor, 2083 * and a close is not currently in progress. 2084 */ 2085 int 2086 checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp, 2087 int closedcounter) 2088 { 2089 struct fdcache *fdc; 2090 int error; 2091 2092 cpu_lfence(); 2093 if (fdp->fd_closedcounter == closedcounter) 2094 return 0; 2095 2096 if (td->td_proc && td->td_proc->p_fd == fdp) { 2097 for (fdc = &td->td_fdcache[0]; 2098 fdc < &td->td_fdcache[NFDCACHE]; ++fdc) { 2099 if (fdc->fd == fd && fdc->fp == fp) 2100 return 0; 2101 } 2102 } 2103 2104 spin_lock_shared(&fdp->fd_spin); 2105 if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp) 2106 error = EBADF; 2107 else 2108 error = 0; 2109 spin_unlock_shared(&fdp->fd_spin); 2110 return (error); 2111 } 2112 2113 /* 2114 * Associate a file pointer with a previously reserved file descriptor. 2115 * This function always succeeds. 2116 * 2117 * If fp is NULL, the file descriptor is returned to the pool. 2118 * 2119 * Caller must hold an exclusive spinlock on fdp->fd_spin. 2120 */ 2121 static void 2122 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd) 2123 { 2124 KKASSERT((unsigned)fd < fdp->fd_nfiles); 2125 KKASSERT(fdp->fd_files[fd].reserved != 0); 2126 if (fp) { 2127 fhold(fp); 2128 /* fclearcache(&fdp->fd_files[fd], NULL, 0); */ 2129 fdp->fd_files[fd].fp = fp; 2130 fdp->fd_files[fd].reserved = 0; 2131 } else { 2132 fdp->fd_files[fd].reserved = 0; 2133 fdreserve_locked(fdp, fd, -1); 2134 fdfixup_locked(fdp, fd); 2135 } 2136 } 2137 2138 /* 2139 * Caller must hold an exclusive spinlock on fdp->fd_spin. 2140 */ 2141 void 2142 fsetfd(struct filedesc *fdp, struct file *fp, int fd) 2143 { 2144 spin_lock(&fdp->fd_spin); 2145 fsetfd_locked(fdp, fp, fd); 2146 spin_unlock(&fdp->fd_spin); 2147 } 2148 2149 /* 2150 * Caller must hold an exclusive spinlock on fdp->fd_spin. 2151 */ 2152 static 2153 struct file * 2154 funsetfd_locked(struct filedesc *fdp, int fd) 2155 { 2156 struct file *fp; 2157 2158 if ((unsigned)fd >= fdp->fd_nfiles) 2159 return (NULL); 2160 if ((fp = fdp->fd_files[fd].fp) == NULL) 2161 return (NULL); 2162 ++fdp->fd_closedcounter; 2163 fclearcache(&fdp->fd_files[fd], NULL, 0); 2164 fdp->fd_files[fd].fp = NULL; 2165 fdp->fd_files[fd].fileflags = 0; 2166 ++fdp->fd_closedcounter; 2167 2168 fdreserve_locked(fdp, fd, -1); 2169 fdfixup_locked(fdp, fd); 2170 2171 return(fp); 2172 } 2173 2174 /* 2175 * WARNING: May not be called before initial fsetfd(). 2176 */ 2177 int 2178 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp) 2179 { 2180 int error; 2181 2182 spin_lock_shared(&fdp->fd_spin); 2183 if (((u_int)fd) >= fdp->fd_nfiles) { 2184 error = EBADF; 2185 } else if (fdp->fd_files[fd].fp == NULL) { 2186 error = EBADF; 2187 } else { 2188 *flagsp = fdp->fd_files[fd].fileflags; 2189 error = 0; 2190 } 2191 spin_unlock_shared(&fdp->fd_spin); 2192 2193 return (error); 2194 } 2195 2196 /* 2197 * WARNING: May not be called before initial fsetfd(). 2198 */ 2199 int 2200 fsetfdflags(struct filedesc *fdp, int fd, int add_flags) 2201 { 2202 int error; 2203 2204 spin_lock(&fdp->fd_spin); 2205 if (((u_int)fd) >= fdp->fd_nfiles) { 2206 error = EBADF; 2207 } else if (fdp->fd_files[fd].fp == NULL) { 2208 error = EBADF; 2209 } else { 2210 fdp->fd_files[fd].fileflags |= add_flags; 2211 error = 0; 2212 } 2213 spin_unlock(&fdp->fd_spin); 2214 2215 return (error); 2216 } 2217 2218 /* 2219 * WARNING: May not be called before initial fsetfd(). 2220 */ 2221 int 2222 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags) 2223 { 2224 int error; 2225 2226 spin_lock(&fdp->fd_spin); 2227 if (((u_int)fd) >= fdp->fd_nfiles) { 2228 error = EBADF; 2229 } else if (fdp->fd_files[fd].fp == NULL) { 2230 error = EBADF; 2231 } else { 2232 fdp->fd_files[fd].fileflags &= ~rem_flags; 2233 error = 0; 2234 } 2235 spin_unlock(&fdp->fd_spin); 2236 2237 return (error); 2238 } 2239 2240 /* 2241 * Set/Change/Clear the creds for a fp and synchronize the uidinfo. 2242 */ 2243 void 2244 fsetcred(struct file *fp, struct ucred *ncr) 2245 { 2246 struct ucred *ocr; 2247 struct uidinfo *uip; 2248 struct uidcount *pup; 2249 int cpu = mycpuid; 2250 int count; 2251 2252 ocr = fp->f_cred; 2253 if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) { 2254 if (ocr) { 2255 uip = ocr->cr_uidinfo; 2256 pup = &uip->ui_pcpu[cpu]; 2257 atomic_add_int(&pup->pu_openfiles, -1); 2258 if (pup->pu_openfiles < -PUP_LIMIT || 2259 pup->pu_openfiles > PUP_LIMIT) { 2260 count = atomic_swap_int(&pup->pu_openfiles, 0); 2261 atomic_add_int(&uip->ui_openfiles, count); 2262 } 2263 } 2264 if (ncr) { 2265 uip = ncr->cr_uidinfo; 2266 pup = &uip->ui_pcpu[cpu]; 2267 atomic_add_int(&pup->pu_openfiles, 1); 2268 if (pup->pu_openfiles < -PUP_LIMIT || 2269 pup->pu_openfiles > PUP_LIMIT) { 2270 count = atomic_swap_int(&pup->pu_openfiles, 0); 2271 atomic_add_int(&uip->ui_openfiles, count); 2272 } 2273 } 2274 } 2275 if (ncr) 2276 crhold(ncr); 2277 fp->f_cred = ncr; 2278 if (ocr) 2279 crfree(ocr); 2280 } 2281 2282 /* 2283 * Free a file descriptor. 2284 */ 2285 static 2286 void 2287 ffree(struct file *fp) 2288 { 2289 KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!")); 2290 fsetcred(fp, NULL); 2291 if (fp->f_nchandle.ncp) 2292 cache_drop(&fp->f_nchandle); 2293 objcache_put(file_objcache, fp); 2294 } 2295 2296 /* 2297 * called from init_main, initialize filedesc0 for proc0. 2298 */ 2299 void 2300 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask) 2301 { 2302 p0->p_fd = fdp0; 2303 p0->p_fdtol = NULL; 2304 fdp0->fd_refcnt = 1; 2305 fdp0->fd_cmask = cmask; 2306 fdp0->fd_files = fdp0->fd_builtin_files; 2307 fdp0->fd_nfiles = NDFILE; 2308 fdp0->fd_lastfile = -1; 2309 spin_init(&fdp0->fd_spin, "fdinitbootstrap"); 2310 } 2311 2312 /* 2313 * Build a new filedesc structure. 2314 */ 2315 struct filedesc * 2316 fdinit(struct proc *p) 2317 { 2318 struct filedesc *newfdp; 2319 struct filedesc *fdp = p->p_fd; 2320 2321 newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO); 2322 spin_lock(&fdp->fd_spin); 2323 if (fdp->fd_cdir) { 2324 newfdp->fd_cdir = fdp->fd_cdir; 2325 vref(newfdp->fd_cdir); 2326 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir); 2327 } 2328 2329 /* 2330 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of 2331 * proc0, but should unconditionally exist in other processes. 2332 */ 2333 if (fdp->fd_rdir) { 2334 newfdp->fd_rdir = fdp->fd_rdir; 2335 vref(newfdp->fd_rdir); 2336 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir); 2337 } 2338 if (fdp->fd_jdir) { 2339 newfdp->fd_jdir = fdp->fd_jdir; 2340 vref(newfdp->fd_jdir); 2341 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir); 2342 } 2343 spin_unlock(&fdp->fd_spin); 2344 2345 /* Create the file descriptor table. */ 2346 newfdp->fd_refcnt = 1; 2347 newfdp->fd_cmask = cmask; 2348 newfdp->fd_files = newfdp->fd_builtin_files; 2349 newfdp->fd_nfiles = NDFILE; 2350 newfdp->fd_lastfile = -1; 2351 spin_init(&newfdp->fd_spin, "fdinit"); 2352 2353 return (newfdp); 2354 } 2355 2356 /* 2357 * Share a filedesc structure. 2358 */ 2359 struct filedesc * 2360 fdshare(struct proc *p) 2361 { 2362 struct filedesc *fdp; 2363 2364 fdp = p->p_fd; 2365 spin_lock(&fdp->fd_spin); 2366 fdp->fd_refcnt++; 2367 spin_unlock(&fdp->fd_spin); 2368 return (fdp); 2369 } 2370 2371 /* 2372 * Copy a filedesc structure. 2373 */ 2374 int 2375 fdcopy(struct proc *p, struct filedesc **fpp) 2376 { 2377 struct filedesc *fdp = p->p_fd; 2378 struct filedesc *newfdp; 2379 struct fdnode *fdnode; 2380 int i; 2381 int ni; 2382 2383 /* 2384 * Certain daemons might not have file descriptors. 2385 */ 2386 if (fdp == NULL) 2387 return (0); 2388 2389 /* 2390 * Allocate the new filedesc and fd_files[] array. This can race 2391 * with operations by other threads on the fdp so we have to be 2392 * careful. 2393 */ 2394 newfdp = kmalloc(sizeof(struct filedesc), 2395 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK); 2396 if (newfdp == NULL) { 2397 *fpp = NULL; 2398 return (-1); 2399 } 2400 again: 2401 spin_lock(&fdp->fd_spin); 2402 if (fdp->fd_lastfile < NDFILE) { 2403 newfdp->fd_files = newfdp->fd_builtin_files; 2404 i = NDFILE; 2405 } else { 2406 /* 2407 * We have to allocate (N^2-1) entries for our in-place 2408 * binary tree. Allow the table to shrink. 2409 */ 2410 i = fdp->fd_nfiles; 2411 ni = (i - 1) / 2; 2412 while (ni > fdp->fd_lastfile && ni > NDFILE) { 2413 i = ni; 2414 ni = (i - 1) / 2; 2415 } 2416 spin_unlock(&fdp->fd_spin); 2417 newfdp->fd_files = kmalloc(i * sizeof(struct fdnode), 2418 M_FILEDESC, M_WAITOK | M_ZERO); 2419 2420 /* 2421 * Check for race, retry 2422 */ 2423 spin_lock(&fdp->fd_spin); 2424 if (i <= fdp->fd_lastfile) { 2425 spin_unlock(&fdp->fd_spin); 2426 kfree(newfdp->fd_files, M_FILEDESC); 2427 goto again; 2428 } 2429 } 2430 2431 /* 2432 * Dup the remaining fields. vref() and cache_hold() can be 2433 * safely called while holding the read spinlock on fdp. 2434 * 2435 * The read spinlock on fdp is still being held. 2436 * 2437 * NOTE: vref and cache_hold calls for the case where the vnode 2438 * or cache entry already has at least one ref may be called 2439 * while holding spin locks. 2440 */ 2441 if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) { 2442 vref(newfdp->fd_cdir); 2443 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir); 2444 } 2445 /* 2446 * We must check for fd_rdir here, at least for now because 2447 * the init process is created before we have access to the 2448 * rootvode to take a reference to it. 2449 */ 2450 if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) { 2451 vref(newfdp->fd_rdir); 2452 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir); 2453 } 2454 if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) { 2455 vref(newfdp->fd_jdir); 2456 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir); 2457 } 2458 newfdp->fd_refcnt = 1; 2459 newfdp->fd_nfiles = i; 2460 newfdp->fd_lastfile = fdp->fd_lastfile; 2461 newfdp->fd_freefile = fdp->fd_freefile; 2462 newfdp->fd_cmask = fdp->fd_cmask; 2463 spin_init(&newfdp->fd_spin, "fdcopy"); 2464 2465 /* 2466 * Copy the descriptor table through (i). This also copies the 2467 * allocation state. Then go through and ref the file pointers 2468 * and clean up any KQ descriptors. 2469 * 2470 * kq descriptors cannot be copied. Since we haven't ref'd the 2471 * copied files yet we can ignore the return value from funsetfd(). 2472 * 2473 * The read spinlock on fdp is still being held. 2474 * 2475 * Be sure to clean out fdnode->tdcache, otherwise bad things will 2476 * happen. 2477 */ 2478 bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode)); 2479 for (i = 0 ; i < newfdp->fd_nfiles; ++i) { 2480 fdnode = &newfdp->fd_files[i]; 2481 if (fdnode->reserved) { 2482 fdreserve_locked(newfdp, i, -1); 2483 fdnode->reserved = 0; 2484 fdfixup_locked(newfdp, i); 2485 } else if (fdnode->fp) { 2486 bzero(&fdnode->tdcache, sizeof(fdnode->tdcache)); 2487 if (fdnode->fp->f_type == DTYPE_KQUEUE) { 2488 (void)funsetfd_locked(newfdp, i); 2489 } else { 2490 fhold(fdnode->fp); 2491 } 2492 } 2493 } 2494 spin_unlock(&fdp->fd_spin); 2495 *fpp = newfdp; 2496 return (0); 2497 } 2498 2499 /* 2500 * Release a filedesc structure. 2501 * 2502 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE) 2503 */ 2504 void 2505 fdfree(struct proc *p, struct filedesc *repl) 2506 { 2507 struct filedesc *fdp; 2508 struct fdnode *fdnode; 2509 int i; 2510 struct filedesc_to_leader *fdtol; 2511 struct file *fp; 2512 struct vnode *vp; 2513 struct flock lf; 2514 2515 /* 2516 * Before destroying or replacing p->p_fd we must be sure to 2517 * clean out the cache of the last thread, which should be 2518 * curthread. 2519 */ 2520 fexitcache(curthread); 2521 2522 /* 2523 * Certain daemons might not have file descriptors. 2524 */ 2525 fdp = p->p_fd; 2526 if (fdp == NULL) { 2527 p->p_fd = repl; 2528 return; 2529 } 2530 2531 /* 2532 * Severe messing around to follow. 2533 */ 2534 spin_lock(&fdp->fd_spin); 2535 2536 /* Check for special need to clear POSIX style locks */ 2537 fdtol = p->p_fdtol; 2538 if (fdtol != NULL) { 2539 KASSERT(fdtol->fdl_refcount > 0, 2540 ("filedesc_to_refcount botch: fdl_refcount=%d", 2541 fdtol->fdl_refcount)); 2542 if (fdtol->fdl_refcount == 1 && p->p_leader->p_advlock_flag) { 2543 for (i = 0; i <= fdp->fd_lastfile; ++i) { 2544 fdnode = &fdp->fd_files[i]; 2545 if (fdnode->fp == NULL || 2546 fdnode->fp->f_type != DTYPE_VNODE) { 2547 continue; 2548 } 2549 fp = fdnode->fp; 2550 fhold(fp); 2551 spin_unlock(&fdp->fd_spin); 2552 2553 lf.l_whence = SEEK_SET; 2554 lf.l_start = 0; 2555 lf.l_len = 0; 2556 lf.l_type = F_UNLCK; 2557 vp = (struct vnode *)fp->f_data; 2558 VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 2559 F_UNLCK, &lf, F_POSIX); 2560 fdrop(fp); 2561 spin_lock(&fdp->fd_spin); 2562 } 2563 } 2564 retry: 2565 if (fdtol->fdl_refcount == 1) { 2566 if (fdp->fd_holdleaderscount > 0 && 2567 p->p_leader->p_advlock_flag) { 2568 /* 2569 * close() or do_dup() has cleared a reference 2570 * in a shared file descriptor table. 2571 */ 2572 fdp->fd_holdleaderswakeup = 1; 2573 ssleep(&fdp->fd_holdleaderscount, 2574 &fdp->fd_spin, 0, "fdlhold", 0); 2575 goto retry; 2576 } 2577 if (fdtol->fdl_holdcount > 0) { 2578 /* 2579 * Ensure that fdtol->fdl_leader 2580 * remains valid in closef(). 2581 */ 2582 fdtol->fdl_wakeup = 1; 2583 ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0); 2584 goto retry; 2585 } 2586 } 2587 fdtol->fdl_refcount--; 2588 if (fdtol->fdl_refcount == 0 && 2589 fdtol->fdl_holdcount == 0) { 2590 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev; 2591 fdtol->fdl_prev->fdl_next = fdtol->fdl_next; 2592 } else { 2593 fdtol = NULL; 2594 } 2595 p->p_fdtol = NULL; 2596 if (fdtol != NULL) { 2597 spin_unlock(&fdp->fd_spin); 2598 kfree(fdtol, M_FILEDESC_TO_LEADER); 2599 spin_lock(&fdp->fd_spin); 2600 } 2601 } 2602 if (--fdp->fd_refcnt > 0) { 2603 spin_unlock(&fdp->fd_spin); 2604 spin_lock(&p->p_spin); 2605 p->p_fd = repl; 2606 spin_unlock(&p->p_spin); 2607 return; 2608 } 2609 2610 /* 2611 * Even though we are the last reference to the structure allproc 2612 * scans may still reference the structure. Maintain proper 2613 * locks until we can replace p->p_fd. 2614 * 2615 * Also note that kqueue's closef still needs to reference the 2616 * fdp via p->p_fd, so we have to close the descriptors before 2617 * we replace p->p_fd. 2618 */ 2619 for (i = 0; i <= fdp->fd_lastfile; ++i) { 2620 if (fdp->fd_files[i].fp) { 2621 fp = funsetfd_locked(fdp, i); 2622 if (fp) { 2623 spin_unlock(&fdp->fd_spin); 2624 if (SLIST_FIRST(&fp->f_klist)) 2625 knote_fdclose(fp, fdp, i); 2626 closef(fp, p); 2627 spin_lock(&fdp->fd_spin); 2628 } 2629 } 2630 } 2631 spin_unlock(&fdp->fd_spin); 2632 2633 /* 2634 * Interlock against an allproc scan operations (typically frevoke). 2635 */ 2636 spin_lock(&p->p_spin); 2637 p->p_fd = repl; 2638 spin_unlock(&p->p_spin); 2639 2640 /* 2641 * Wait for any softrefs to go away. This race rarely occurs so 2642 * we can use a non-critical-path style poll/sleep loop. The 2643 * race only occurs against allproc scans. 2644 * 2645 * No new softrefs can occur with the fdp disconnected from the 2646 * process. 2647 */ 2648 if (fdp->fd_softrefs) { 2649 kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid); 2650 while (fdp->fd_softrefs) 2651 tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1); 2652 } 2653 2654 if (fdp->fd_files != fdp->fd_builtin_files) 2655 kfree(fdp->fd_files, M_FILEDESC); 2656 if (fdp->fd_cdir) { 2657 cache_drop(&fdp->fd_ncdir); 2658 vrele(fdp->fd_cdir); 2659 } 2660 if (fdp->fd_rdir) { 2661 cache_drop(&fdp->fd_nrdir); 2662 vrele(fdp->fd_rdir); 2663 } 2664 if (fdp->fd_jdir) { 2665 cache_drop(&fdp->fd_njdir); 2666 vrele(fdp->fd_jdir); 2667 } 2668 kfree(fdp, M_FILEDESC); 2669 } 2670 2671 /* 2672 * Retrieve and reference the file pointer associated with a descriptor. 2673 * 2674 * td must be the current thread. 2675 */ 2676 struct file * 2677 holdfp(thread_t td, int fd, int flag) 2678 { 2679 struct file *fp; 2680 2681 fp = _holdfp_cache(td, fd); 2682 if (fp) { 2683 if ((fp->f_flag & flag) == 0 && flag != -1) { 2684 fdrop(fp); 2685 fp = NULL; 2686 } 2687 } 2688 return fp; 2689 } 2690 2691 /* 2692 * holdsock() - load the struct file pointer associated 2693 * with a socket into *fpp. If an error occurs, non-zero 2694 * will be returned and *fpp will be set to NULL. 2695 * 2696 * td must be the current thread. 2697 */ 2698 int 2699 holdsock(thread_t td, int fd, struct file **fpp) 2700 { 2701 struct file *fp; 2702 int error; 2703 2704 /* 2705 * Lockless shortcut 2706 */ 2707 fp = _holdfp_cache(td, fd); 2708 if (fp) { 2709 if (fp->f_type != DTYPE_SOCKET) { 2710 fdrop(fp); 2711 fp = NULL; 2712 error = ENOTSOCK; 2713 } else { 2714 error = 0; 2715 } 2716 } else { 2717 error = EBADF; 2718 } 2719 *fpp = fp; 2720 2721 return (error); 2722 } 2723 2724 /* 2725 * Convert a user file descriptor to a held file pointer. 2726 * 2727 * td must be the current thread. 2728 */ 2729 int 2730 holdvnode(thread_t td, int fd, struct file **fpp) 2731 { 2732 struct file *fp; 2733 int error; 2734 2735 fp = _holdfp_cache(td, fd); 2736 if (fp) { 2737 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) { 2738 fdrop(fp); 2739 fp = NULL; 2740 error = EINVAL; 2741 } else { 2742 error = 0; 2743 } 2744 } else { 2745 error = EBADF; 2746 } 2747 *fpp = fp; 2748 2749 return (error); 2750 } 2751 2752 /* 2753 * Convert a user file descriptor to a held file pointer. 2754 * 2755 * td must be the current thread. 2756 */ 2757 int 2758 holdvnode2(thread_t td, int fd, struct file **fpp, char *fflagsp) 2759 { 2760 struct file *fp; 2761 int error; 2762 2763 fp = _holdfp2(td, fd, fflagsp); 2764 if (fp) { 2765 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) { 2766 fdrop(fp); 2767 fp = NULL; 2768 error = EINVAL; 2769 } else { 2770 error = 0; 2771 } 2772 } else { 2773 error = EBADF; 2774 } 2775 *fpp = fp; 2776 2777 return (error); 2778 } 2779 2780 /* 2781 * For setugid programs, we don't want to people to use that setugidness 2782 * to generate error messages which write to a file which otherwise would 2783 * otherwise be off-limits to the process. 2784 * 2785 * This is a gross hack to plug the hole. A better solution would involve 2786 * a special vop or other form of generalized access control mechanism. We 2787 * go ahead and just reject all procfs file systems accesses as dangerous. 2788 * 2789 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is 2790 * sufficient. We also don't for check setugidness since we know we are. 2791 */ 2792 static int 2793 is_unsafe(struct file *fp) 2794 { 2795 if (fp->f_type == DTYPE_VNODE && 2796 ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS) 2797 return (1); 2798 return (0); 2799 } 2800 2801 /* 2802 * Make this setguid thing safe, if at all possible. 2803 * 2804 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose() 2805 */ 2806 void 2807 setugidsafety(struct proc *p) 2808 { 2809 struct filedesc *fdp = p->p_fd; 2810 int i; 2811 2812 /* Certain daemons might not have file descriptors. */ 2813 if (fdp == NULL) 2814 return; 2815 2816 /* 2817 * note: fdp->fd_files may be reallocated out from under us while 2818 * we are blocked in a close. Be careful! 2819 */ 2820 for (i = 0; i <= fdp->fd_lastfile; i++) { 2821 if (i > 2) 2822 break; 2823 if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) { 2824 struct file *fp; 2825 2826 /* 2827 * NULL-out descriptor prior to close to avoid 2828 * a race while close blocks. 2829 */ 2830 if ((fp = funsetfd_locked(fdp, i)) != NULL) { 2831 knote_fdclose(fp, fdp, i); 2832 closef(fp, p); 2833 } 2834 } 2835 } 2836 } 2837 2838 /* 2839 * Close all CLOEXEC files on exec. 2840 * 2841 * Only a single thread remains for the current process. 2842 * 2843 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose() 2844 */ 2845 void 2846 fdcloseexec(struct proc *p) 2847 { 2848 struct filedesc *fdp = p->p_fd; 2849 int i; 2850 2851 /* Certain daemons might not have file descriptors. */ 2852 if (fdp == NULL) 2853 return; 2854 2855 /* 2856 * We cannot cache fd_files since operations may block and rip 2857 * them out from under us. 2858 */ 2859 for (i = 0; i <= fdp->fd_lastfile; i++) { 2860 if (fdp->fd_files[i].fp != NULL && 2861 (fdp->fd_files[i].fileflags & UF_EXCLOSE)) { 2862 struct file *fp; 2863 2864 /* 2865 * NULL-out descriptor prior to close to avoid 2866 * a race while close blocks. 2867 * 2868 * (funsetfd*() also clears the fd cache) 2869 */ 2870 if ((fp = funsetfd_locked(fdp, i)) != NULL) { 2871 knote_fdclose(fp, fdp, i); 2872 closef(fp, p); 2873 } 2874 } 2875 } 2876 } 2877 2878 /* 2879 * It is unsafe for set[ug]id processes to be started with file 2880 * descriptors 0..2 closed, as these descriptors are given implicit 2881 * significance in the Standard C library. fdcheckstd() will create a 2882 * descriptor referencing /dev/null for each of stdin, stdout, and 2883 * stderr that is not already open. 2884 * 2885 * NOT MPSAFE - calls falloc, vn_open, etc 2886 */ 2887 int 2888 fdcheckstd(struct lwp *lp) 2889 { 2890 struct nlookupdata nd; 2891 struct filedesc *fdp; 2892 struct file *fp; 2893 int retval; 2894 int i, error, flags, devnull; 2895 2896 fdp = lp->lwp_proc->p_fd; 2897 if (fdp == NULL) 2898 return (0); 2899 devnull = -1; 2900 error = 0; 2901 for (i = 0; i < 3; i++) { 2902 if (fdp->fd_files[i].fp != NULL) 2903 continue; 2904 if (devnull < 0) { 2905 if ((error = falloc(lp, &fp, &devnull)) != 0) 2906 break; 2907 2908 error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE, 2909 NLC_FOLLOW|NLC_LOCKVP); 2910 flags = FREAD | FWRITE; 2911 if (error == 0) 2912 error = vn_open(&nd, fp, flags, 0); 2913 if (error == 0) 2914 fsetfd(fdp, fp, devnull); 2915 else 2916 fsetfd(fdp, NULL, devnull); 2917 fdrop(fp); 2918 nlookup_done(&nd); 2919 if (error) 2920 break; 2921 KKASSERT(i == devnull); 2922 } else { 2923 error = kern_dup(DUP_FIXED, devnull, i, &retval); 2924 if (error != 0) 2925 break; 2926 } 2927 } 2928 return (error); 2929 } 2930 2931 /* 2932 * Internal form of close. 2933 * Decrement reference count on file structure. 2934 * Note: td and/or p may be NULL when closing a file 2935 * that was being passed in a message. 2936 * 2937 * MPALMOSTSAFE - acquires mplock for VOP operations 2938 */ 2939 int 2940 closef(struct file *fp, struct proc *p) 2941 { 2942 struct vnode *vp; 2943 struct flock lf; 2944 struct filedesc_to_leader *fdtol; 2945 2946 if (fp == NULL) 2947 return (0); 2948 2949 /* 2950 * POSIX record locking dictates that any close releases ALL 2951 * locks owned by this process. This is handled by setting 2952 * a flag in the unlock to free ONLY locks obeying POSIX 2953 * semantics, and not to free BSD-style file locks. 2954 * If the descriptor was in a message, POSIX-style locks 2955 * aren't passed with the descriptor. 2956 */ 2957 if (p != NULL && fp->f_type == DTYPE_VNODE && 2958 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS) 2959 ) { 2960 if (p->p_leader->p_advlock_flag) { 2961 lf.l_whence = SEEK_SET; 2962 lf.l_start = 0; 2963 lf.l_len = 0; 2964 lf.l_type = F_UNLCK; 2965 vp = (struct vnode *)fp->f_data; 2966 VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 2967 &lf, F_POSIX); 2968 } 2969 fdtol = p->p_fdtol; 2970 if (fdtol != NULL) { 2971 lwkt_gettoken(&p->p_token); 2972 2973 /* 2974 * Handle special case where file descriptor table 2975 * is shared between multiple process leaders. 2976 */ 2977 for (fdtol = fdtol->fdl_next; 2978 fdtol != p->p_fdtol; 2979 fdtol = fdtol->fdl_next) { 2980 if (fdtol->fdl_leader->p_advlock_flag == 0) 2981 continue; 2982 fdtol->fdl_holdcount++; 2983 lf.l_whence = SEEK_SET; 2984 lf.l_start = 0; 2985 lf.l_len = 0; 2986 lf.l_type = F_UNLCK; 2987 vp = (struct vnode *)fp->f_data; 2988 VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader, 2989 F_UNLCK, &lf, F_POSIX); 2990 fdtol->fdl_holdcount--; 2991 if (fdtol->fdl_holdcount == 0 && 2992 fdtol->fdl_wakeup != 0) { 2993 fdtol->fdl_wakeup = 0; 2994 wakeup(fdtol); 2995 } 2996 } 2997 lwkt_reltoken(&p->p_token); 2998 } 2999 } 3000 return (fdrop(fp)); 3001 } 3002 3003 /* 3004 * fhold() can only be called if f_count is already at least 1 (i.e. the 3005 * caller of fhold() already has a reference to the file pointer in some 3006 * manner or other). 3007 * 3008 * Atomic ops are used for incrementing and decrementing f_count before 3009 * the 1->0 transition. f_count 1->0 transition is special, see the 3010 * comment in fdrop(). 3011 */ 3012 void 3013 fhold(struct file *fp) 3014 { 3015 /* 0->1 transition will never work */ 3016 KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count)); 3017 atomic_add_int(&fp->f_count, 1); 3018 } 3019 3020 /* 3021 * fdrop() - drop a reference to a descriptor 3022 */ 3023 int 3024 fdrop(struct file *fp) 3025 { 3026 struct flock lf; 3027 struct vnode *vp; 3028 int error, do_free = 0; 3029 3030 /* 3031 * NOTE: 3032 * Simple atomic_fetchadd_int(f_count, -1) here will cause use- 3033 * after-free or double free (due to f_count 0->1 transition), if 3034 * fhold() is called on the fps found through filehead iteration. 3035 */ 3036 for (;;) { 3037 int count = fp->f_count; 3038 3039 cpu_ccfence(); 3040 KASSERT(count > 0, ("fdrop: invalid f_count %d", count)); 3041 if (count == 1) { 3042 struct filelist_head *head = fp2filelist(fp); 3043 3044 /* 3045 * About to drop the last reference, hold the 3046 * filehead spin lock and drop it, so that no 3047 * one could see this fp through filehead anymore, 3048 * let alone fhold() this fp. 3049 */ 3050 spin_lock(&head->spin); 3051 if (atomic_cmpset_int(&fp->f_count, count, 0)) { 3052 LIST_REMOVE(fp, f_list); 3053 spin_unlock(&head->spin); 3054 atomic_subtract_int(&nfiles, 1); 3055 do_free = 1; /* free this fp */ 3056 break; 3057 } 3058 spin_unlock(&head->spin); 3059 /* retry */ 3060 } else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) { 3061 break; 3062 } 3063 /* retry */ 3064 } 3065 if (!do_free) 3066 return (0); 3067 3068 KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL); 3069 3070 /* 3071 * The last reference has gone away, we own the fp structure free 3072 * and clear. 3073 */ 3074 if (fp->f_count < 0) 3075 panic("fdrop: count < 0"); 3076 if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE && 3077 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS) 3078 ) { 3079 lf.l_whence = SEEK_SET; 3080 lf.l_start = 0; 3081 lf.l_len = 0; 3082 lf.l_type = F_UNLCK; 3083 vp = (struct vnode *)fp->f_data; 3084 VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0); 3085 } 3086 if (fp->f_ops != &badfileops) 3087 error = fo_close(fp); 3088 else 3089 error = 0; 3090 ffree(fp); 3091 return (error); 3092 } 3093 3094 /* 3095 * Apply an advisory lock on a file descriptor. 3096 * 3097 * Just attempt to get a record lock of the requested type on 3098 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0). 3099 * 3100 * MPALMOSTSAFE 3101 */ 3102 int 3103 sys_flock(struct sysmsg *sysmsg, const struct flock_args *uap) 3104 { 3105 thread_t td = curthread; 3106 struct file *fp; 3107 struct vnode *vp; 3108 struct flock lf; 3109 int error; 3110 3111 if ((fp = holdfp(td, uap->fd, -1)) == NULL) 3112 return (EBADF); 3113 if (fp->f_type != DTYPE_VNODE) { 3114 error = EOPNOTSUPP; 3115 goto done; 3116 } 3117 vp = (struct vnode *)fp->f_data; 3118 lf.l_whence = SEEK_SET; 3119 lf.l_start = 0; 3120 lf.l_len = 0; 3121 if (uap->how & LOCK_UN) { 3122 lf.l_type = F_UNLCK; 3123 atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */ 3124 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0); 3125 goto done; 3126 } 3127 if (uap->how & LOCK_EX) 3128 lf.l_type = F_WRLCK; 3129 else if (uap->how & LOCK_SH) 3130 lf.l_type = F_RDLCK; 3131 else { 3132 error = EBADF; 3133 goto done; 3134 } 3135 if (uap->how & LOCK_NB) 3136 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0); 3137 else 3138 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT); 3139 atomic_set_int(&fp->f_flag, FHASLOCK); /* race ok */ 3140 done: 3141 fdrop(fp); 3142 return (error); 3143 } 3144 3145 /* 3146 * File Descriptor pseudo-device driver (/dev/fd/). 3147 * 3148 * Opening minor device N dup()s the file (if any) connected to file 3149 * descriptor N belonging to the calling process. Note that this driver 3150 * consists of only the ``open()'' routine, because all subsequent 3151 * references to this file will be direct to the other driver. 3152 */ 3153 static int 3154 fdopen(struct dev_open_args *ap) 3155 { 3156 thread_t td = curthread; 3157 3158 KKASSERT(td->td_lwp != NULL); 3159 3160 /* 3161 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the 3162 * the file descriptor being sought for duplication. The error 3163 * return ensures that the vnode for this device will be released 3164 * by vn_open. Open will detect this special error and take the 3165 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN 3166 * will simply report the error. 3167 */ 3168 td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev); 3169 return (ENODEV); 3170 } 3171 3172 /* 3173 * The caller has reserved the file descriptor dfd for us. On success we 3174 * must fsetfd() it. On failure the caller will clean it up. 3175 */ 3176 int 3177 dupfdopen(thread_t td, int dfd, int sfd, int mode, int error) 3178 { 3179 struct filedesc *fdp; 3180 struct file *wfp; 3181 struct file *xfp; 3182 int werror; 3183 3184 if ((wfp = holdfp(td, sfd, -1)) == NULL) 3185 return (EBADF); 3186 3187 /* 3188 * Close a revoke/dup race. Duping a descriptor marked as revoked 3189 * will dup a dummy descriptor instead of the real one. 3190 */ 3191 if (wfp->f_flag & FREVOKED) { 3192 kprintf("Warning: attempt to dup() a revoked descriptor\n"); 3193 fdrop(wfp); 3194 wfp = NULL; 3195 werror = falloc(NULL, &wfp, NULL); 3196 if (werror) 3197 return (werror); 3198 } 3199 3200 fdp = td->td_proc->p_fd; 3201 3202 /* 3203 * There are two cases of interest here. 3204 * 3205 * For ENODEV simply dup sfd to file descriptor dfd and return. 3206 * 3207 * For ENXIO steal away the file structure from sfd and store it 3208 * dfd. sfd is effectively closed by this operation. 3209 * 3210 * Any other error code is just returned. 3211 */ 3212 switch (error) { 3213 case ENODEV: 3214 /* 3215 * Check that the mode the file is being opened for is a 3216 * subset of the mode of the existing descriptor. 3217 */ 3218 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) { 3219 error = EACCES; 3220 break; 3221 } 3222 spin_lock(&fdp->fd_spin); 3223 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags; 3224 fsetfd_locked(fdp, wfp, dfd); 3225 spin_unlock(&fdp->fd_spin); 3226 error = 0; 3227 break; 3228 case ENXIO: 3229 /* 3230 * Steal away the file pointer from dfd, and stuff it into indx. 3231 */ 3232 spin_lock(&fdp->fd_spin); 3233 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags; 3234 fsetfd(fdp, wfp, dfd); 3235 if ((xfp = funsetfd_locked(fdp, sfd)) != NULL) { 3236 spin_unlock(&fdp->fd_spin); 3237 fdrop(xfp); 3238 } else { 3239 spin_unlock(&fdp->fd_spin); 3240 } 3241 error = 0; 3242 break; 3243 default: 3244 break; 3245 } 3246 fdrop(wfp); 3247 return (error); 3248 } 3249 3250 /* 3251 * NOT MPSAFE - I think these refer to a common file descriptor table 3252 * and we need to spinlock that to link fdtol in. 3253 */ 3254 struct filedesc_to_leader * 3255 filedesc_to_leader_alloc(struct filedesc_to_leader *old, 3256 struct proc *leader) 3257 { 3258 struct filedesc_to_leader *fdtol; 3259 3260 fdtol = kmalloc(sizeof(struct filedesc_to_leader), 3261 M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO); 3262 fdtol->fdl_refcount = 1; 3263 fdtol->fdl_holdcount = 0; 3264 fdtol->fdl_wakeup = 0; 3265 fdtol->fdl_leader = leader; 3266 if (old != NULL) { 3267 fdtol->fdl_next = old->fdl_next; 3268 fdtol->fdl_prev = old; 3269 old->fdl_next = fdtol; 3270 fdtol->fdl_next->fdl_prev = fdtol; 3271 } else { 3272 fdtol->fdl_next = fdtol; 3273 fdtol->fdl_prev = fdtol; 3274 } 3275 return fdtol; 3276 } 3277 3278 /* 3279 * Scan all file pointers in the system. The callback is made with 3280 * the master list spinlock held exclusively. 3281 */ 3282 void 3283 allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data) 3284 { 3285 int i; 3286 3287 for (i = 0; i < NFILELIST_HEADS; ++i) { 3288 struct filelist_head *head = &filelist_heads[i]; 3289 struct file *fp; 3290 3291 spin_lock(&head->spin); 3292 LIST_FOREACH(fp, &head->list, f_list) { 3293 int res; 3294 3295 res = callback(fp, data); 3296 if (res < 0) 3297 break; 3298 } 3299 spin_unlock(&head->spin); 3300 } 3301 } 3302 3303 /* 3304 * Get file structures. 3305 * 3306 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe) 3307 */ 3308 3309 struct sysctl_kern_file_info { 3310 int count; 3311 int error; 3312 struct sysctl_req *req; 3313 }; 3314 3315 static int sysctl_kern_file_callback(struct proc *p, void *data); 3316 3317 static int 3318 sysctl_kern_file(SYSCTL_HANDLER_ARGS) 3319 { 3320 struct sysctl_kern_file_info info; 3321 3322 /* 3323 * Note: because the number of file descriptors is calculated 3324 * in different ways for sizing vs returning the data, 3325 * there is information leakage from the first loop. However, 3326 * it is of a similar order of magnitude to the leakage from 3327 * global system statistics such as kern.openfiles. 3328 * 3329 * When just doing a count, note that we cannot just count 3330 * the elements and add f_count via the filehead list because 3331 * threaded processes share their descriptor table and f_count might 3332 * still be '1' in that case. 3333 * 3334 * Since the SYSCTL op can block, we must hold the process to 3335 * prevent it being ripped out from under us either in the 3336 * file descriptor loop or in the greater LIST_FOREACH. The 3337 * process may be in varying states of disrepair. If the process 3338 * is in SZOMB we may have caught it just as it is being removed 3339 * from the allproc list, we must skip it in that case to maintain 3340 * an unbroken chain through the allproc list. 3341 */ 3342 info.count = 0; 3343 info.error = 0; 3344 info.req = req; 3345 allproc_scan(sysctl_kern_file_callback, &info, 0); 3346 3347 /* 3348 * When just calculating the size, overestimate a bit to try to 3349 * prevent system activity from causing the buffer-fill call 3350 * to fail later on. 3351 */ 3352 if (req->oldptr == NULL) { 3353 info.count = (info.count + 16) + (info.count / 10); 3354 info.error = SYSCTL_OUT(req, NULL, 3355 info.count * sizeof(struct kinfo_file)); 3356 } 3357 return (info.error); 3358 } 3359 3360 static int 3361 sysctl_kern_file_callback(struct proc *p, void *data) 3362 { 3363 struct sysctl_kern_file_info *info = data; 3364 struct kinfo_file kf; 3365 struct filedesc *fdp; 3366 struct file *fp; 3367 uid_t uid; 3368 int n; 3369 3370 if (p->p_stat == SIDL || p->p_stat == SZOMB) 3371 return(0); 3372 if (!(PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) != 0)) 3373 return(0); 3374 3375 /* 3376 * Softref the fdp to prevent it from being destroyed 3377 */ 3378 spin_lock(&p->p_spin); 3379 if ((fdp = p->p_fd) == NULL) { 3380 spin_unlock(&p->p_spin); 3381 return(0); 3382 } 3383 atomic_add_int(&fdp->fd_softrefs, 1); 3384 spin_unlock(&p->p_spin); 3385 3386 /* 3387 * The fdp's own spinlock prevents the contents from being 3388 * modified. 3389 */ 3390 spin_lock_shared(&fdp->fd_spin); 3391 for (n = 0; n < fdp->fd_nfiles; ++n) { 3392 if ((fp = fdp->fd_files[n].fp) == NULL) 3393 continue; 3394 if (info->req->oldptr == NULL) { 3395 ++info->count; 3396 } else { 3397 uid = p->p_ucred ? p->p_ucred->cr_uid : -1; 3398 kcore_make_file(&kf, fp, p->p_pid, uid, n); 3399 spin_unlock_shared(&fdp->fd_spin); 3400 info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf)); 3401 spin_lock_shared(&fdp->fd_spin); 3402 if (info->error) 3403 break; 3404 } 3405 } 3406 spin_unlock_shared(&fdp->fd_spin); 3407 atomic_subtract_int(&fdp->fd_softrefs, 1); 3408 if (info->error) 3409 return(-1); 3410 return(0); 3411 } 3412 3413 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD, 3414 0, 0, sysctl_kern_file, "S,file", "Entire file table"); 3415 3416 SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW, 3417 &minfilesperproc, 0, "Minimum files allowed open per process"); 3418 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW, 3419 &maxfilesperproc, 0, "Maximum files allowed open per process"); 3420 SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW, 3421 &maxfilesperuser, 0, "Maximum files allowed open per user"); 3422 3423 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW, 3424 &maxfiles, 0, "Maximum number of files"); 3425 3426 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW, 3427 &maxfilesrootres, 0, "Descriptors reserved for root use"); 3428 3429 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD, 3430 &nfiles, 0, "System-wide number of open files"); 3431 3432 static void 3433 fildesc_drvinit(void *unused) 3434 { 3435 int fd; 3436 3437 for (fd = 0; fd < NUMFDESC; fd++) { 3438 make_dev(&fildesc_ops, fd, 3439 UID_BIN, GID_BIN, 0666, "fd/%d", fd); 3440 } 3441 3442 make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin"); 3443 make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout"); 3444 make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr"); 3445 } 3446 3447 struct fileops badfileops = { 3448 .fo_read = badfo_readwrite, 3449 .fo_write = badfo_readwrite, 3450 .fo_ioctl = badfo_ioctl, 3451 .fo_kqfilter = badfo_kqfilter, 3452 .fo_stat = badfo_stat, 3453 .fo_close = badfo_close, 3454 .fo_shutdown = badfo_shutdown 3455 }; 3456 3457 int 3458 badfo_readwrite( 3459 struct file *fp, 3460 struct uio *uio, 3461 struct ucred *cred, 3462 int flags 3463 ) { 3464 return (EBADF); 3465 } 3466 3467 int 3468 badfo_ioctl(struct file *fp, u_long com, caddr_t data, 3469 struct ucred *cred, struct sysmsg *msgv) 3470 { 3471 return (EBADF); 3472 } 3473 3474 /* 3475 * Must return an error to prevent registration, typically 3476 * due to a revoked descriptor (file_filtops assigned). 3477 */ 3478 int 3479 badfo_kqfilter(struct file *fp, struct knote *kn) 3480 { 3481 return (EOPNOTSUPP); 3482 } 3483 3484 int 3485 badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred) 3486 { 3487 return (EBADF); 3488 } 3489 3490 int 3491 badfo_close(struct file *fp) 3492 { 3493 return (EBADF); 3494 } 3495 3496 int 3497 badfo_shutdown(struct file *fp, int how) 3498 { 3499 return (EBADF); 3500 } 3501 3502 int 3503 nofo_shutdown(struct file *fp, int how) 3504 { 3505 return (EOPNOTSUPP); 3506 } 3507 3508 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, 3509 fildesc_drvinit,NULL); 3510 3511 static void 3512 filelist_heads_init(void *arg __unused) 3513 { 3514 int i; 3515 3516 for (i = 0; i < NFILELIST_HEADS; ++i) { 3517 struct filelist_head *head = &filelist_heads[i]; 3518 3519 spin_init(&head->spin, "filehead_spin"); 3520 LIST_INIT(&head->list); 3521 } 3522 } 3523 3524 SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY, 3525 filelist_heads_init, NULL); 3526 3527 static void 3528 file_objcache_init(void *dummy __unused) 3529 { 3530 file_objcache = objcache_create("file", maxfiles, maxfiles / 8, 3531 NULL, NULL, NULL, /* TODO: ctor/dtor */ 3532 objcache_malloc_alloc, objcache_malloc_free, &file_malloc_args); 3533 } 3534 SYSINIT(fpobjcache, SI_BOOT2_POST_SMP, SI_ORDER_ANY, file_objcache_init, NULL); 3535