/*	$OpenBSD: vfs_subr.c,v 1.183 2009/08/17 13:11:58 jasper Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/tree.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {						\
	LIST_REMOVE(bp, b_vnbufs);				\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;			\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);
void	vhold(struct vnode *);
void	vdrop(struct vnode *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
    struct export_args *);
int vfs_free_netcred(struct radix_node *, void *);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;

static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_lblkno < b2->b_lblkno)
		return(-1);
	if (b1->b_lblkno > b2->b_lblkno)
		return(1);
	return(0);
}
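
/*
 * Illustrative sketch (not part of the original file): the tree
 * generated above keys each vnode's buffers by logical block number,
 * so an in-core buffer can be found with a stack "key" buf that only
 * has b_lblkno filled in.  The function name and the daddr64_t
 * parameter type are assumptions made for the example; real callers
 * must hold splbio() while touching v_bufs_tree.
 */
#if 0
static struct buf *
example_buf_lookup(struct vnode *vp, daddr64_t lblkno)
{
	struct buf key;

	key.b_lblkno = lblkno;
	return (RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &key));
}
#endif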

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = desiredvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}
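
/*
 * Illustrative sketch (not part of the original file): the usual
 * pattern for inspecting mount points is to busy each one, do the
 * work, then unbusy it; a failed vfs_busy() means an unmount is in
 * progress and the mount point is simply skipped.
 */
#if 0
static void
example_walk_mounts(void)
{
	struct mount *mp;

	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
			continue;	/* being unmounted, skip it */
		/* ... safe to examine mp here ... */
		vfs_unbusy(mp);
	}
}
#endif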

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void)copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Make a 'unique' number from a mount type name.
 * Note that this is no longer used for ffs which
 * now has an on-disk filesystem id.
 */
long
makefstype(char *type)
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
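
/*
 * Worked example (added note, not in the original): for the name "ffs"
 * the loop computes ((('f' << 2) ^ 'f') << 2) ^ 's', i.e. rv = 0x66,
 * then 0x198 ^ 0x66 = 0x1fe, then 0x7f8 ^ 0x73 = 0x78b, so
 * makefstype("ffs") == 0x78b.
 */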

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a vnode from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes > 2 * maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		RB_INIT(&vp->v_bufs_tree);
		RB_INIT(&vp->v_nc_tree);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
		    M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		bzero(nvp->v_specbitmap, sizeof(nvp->v_specbitmap));
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, 0, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}
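
/*
 * Illustrative sketch (not part of the original file): a caller that
 * only holds a soft reference to a vnode (e.g. found it on a hash
 * chain or a mount list) typically takes a real reference and the
 * lock with vget() and releases both with vput(); an error from
 * vget() means the vnode was being reclaimed underneath us.
 */
#if 0
	if (vget(vp, LK_EXCLUSIVE, curproc) == 0) {
		/* ... vp is referenced and locked here ... */
		vput(vp);		/* unlock and drop the reference */
	}
#endif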

/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 * Returns 0 if it did not sleep.
 */
int
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return (0);
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return (1);
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
	return (1);
}

/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg) {
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}
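
/*
 * Illustrative sketch (not part of the original file): a minimal
 * callback for vfs_mount_foreach_vnode().  Returning non-zero from
 * the callback stops the walk, as the break above implements.
 */
#if 0
static int
example_count_vnode(struct vnode *vp, void *arg)
{
	int *countp = arg;

	(*countp)++;
	return (0);		/* keep iterating */
}

/* usage: int n = 0; vfs_mount_foreach_vnode(mp, example_count_vnode, &n); */
#endif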

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg) {
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = spec_vnodeop_p;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
	    vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}

}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP);
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return(ret);
	}
	return (EOPNOTSUPP);
}
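
/*
 * Illustrative sketch (not part of the original file): from userland
 * the generic branch above is reached through sysctl(3) with a name
 * such as { CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM }; this fragment
 * would live in a userland program using <sys/param.h>,
 * <sys/sysctl.h> and <err.h>.
 */
#if 0
	int mib[3] = { CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM };
	int maxtype;
	size_t len = sizeof(maxtype);

	if (sysctl(mib, 3, &maxtype, &len, NULL, 0) == -1)
		err(1, "sysctl");
#endif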

int kinfo_vdebug = 1;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(char *where, size_t *sizep, struct proc *p)
{
	struct mount *mp, *nmp;
	struct vnode *vp, *nvp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + sizeof(struct e_vnode) > ewhere) {
				*sizep = bp - where;
				vfs_unbusy(mp);
				return (ENOMEM);
			}
			if ((error = copyout(&vp,
			    &((struct e_vnode *)bp)->vptr,
			    sizeof(struct vnode *))) ||
			    (error = copyout(vp,
			    &((struct e_vnode *)bp)->vnode,
			    sizeof(struct vnode)))) {
				vfs_unbusy(mp);
				return (error);
			}
			bp += sizeof(struct e_vnode);
		}

		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}

	*sizep = bp - where;

	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
	    np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free(rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
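
/*
 * Worked example (added note, not in the original): for a regular file
 * with mode 0644 owned by uid 100/gid 10, a VWRITE request by uid 200
 * who is a member of group 10 builds mask = S_IWGRP; since 0644 has no
 * group-write bit, (file_mode & mask) != mask and EACCES is returned.
 */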

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	extern void acct_shutdown(void);

	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * perform sync() operation and wait for buffers to flush.
 * assumptions: called w/ scheduler disabled and physical io enabled
 * for now called at spl0() XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}
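
/*
 * Illustrative sketch (not part of the original file): the writing
 * side raises v_numoutput under splbio() before starting asynchronous
 * output (I/O completion eventually calls vwakeup()), and a caller
 * that must wait for the output to drain uses vwaitforio().  "s" and
 * "vp" are assumed to be in scope.
 */
#if 0
	s = splbio();
	vp->v_numoutput++;			/* one more write in flight */
	splx(s);
	/* ... start the asynchronous write ... */

	s = splbio();
	vwaitforio(vp, 0, "example", 0);	/* sleep until it drains */
	splx(s);
#endif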

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			buf_acquire(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);


	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}
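
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * module would describe itself with a struct vfsconf and hand it to
 * vfs_register() at attach time (and vfs_unregister() at detach
 * time).  "example_vfsops", the name and the typenum below are made
 * up for the example.
 */
#if 0
static int
example_register_fs(void)
{
	static struct vfsconf example_vfsconf;

	example_vfsconf.vfc_vfsops = &example_vfsops;	/* assumed ops vector */
	strlcpy(example_vfsconf.vfc_name, "examplefs", MFSNAMELEN);
	example_vfsconf.vfc_typenum = maxvfsconf + 1;

	return (vfs_register(&example_vfsconf));
}
#endif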

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
vfs_buf_print(void *b, int full, int (*pr)(const char *, ...))
{
	struct buf *bp = b;

	(*pr)(" vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	    " proc %p error %d flags %b\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx sync 0x%x\n"
	    " data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid, bp->b_synctime,
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)(" dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(void *v, int full, int (*pr)(const char *, ...))
{
	struct vnode *vp = v;

#define NENTS(n)	(sizeof n / sizeof(n[0]))
	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	    vp->v_tag > NENTS(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
	    vp->v_type > NENTS(vtypes)? "<unk>":vtypes[vp->v_type],
	    vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_mount_print(struct mount *mp, int full, int (*pr)(const char *, ...))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)(" files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)(" f_fsidx {0x%x, 0x%x} owner %u ctime 0x%x\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)(" syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)(" syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)(" fstype \"%s\" mnton \"%s\" mntfrom \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
(*pr)(" %p,\n\t", vp); 2282 else 2283 (*pr)(" %p,", vp); 2284 (*pr)("\n"); 2285 } 2286 } 2287 #endif /* DDB */ 2288 2289 void 2290 copy_statfs_info(struct statfs *sbp, const struct mount *mp) 2291 { 2292 const struct statfs *mbp; 2293 2294 strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN); 2295 2296 if (sbp == (mbp = &mp->mnt_stat)) 2297 return; 2298 2299 sbp->f_fsid = mbp->f_fsid; 2300 sbp->f_owner = mbp->f_owner; 2301 sbp->f_flags = mbp->f_flags; 2302 sbp->f_syncwrites = mbp->f_syncwrites; 2303 sbp->f_asyncwrites = mbp->f_asyncwrites; 2304 sbp->f_syncreads = mbp->f_syncreads; 2305 sbp->f_asyncreads = mbp->f_asyncreads; 2306 sbp->f_namemax = mbp->f_namemax; 2307 bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); 2308 bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); 2309 bcopy(&mp->mnt_stat.mount_info.ufs_args, &sbp->mount_info.ufs_args, 2310 sizeof(struct ufs_args)); 2311 } 2312