/*	$NetBSD: vfs_subr.c,v 1.182 2002/10/29 12:31:24 blymn Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.182 2002/10/29 12:31:24 blymn Exp $");

#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_43.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/device.h>
#include <sys/dirent.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

#include <sys/sysctl.h>

enum vtype iftovt_tab[16] = {
        VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
        VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
const int vttoif_tab[9] = {
        0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
        S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */

extern int dovfsusermount;	/* 1 => permit any user to mount filesystems */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define bufremvn(bp) {							\
        LIST_REMOVE(bp, b_vnbufs);					\
        (bp)->b_vnbufs.le_next = NOLIST;				\
}
/* TAILQ_HEAD(freelst, vnode) vnode_free_list = vnode free list (in vnode.h) */
struct freelst vnode_free_list = TAILQ_HEAD_INITIALIZER(vnode_free_list);
struct freelst vnode_hold_list = TAILQ_HEAD_INITIALIZER(vnode_hold_list);

struct mntlist mountlist =		/* mounted filesystem list */
    CIRCLEQ_HEAD_INITIALIZER(mountlist);
struct vfs_list_head vfs_list =		/* vfs list */
    LIST_HEAD_INITIALIZER(vfs_list);

struct nfs_public nfs_pub;		/* publicly exported FS */

struct simplelock mountlist_slock = SIMPLELOCK_INITIALIZER;
static struct simplelock mntid_slock = SIMPLELOCK_INITIALIZER;
struct simplelock mntvnode_slock = SIMPLELOCK_INITIALIZER;
struct simplelock vnode_free_list_slock = SIMPLELOCK_INITIALIZER;
struct simplelock spechash_slock = SIMPLELOCK_INITIALIZER;

/*
 * These define the root filesystem and device.
 */
struct mount *rootfs;
struct vnode *rootvnode;
struct device *root_device;		/* root device */

struct pool vnode_pool;			/* memory pool for vnodes */

/*
 * Local declarations.
 */
void insmntque __P((struct vnode *, struct mount *));
int getdevvp __P((dev_t, struct vnode **, enum vtype));
void vgoneall __P((struct vnode *));

void vclean(struct vnode *, int, struct proc *);

static int vfs_hang_addrlist __P((struct mount *, struct netexport *,
    struct export_args *));
static int vfs_free_netcred __P((struct radix_node *, void *));
static void vfs_free_addrlist __P((struct netexport *));

#ifdef DEBUG
void printlockedvnodes __P((void));
#endif

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

        pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodepl",
            &pool_allocator_nointr);

        /*
         * Initialize the filesystem syncer.
         */
        vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp)
        struct mount *mp;
        int flags;
        struct simplelock *interlkp;
{
        int lkflags;

        while (mp->mnt_flag & MNT_UNMOUNT) {
                int gone;

                if (flags & LK_NOWAIT)
                        return (ENOENT);
                if ((flags & LK_RECURSEFAIL) && mp->mnt_unmounter != NULL
                    && mp->mnt_unmounter == curproc)
                        return (EDEADLK);
                if (interlkp)
                        simple_unlock(interlkp);
                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 *
                 * XXX MP: add spinlock protecting mnt_wcnt here once you
                 * can atomically unlock-and-sleep.
                 */
                mp->mnt_wcnt++;
                tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
                mp->mnt_wcnt--;
                gone = mp->mnt_flag & MNT_GONE;

                if (mp->mnt_wcnt == 0)
                        wakeup(&mp->mnt_wcnt);
                if (interlkp)
                        simple_lock(interlkp);
                if (gone)
                        return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (interlkp)
                lkflags |= LK_INTERLOCK;
        if (lockmgr(&mp->mnt_lock, lkflags, interlkp))
                panic("vfs_busy: unexpected lock failure");
        return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp)
        struct mount *mp;
{

        lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
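 *
 * The mount returned is marked busy and read-only, with "devname"
 * copied into f_mntfromname as a provisional name.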
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
        char *fstypename;
        char *devname;
        struct mount **mpp;
{
        struct vfsops *vfsp = NULL;
        struct mount *mp;

        LIST_FOREACH(vfsp, &vfs_list, vfs_list)
                if (!strncmp(vfsp->vfs_name, fstypename, MFSNAMELEN))
                        break;

        if (vfsp == NULL)
                return (ENODEV);
        mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
        memset((char *)mp, 0, (u_long)sizeof(struct mount));
        lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
        (void)vfs_busy(mp, LK_NOWAIT, 0);
        LIST_INIT(&mp->mnt_vnodelist);
        mp->mnt_op = vfsp;
        mp->mnt_flag = MNT_RDONLY;
        mp->mnt_vnodecovered = NULLVP;
        vfsp->vfs_refcount++;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfs_name, MFSNAMELEN);
        mp->mnt_stat.f_mntonname[0] = '/';
        (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
        *mpp = mp;
        return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
        fsid_t *fsid;
{
        struct mount *mp;

        simple_lock(&mountlist_slock);
        CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        simple_unlock(&mountlist_slock);
                        return (mp);
                }
        }
        simple_unlock(&mountlist_slock);
        return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
        struct mount *mp;
{
        static u_short xxxfs_mntid;
        fsid_t tfsid;
        int mtype;

        simple_lock(&mntid_slock);
        mtype = makefstype(mp->mnt_op->vfs_name);
        mp->mnt_stat.f_fsid.val[0] = makedev(mtype, 0);
        mp->mnt_stat.f_fsid.val[1] = mtype;
        if (xxxfs_mntid == 0)
                ++xxxfs_mntid;
        tfsid.val[0] = makedev(mtype & 0xff, xxxfs_mntid);
        tfsid.val[1] = mtype;
        if (!CIRCLEQ_EMPTY(&mountlist)) {
                while (vfs_getvfs(&tfsid)) {
                        tfsid.val[0]++;
                        xxxfs_mntid++;
                }
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 */
long
makefstype(type)
        const char *type;
{
        long rv;

        for (rv = 0; *type; type++) {
                rv <<= 2;
                rv ^= *type;
        }
        return rv;
}


/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
        struct vattr *vap;
{

        vap->va_type = VNON;

        /*
         * Assign individually so that it is safe even if size and
         * sign of each member are varied.
         */
        vap->va_mode = VNOVAL;
        vap->va_nlink = VNOVAL;
        vap->va_uid = VNOVAL;
        vap->va_gid = VNOVAL;
        vap->va_fsid = VNOVAL;
        vap->va_fileid = VNOVAL;
        vap->va_size = VNOVAL;
        vap->va_blocksize = VNOVAL;
        vap->va_atime.tv_sec =
            vap->va_mtime.tv_sec =
            vap->va_ctime.tv_sec = VNOVAL;
        vap->va_atime.tv_nsec =
            vap->va_mtime.tv_nsec =
            vap->va_ctime.tv_nsec = VNOVAL;
        vap->va_gen = VNOVAL;
        vap->va_flags = VNOVAL;
        vap->va_rdev = VNOVAL;
        vap->va_bytes = VNOVAL;
        vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) __P((void *));
long numvnodes;

/*
 * Return the next vnode from the free list.
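 *
 * A fresh vnode is allocated from vnode_pool while the total number
 * stays below desiredvnodes; otherwise one is recycled from the free
 * or hold lists (see the policy comment in the function body).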
 */
int
getnewvnode(tag, mp, vops, vpp)
        enum vtagtype tag;
        struct mount *mp;
        int (**vops) __P((void *));
        struct vnode **vpp;
{
        extern struct uvm_pagerops uvm_vnodeops;
        struct uvm_object *uobj;
        struct proc *p = curproc;	/* XXX */
        struct freelst *listhd;
        static int toggle;
        struct vnode *vp;
        int error = 0, tryalloc;

 try_again:
        if (mp) {
                /*
                 * Mark filesystem busy while we're creating a vnode.
                 * If unmount is in progress, this will wait; if the
                 * unmount succeeds (only if umount -f), this will
                 * return an error.  If the unmount fails, we'll keep
                 * going afterwards.
                 * (This puts the per-mount vnode list logically under
                 * the protection of the vfs_busy lock).
                 */
                error = vfs_busy(mp, LK_RECURSEFAIL, 0);
                if (error && error != EDEADLK)
                        return error;
        }

        /*
         * We must choose whether to allocate a new vnode or recycle an
         * existing one. The criterion for allocating a new one is that
         * the total number of vnodes is less than the number desired or
         * there are no vnodes on either free list. Generally we only
         * want to recycle vnodes that have no buffers associated with
         * them, so we look first on the vnode_free_list. If it is empty,
         * we next consider vnodes with referencing buffers on the
         * vnode_hold_list. The toggle ensures that half the time we
         * will use a buffer from the vnode_hold_list, and half the time
         * we will allocate a new one unless the list has grown to twice
         * the desired size. We are reticent to recycle vnodes from the
         * vnode_hold_list because we will lose the identity of all its
         * referencing buffers.
         */

        vp = NULL;

        simple_lock(&vnode_free_list_slock);

        toggle ^= 1;
        if (numvnodes > 2 * desiredvnodes)
                toggle = 0;

        tryalloc = numvnodes < desiredvnodes ||
            (TAILQ_FIRST(&vnode_free_list) == NULL &&
             (TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));

        if (tryalloc &&
            (vp = pool_get(&vnode_pool, PR_NOWAIT)) != NULL) {
                simple_unlock(&vnode_free_list_slock);
                memset(vp, 0, sizeof(*vp));
                simple_lock_init(&vp->v_interlock);
                uobj = &vp->v_uobj;
                uobj->pgops = &uvm_vnodeops;
                uobj->uo_npages = 0;
                TAILQ_INIT(&uobj->memq);
                numvnodes++;
        } else {
                if ((vp = TAILQ_FIRST(listhd = &vnode_free_list)) == NULL)
                        vp = TAILQ_FIRST(listhd = &vnode_hold_list);
                for (; vp != NULL; vp = TAILQ_NEXT(vp, v_freelist)) {
                        if (simple_lock_try(&vp->v_interlock)) {
                                if ((vp->v_flag & VLAYER) == 0) {
                                        break;
                                }
                                if (VOP_ISLOCKED(vp) == 0)
                                        break;
                                else
                                        simple_unlock(&vp->v_interlock);
                        }
                }
                /*
                 * Unless this is a bad time of the month, at most
                 * the first NCPUS items on the free list are
                 * locked, so this is close enough to being empty.
                 */
                if (vp == NULLVP) {
                        simple_unlock(&vnode_free_list_slock);
                        if (mp && error != EDEADLK)
                                vfs_unbusy(mp);
                        if (tryalloc) {
                                printf("WARNING: unable to allocate new "
                                    "vnode, retrying...\n");
                                (void) tsleep(&lbolt, PRIBIO, "newvn", hz);
                                goto try_again;
                        }
                        tablefull("vnode", "increase kern.maxvnodes or NVNODE");
                        *vpp = 0;
                        return (ENFILE);
                }
                if (vp->v_usecount)
                        panic("free vnode isn't, vp %p", vp);
                TAILQ_REMOVE(listhd, vp, v_freelist);
                /* see comment on why 0xdeadb is set at end of vgone (below) */
                vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
                simple_unlock(&vnode_free_list_slock);
                vp->v_lease = NULL;

                if (vp->v_type != VBAD)
                        vgonel(vp, p);
                else
                        simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
                if (vp->v_data || vp->v_uobj.uo_npages ||
                    TAILQ_FIRST(&vp->v_uobj.memq))
                        panic("cleaned vnode isn't, vp %p", vp);
                if (vp->v_numoutput)
                        panic("clean vnode has pending I/O's, vp %p", vp);
#endif
                KASSERT((vp->v_flag & VONWORKLST) == 0);
                vp->v_flag = 0;
                vp->v_socket = NULL;
#ifdef VERIFIED_EXEC
                vp->fp_status = FINGERPRINT_INVALID;
#endif
        }
        vp->v_type = VNON;
        vp->v_vnlock = &vp->v_lock;
        lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
        cache_purge(vp);
        vp->v_tag = tag;
        vp->v_op = vops;
        insmntque(vp, mp);
        *vpp = vp;
        vp->v_usecount = 1;
        vp->v_data = 0;
        simple_lock_init(&vp->v_uobj.vmobjlock);

        /*
         * initialize uvm_object within vnode.
         */

        uobj = &vp->v_uobj;
        KASSERT(uobj->pgops == &uvm_vnodeops);
        KASSERT(uobj->uo_npages == 0);
        KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
        vp->v_size = VSIZENOTSET;

        if (mp && error != EDEADLK)
                vfs_unbusy(mp);
        return (0);
}

/*
 * This is really just the reverse of getnewvnode(). Needed for
 * VFS_VGET functions that may need to push back a vnode in case
 * of a locking race.
 */
void
ungetnewvnode(vp)
        struct vnode *vp;
{
#ifdef DIAGNOSTIC
        if (vp->v_usecount != 1)
                panic("ungetnewvnode: busy vnode");
#endif
        vp->v_usecount--;
        insmntque(vp, NULL);
        vp->v_type = VBAD;

        simple_lock(&vp->v_interlock);
        /*
         * Insert at head of LRU list
         */
        simple_lock(&vnode_free_list_slock);
        if (vp->v_holdcnt > 0)
                TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
        else
                TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
        simple_unlock(&vnode_free_list_slock);
        simple_unlock(&vp->v_interlock);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
        struct vnode *vp;
        struct mount *mp;
{

#ifdef DIAGNOSTIC
        if ((mp != NULL) &&
            (mp->mnt_flag & MNT_UNMOUNT) &&
            !(mp->mnt_flag & MNT_SOFTDEP) &&
            vp->v_tag != VT_VFS) {
                panic("insmntque into dying filesystem");
        }
#endif

        simple_lock(&mntvnode_slock);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL)
                LIST_REMOVE(vp, v_mntvnodes);
        /*
         * Insert into list of vnodes for the new mount point, if available.
         */
        if ((vp->v_mount = mp) != NULL)
                LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
        simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
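 * A thread sleeping in vflushbuf() sets VBWAIT and waits on
 * v_numoutput; it is woken here once the output count drains to zero.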
 */
void
vwakeup(bp)
        struct buf *bp;
{
        struct vnode *vp;

        if ((vp = bp->b_vp) != NULL) {
                if (--vp->v_numoutput < 0)
                        panic("vwakeup: neg numoutput, vp %p", vp);
                if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
                        vp->v_flag &= ~VBWAIT;
                        wakeup((caddr_t)&vp->v_numoutput);
                }
        }
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
        struct vnode *vp;
        int flags;
        struct ucred *cred;
        struct proc *p;
        int slpflag, slptimeo;
{
        struct buf *bp, *nbp;
        int s, error;
        int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
            (flags & V_SAVE ? PGO_CLEANIT : 0);

        /* XXXUBC this doesn't look at flags or slp* */
        simple_lock(&vp->v_interlock);
        error = VOP_PUTPAGES(vp, 0, 0, flushflags);
        if (error) {
                return error;
        }

        if (flags & V_SAVE) {
                error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, p);
                if (error)
                        return (error);
#ifdef DIAGNOSTIC
                s = splbio();
                if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
                        panic("vinvalbuf: dirty bufs, vp %p", vp);
                splx(s);
#endif
        }

        s = splbio();

restart:
        for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if (bp->b_flags & B_BUSY) {
                        bp->b_flags |= B_WANTED;
                        error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
                            "vinvalbuf", slptimeo);
                        if (error) {
                                splx(s);
                                return (error);
                        }
                        goto restart;
                }
                bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
                brelse(bp);
        }

        for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if (bp->b_flags & B_BUSY) {
                        bp->b_flags |= B_WANTED;
                        error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
                            "vinvalbuf", slptimeo);
                        if (error) {
                                splx(s);
                                return (error);
                        }
                        goto restart;
                }
                /*
                 * XXX Since there are no node locks for NFS, I believe
                 * there is a slight chance that a delayed write will
                 * occur while sleeping just above, so check for it.
                 */
                if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
                        printf("buffer still DELWRI\n");
#endif
                        bp->b_flags |= B_BUSY | B_VFLUSH;
                        VOP_BWRITE(bp);
                        goto restart;
                }
                bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
                brelse(bp);
        }

#ifdef DIAGNOSTIC
        if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
                panic("vinvalbuf: flush failed, vp %p", vp);
#endif

        splx(s);

        return (0);
}

/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
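 * Only buffers whose logical block number is at or beyond "lbn" are
 * invalidated; blocks below the truncation point are left alone.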
 */
int
vtruncbuf(vp, lbn, slpflag, slptimeo)
        struct vnode *vp;
        daddr_t lbn;
        int slpflag, slptimeo;
{
        struct buf *bp, *nbp;
        int s, error;
        voff_t off;

        off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
        simple_lock(&vp->v_interlock);
        error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
        if (error) {
                return error;
        }

        s = splbio();

restart:
        for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if (bp->b_lblkno < lbn)
                        continue;
                if (bp->b_flags & B_BUSY) {
                        bp->b_flags |= B_WANTED;
                        error = tsleep(bp, slpflag | (PRIBIO + 1),
                            "vtruncbuf", slptimeo);
                        if (error) {
                                splx(s);
                                return (error);
                        }
                        goto restart;
                }
                bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
                brelse(bp);
        }

        for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if (bp->b_lblkno < lbn)
                        continue;
                if (bp->b_flags & B_BUSY) {
                        bp->b_flags |= B_WANTED;
                        error = tsleep(bp, slpflag | (PRIBIO + 1),
                            "vtruncbuf", slptimeo);
                        if (error) {
                                splx(s);
                                return (error);
                        }
                        goto restart;
                }
                bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
                brelse(bp);
        }

        splx(s);

        return (0);
}

void
vflushbuf(vp, sync)
        struct vnode *vp;
        int sync;
{
        struct buf *bp, *nbp;
        int flags = PGO_CLEANIT | PGO_ALLPAGES | (sync ? PGO_SYNCIO : 0);
        int s;

        simple_lock(&vp->v_interlock);
        (void) VOP_PUTPAGES(vp, 0, 0, flags);

loop:
        s = splbio();
        for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
                nbp = LIST_NEXT(bp, b_vnbufs);
                if ((bp->b_flags & B_BUSY))
                        continue;
                if ((bp->b_flags & B_DELWRI) == 0)
                        panic("vflushbuf: not dirty, bp %p", bp);
                bp->b_flags |= B_BUSY | B_VFLUSH;
                splx(s);
                /*
                 * Wait for I/O associated with indirect blocks to complete,
                 * since there is no way to quickly wait for them below.
                 */
                if (bp->b_vp == vp || sync == 0)
                        (void) bawrite(bp);
                else
                        (void) bwrite(bp);
                goto loop;
        }
        if (sync == 0) {
                splx(s);
                return;
        }
        while (vp->v_numoutput) {
                vp->v_flag |= VBWAIT;
                tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0);
        }
        splx(s);
        if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
                vprint("vflushbuf: dirty", vp);
                goto loop;
        }
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
        struct vnode *vp;
        struct buf *bp;
{
        int s;

        if (bp->b_vp)
                panic("bgetvp: not free, bp %p", bp);
        VHOLD(vp);
        s = splbio();
        bp->b_vp = vp;
        if (vp->v_type == VBLK || vp->v_type == VCHR)
                bp->b_dev = vp->v_rdev;
        else
                bp->b_dev = NODEV;
        /*
         * Insert onto list for new vnode.
         */
        bufinsvn(bp, &vp->v_cleanblkhd);
        splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
        struct buf *bp;
{
        struct vnode *vp;
        int s;

        if (bp->b_vp == NULL)
                panic("brelvp: vp NULL, bp %p", bp);

        s = splbio();
        vp = bp->b_vp;
        /*
         * Delete from old vnode list, if on one.
         */
        if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
                bufremvn(bp);

        if (TAILQ_EMPTY(&vp->v_uobj.memq) && (vp->v_flag & VONWORKLST) &&
            LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
                vp->v_flag &= ~VONWORKLST;
                LIST_REMOVE(vp, v_synclist);
        }

        bp->b_vp = NULL;
        HOLDRELE(vp);
        splx(s);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 *
 * This function must be called at splbio().
 */
void
reassignbuf(bp, newvp)
        struct buf *bp;
        struct vnode *newvp;
{
        struct buflists *listheadp;
        int delay;

        /*
         * Delete from old vnode list, if on one.
         */
        if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
                bufremvn(bp);
        /*
         * If dirty, put on list of dirty buffers;
         * otherwise insert onto list of clean buffers.
         */
        if ((bp->b_flags & B_DELWRI) == 0) {
                listheadp = &newvp->v_cleanblkhd;
                if (TAILQ_EMPTY(&newvp->v_uobj.memq) &&
                    (newvp->v_flag & VONWORKLST) &&
                    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
                        newvp->v_flag &= ~VONWORKLST;
                        LIST_REMOVE(newvp, v_synclist);
                }
        } else {
                listheadp = &newvp->v_dirtyblkhd;
                if ((newvp->v_flag & VONWORKLST) == 0) {
                        switch (newvp->v_type) {
                        case VDIR:
                                delay = dirdelay;
                                break;
                        case VBLK:
                                if (newvp->v_specmountpoint != NULL) {
                                        delay = metadelay;
                                        break;
                                }
                                /* fall through */
                        default:
                                delay = filedelay;
                                break;
                        }
                        if (!newvp->v_mount ||
                            (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
                                vn_syncer_add_to_worklist(newvp, delay);
                }
        }
        bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
        dev_t dev;
        struct vnode **vpp;
{

        return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
        dev_t dev;
        struct vnode **vpp;
{

        return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
        dev_t dev;
        struct vnode **vpp;
        enum vtype type;
{
        struct vnode *vp;
        struct vnode *nvp;
        int error;

        if (dev == NODEV) {
                *vpp = NULLVP;
                return (0);
        }
        error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
        if (error) {
                *vpp = NULLVP;
                return (error);
        }
        vp = nvp;
        vp->v_type = type;
        if ((nvp = checkalias(vp, dev, NULL)) != 0) {
                vput(vp);
                vp = nvp;
        }
        *vpp = vp;
        return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
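 *
 * Returns NULLVP when the caller's vnode becomes the device vnode;
 * otherwise the existing alias is returned with an extra reference
 * taken for the caller (see getdevvp() above for typical use).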
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
        struct vnode *nvp;
        dev_t nvp_rdev;
        struct mount *mp;
{
        struct proc *p = curproc;	/* XXX */
        struct vnode *vp;
        struct vnode **vpp;

        if (nvp->v_type != VBLK && nvp->v_type != VCHR)
                return (NULLVP);

        vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
        simple_lock(&spechash_slock);
        for (vp = *vpp; vp; vp = vp->v_specnext) {
                if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
                        continue;
                /*
                 * Alias, but not in use, so flush it out.
                 */
                simple_lock(&vp->v_interlock);
                if (vp->v_usecount == 0) {
                        simple_unlock(&spechash_slock);
                        vgonel(vp, p);
                        goto loop;
                }
                if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
                        simple_unlock(&spechash_slock);
                        goto loop;
                }
                break;
        }
        if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
                MALLOC(nvp->v_specinfo, struct specinfo *,
                    sizeof(struct specinfo), M_VNODE, M_NOWAIT);
                /* XXX Erg. */
                if (nvp->v_specinfo == NULL) {
                        simple_unlock(&spechash_slock);
                        uvm_wait("checkalias");
                        goto loop;
                }

                nvp->v_rdev = nvp_rdev;
                nvp->v_hashchain = vpp;
                nvp->v_specnext = *vpp;
                nvp->v_specmountpoint = NULL;
                simple_unlock(&spechash_slock);
                nvp->v_speclockf = NULL;
                *vpp = nvp;
                if (vp != NULLVP) {
                        nvp->v_flag |= VALIASED;
                        vp->v_flag |= VALIASED;
                        vput(vp);
                }
                return (NULLVP);
        }
        simple_unlock(&spechash_slock);
        VOP_UNLOCK(vp, 0);
        simple_lock(&vp->v_interlock);
        vclean(vp, 0, p);
        vp->v_op = nvp->v_op;
        vp->v_tag = nvp->v_tag;
        vp->v_vnlock = &vp->v_lock;
        lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
        nvp->v_type = VNON;
        insmntque(vp, mp);
        return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set the
 * vnode is being eliminated in vgone. In that case, we can not
 * grab the vnode, so the process is awakened when the transition is
 * completed, and an error returned to indicate that the vnode is no
 * longer usable (possibly having been changed to a new file system type).
 */
int
vget(vp, flags)
        struct vnode *vp;
        int flags;
{
        int error;

        /*
         * If the vnode is in the process of being cleaned out for
         * another use, we wait for the cleaning to finish and then
         * return failure. Cleaning is determined by checking that
         * the VXLOCK flag is set.
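         * (With LK_NOWAIT we return EBUSY immediately instead of
         * sleeping; the PNORELOCK sleep below leaves the interlock
         * released on return.)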
         */

        if ((flags & LK_INTERLOCK) == 0)
                simple_lock(&vp->v_interlock);
        if (vp->v_flag & VXLOCK) {
                if (flags & LK_NOWAIT) {
                        simple_unlock(&vp->v_interlock);
                        return EBUSY;
                }
                vp->v_flag |= VXWANT;
                ltsleep(vp, PINOD|PNORELOCK, "vget", 0, &vp->v_interlock);
                return (ENOENT);
        }
        if (vp->v_usecount == 0) {
                simple_lock(&vnode_free_list_slock);
                if (vp->v_holdcnt > 0)
                        TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
                else
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                simple_unlock(&vnode_free_list_slock);
        }
        vp->v_usecount++;
#ifdef DIAGNOSTIC
        if (vp->v_usecount == 0) {
                vprint("vget", vp);
                panic("vget: usecount overflow, vp %p", vp);
        }
#endif
        if (flags & LK_TYPE_MASK) {
                if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
                        /*
                         * must expand vrele here because we do not want
                         * to call VOP_INACTIVE if the reference count
                         * drops back to zero since it was never really
                         * active. We must remove it from the free list
                         * before sleeping so that multiple processes do
                         * not try to recycle it.
                         */
                        simple_lock(&vp->v_interlock);
                        vp->v_usecount--;
                        if (vp->v_usecount > 0) {
                                simple_unlock(&vp->v_interlock);
                                return (error);
                        }
                        /*
                         * insert at tail of LRU list
                         */
                        simple_lock(&vnode_free_list_slock);
                        if (vp->v_holdcnt > 0)
                                TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
                                    v_freelist);
                        else
                                TAILQ_INSERT_TAIL(&vnode_free_list, vp,
                                    v_freelist);
                        simple_unlock(&vnode_free_list_slock);
                        simple_unlock(&vp->v_interlock);
                }
                return (error);
        }
        simple_unlock(&vp->v_interlock);
        return (0);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
        struct vnode *vp;
{
        struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
        if (vp == NULL)
                panic("vput: null vp");
#endif
        simple_lock(&vp->v_interlock);
        vp->v_usecount--;
        if (vp->v_usecount > 0) {
                simple_unlock(&vp->v_interlock);
                VOP_UNLOCK(vp, 0);
                return;
        }
#ifdef DIAGNOSTIC
        if (vp->v_usecount < 0 || vp->v_writecount != 0) {
                vprint("vput: bad ref count", vp);
                panic("vput: ref cnt");
        }
#endif
        /*
         * Insert at tail of LRU list.
         */
        simple_lock(&vnode_free_list_slock);
        if (vp->v_holdcnt > 0)
                TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
        else
                TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        simple_unlock(&vnode_free_list_slock);
        if (vp->v_flag & VEXECMAP) {
                uvmexp.execpages -= vp->v_uobj.uo_npages;
                uvmexp.filepages += vp->v_uobj.uo_npages;
        }
        vp->v_flag &= ~(VTEXT|VEXECMAP);
        simple_unlock(&vp->v_interlock);
        VOP_INACTIVE(vp, p);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
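 *
 * Unlike vput(), the vnode is not expected to be locked on entry;
 * vrele() takes the vnode lock itself before calling VOP_INACTIVE.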
 */
void
vrele(vp)
        struct vnode *vp;
{
        struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
        if (vp == NULL)
                panic("vrele: null vp");
#endif
        simple_lock(&vp->v_interlock);
        vp->v_usecount--;
        if (vp->v_usecount > 0) {
                simple_unlock(&vp->v_interlock);
                return;
        }
#ifdef DIAGNOSTIC
        if (vp->v_usecount < 0 || vp->v_writecount != 0) {
                vprint("vrele: bad ref count", vp);
                panic("vrele: ref cnt vp %p", vp);
        }
#endif
        /*
         * Insert at tail of LRU list.
         */
        simple_lock(&vnode_free_list_slock);
        if (vp->v_holdcnt > 0)
                TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
        else
                TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        simple_unlock(&vnode_free_list_slock);
        if (vp->v_flag & VEXECMAP) {
                uvmexp.execpages -= vp->v_uobj.uo_npages;
                uvmexp.filepages += vp->v_uobj.uo_npages;
        }
        vp->v_flag &= ~(VTEXT|VEXECMAP);
        if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
                VOP_INACTIVE(vp, p);
}

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
        struct vnode *vp;
{

        /*
         * If it is on the freelist and the hold count is currently
         * zero, move it to the hold list. The test of the back
         * pointer and the use reference count of zero is because
         * it will be removed from a free list by getnewvnode,
         * but will not have its reference count incremented until
         * after calling vgone. If the reference count were
         * incremented first, vgone would (incorrectly) try to
         * close the previous instance of the underlying object.
         * So, the back pointer is explicitly set to `0xdeadb' in
         * getnewvnode after removing it from a freelist to ensure
         * that we do not try to move it here.
         */
        simple_lock(&vp->v_interlock);
        if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
            vp->v_holdcnt == 0 && vp->v_usecount == 0) {
                simple_lock(&vnode_free_list_slock);
                TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
                simple_unlock(&vnode_free_list_slock);
        }
        vp->v_holdcnt++;
        simple_unlock(&vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
        struct vnode *vp;
{

        simple_lock(&vp->v_interlock);
        if (vp->v_holdcnt <= 0)
                panic("holdrele: holdcnt vp %p", vp);
        vp->v_holdcnt--;

        /*
         * If it is on the holdlist and the hold count drops to
         * zero, move it to the free list. The test of the back
         * pointer and the use reference count of zero is because
         * it will be removed from a free list by getnewvnode,
         * but will not have its reference count incremented until
         * after calling vgone. If the reference count were
         * incremented first, vgone would (incorrectly) try to
         * close the previous instance of the underlying object.
         * So, the back pointer is explicitly set to `0xdeadb' in
         * getnewvnode after removing it from a freelist to ensure
         * that we do not try to move it here.
         */

        if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
            vp->v_holdcnt == 0 && vp->v_usecount == 0) {
                simple_lock(&vnode_free_list_slock);
                TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
                TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
                simple_unlock(&vnode_free_list_slock);
        }
        simple_unlock(&vp->v_interlock);
}

/*
 * Vnode reference.
 */
void
vref(vp)
        struct vnode *vp;
{

        simple_lock(&vp->v_interlock);
        if (vp->v_usecount <= 0)
                panic("vref used where vget required, vp %p", vp);
        vp->v_usecount++;
#ifdef DIAGNOSTIC
        if (vp->v_usecount == 0) {
                vprint("vref", vp);
                panic("vref: usecount overflow, vp %p", vp);
        }
#endif
        simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones;
 * return an error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vflush(mp, skipvp, flags)
        struct mount *mp;
        struct vnode *skipvp;
        int flags;
{
        struct proc *p = curproc;	/* XXX */
        struct vnode *vp, *nvp;
        int busy = 0;

        simple_lock(&mntvnode_slock);
loop:
        for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
                if (vp->v_mount != mp)
                        goto loop;
                nvp = LIST_NEXT(vp, v_mntvnodes);
                /*
                 * Skip over a selected vnode.
                 */
                if (vp == skipvp)
                        continue;
                simple_lock(&vp->v_interlock);
                /*
                 * Skip over vnodes marked VSYSTEM.
                 */
                if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                        simple_unlock(&vp->v_interlock);
                        continue;
                }
                /*
                 * If WRITECLOSE is set, only flush out regular file
                 * vnodes open for writing.
                 */
                if ((flags & WRITECLOSE) &&
                    (vp->v_writecount == 0 || vp->v_type != VREG)) {
                        simple_unlock(&vp->v_interlock);
                        continue;
                }
                /*
                 * With v_usecount == 0, all we need to do is clear
                 * out the vnode data structures and we are done.
                 */
                if (vp->v_usecount == 0) {
                        simple_unlock(&mntvnode_slock);
                        vgonel(vp, p);
                        simple_lock(&mntvnode_slock);
                        continue;
                }
                /*
                 * If FORCECLOSE is set, forcibly close the vnode.
                 * For block or character devices, revert to an
                 * anonymous device. For all other files, just kill them.
                 */
                if (flags & FORCECLOSE) {
                        simple_unlock(&mntvnode_slock);
                        if (vp->v_type != VBLK && vp->v_type != VCHR) {
                                vgonel(vp, p);
                        } else {
                                vclean(vp, 0, p);
                                vp->v_op = spec_vnodeop_p;
                                insmntque(vp, (struct mount *)0);
                        }
                        simple_lock(&mntvnode_slock);
                        continue;
                }
#ifdef DEBUG
                if (busyprt)
                        vprint("vflush: busy vnode", vp);
#endif
                simple_unlock(&vp->v_interlock);
                busy++;
        }
        simple_unlock(&mntvnode_slock);
        if (busy)
                return (EBUSY);
        return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
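 * Called with the vnode interlock held; the interlock is released
 * by the time vclean() returns.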
 */
void
vclean(vp, flags, p)
        struct vnode *vp;
        int flags;
        struct proc *p;
{
        int active;

        LOCK_ASSERT(simple_lock_held(&vp->v_interlock));

        /*
         * Check to see if the vnode is in use.
         * If so we have to reference it before we clean it out
         * so that its count cannot fall to zero and generate a
         * race against ourselves to recycle it.
         */

        if ((active = vp->v_usecount) != 0) {
                vp->v_usecount++;
#ifdef DIAGNOSTIC
                if (vp->v_usecount == 0) {
                        vprint("vclean", vp);
                        panic("vclean: usecount overflow");
                }
#endif
        }

        /*
         * Prevent the vnode from being recycled or
         * brought into use while we clean it out.
         */
        if (vp->v_flag & VXLOCK)
                panic("vclean: deadlock, vp %p", vp);
        vp->v_flag |= VXLOCK;
        if (vp->v_flag & VEXECMAP) {
                uvmexp.execpages -= vp->v_uobj.uo_npages;
                uvmexp.filepages += vp->v_uobj.uo_npages;
        }
        vp->v_flag &= ~(VTEXT|VEXECMAP);

        /*
         * Even if the count is zero, the VOP_INACTIVE routine may still
         * have the object locked while it cleans it out. The VOP_LOCK
         * ensures that the VOP_INACTIVE routine is done with its work.
         * For active vnodes, it ensures that no other activity can
         * occur while the underlying object is being cleaned out.
         */
        VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);

        /*
         * Clean out any cached data associated with the vnode.
         */
        if (flags & DOCLOSE) {
                vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
                KASSERT((vp->v_flag & VONWORKLST) == 0);
        }
        LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));

        /*
         * If purging an active vnode, it must be closed and
         * deactivated before being reclaimed. Note that the
         * VOP_INACTIVE will unlock the vnode.
         */
        if (active) {
                if (flags & DOCLOSE)
                        VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
                VOP_INACTIVE(vp, p);
        } else {
                /*
                 * Any other processes trying to obtain this lock must first
                 * wait for VXLOCK to clear, then call the new lock operation.
                 */
                VOP_UNLOCK(vp, 0);
        }
        /*
         * Reclaim the vnode.
         */
        if (VOP_RECLAIM(vp, p))
                panic("vclean: cannot reclaim, vp %p", vp);
        if (active) {
                /*
                 * Inline copy of vrele() since VOP_INACTIVE
                 * has already been called.
                 */
                simple_lock(&vp->v_interlock);
                if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
                        if (vp->v_usecount < 0 || vp->v_writecount != 0) {
                                vprint("vclean: bad ref count", vp);
                                panic("vclean: ref cnt");
                        }
#endif
                        /*
                         * Insert at tail of LRU list.
                         */

                        simple_unlock(&vp->v_interlock);
                        simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
                        if (vp->v_holdcnt > 0)
                                panic("vclean: not clean, vp %p", vp);
#endif
                        TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
                        simple_unlock(&vnode_free_list_slock);
                } else
                        simple_unlock(&vp->v_interlock);
        }

        KASSERT(vp->v_uobj.uo_npages == 0);
        cache_purge(vp);

        /*
         * Done with purge, notify sleepers of the grim news.
         */
        vp->v_op = dead_vnodeop_p;
        vp->v_tag = VT_NON;
        simple_lock(&vp->v_interlock);
        VN_KNOTE(vp, NOTE_REVOKE);	/* FreeBSD has this in vn_pollgone() */
        vp->v_flag &= ~VXLOCK;
        if (vp->v_flag & VXWANT) {
                vp->v_flag &= ~VXWANT;
                simple_unlock(&vp->v_interlock);
                wakeup((caddr_t)vp);
        } else
                simple_unlock(&vp->v_interlock);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
        struct vnode *vp;
        struct simplelock *inter_lkp;
        struct proc *p;
{

        simple_lock(&vp->v_interlock);
        if (vp->v_usecount == 0) {
                if (inter_lkp)
                        simple_unlock(inter_lkp);
                vgonel(vp, p);
                return (1);
        }
        simple_unlock(&vp->v_interlock);
        return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
        struct vnode *vp;
{
        struct proc *p = curproc;	/* XXX */

        simple_lock(&vp->v_interlock);
        vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
        struct vnode *vp;
        struct proc *p;
{
        struct vnode *vq;
        struct vnode *vx;

        LOCK_ASSERT(simple_lock_held(&vp->v_interlock));

        /*
         * If a vgone (or vclean) is already in progress,
         * wait until it is done and return.
         */

        if (vp->v_flag & VXLOCK) {
                vp->v_flag |= VXWANT;
                ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, &vp->v_interlock);
                return;
        }

        /*
         * Clean out the filesystem specific data.
         */

        vclean(vp, DOCLOSE, p);
        KASSERT((vp->v_flag & VONWORKLST) == 0);

        /*
         * Delete from old mount point vnode list, if on one.
         */

        if (vp->v_mount != NULL)
                insmntque(vp, (struct mount *)0);

        /*
         * If special device, remove it from the special device alias list,
         * if it is on one.
         */

        if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
                simple_lock(&spechash_slock);
                if (vp->v_hashchain != NULL) {
                        if (*vp->v_hashchain == vp) {
                                *vp->v_hashchain = vp->v_specnext;
                        } else {
                                for (vq = *vp->v_hashchain; vq;
                                    vq = vq->v_specnext) {
                                        if (vq->v_specnext != vp)
                                                continue;
                                        vq->v_specnext = vp->v_specnext;
                                        break;
                                }
                                if (vq == NULL)
                                        panic("missing bdev");
                        }
                        if (vp->v_flag & VALIASED) {
                                vx = NULL;
                                for (vq = *vp->v_hashchain; vq;
                                    vq = vq->v_specnext) {
                                        if (vq->v_rdev != vp->v_rdev ||
                                            vq->v_type != vp->v_type)
                                                continue;
                                        if (vx)
                                                break;
                                        vx = vq;
                                }
                                if (vx == NULL)
                                        panic("missing alias");
                                if (vq == NULL)
                                        vx->v_flag &= ~VALIASED;
                                vp->v_flag &= ~VALIASED;
                        }
                }
                simple_unlock(&spechash_slock);
                FREE(vp->v_specinfo, M_VNODE);
                vp->v_specinfo = NULL;
        }

        /*
         * If it is on the freelist and not already at the head,
         * move it to the head of the list. The test of the back
         * pointer and the reference count of zero is because
         * it will be removed from the free list by getnewvnode,
         * but will not have its reference count incremented until
         * after calling vgone. If the reference count were
         * incremented first, vgone would (incorrectly) try to
         * close the previous instance of the underlying object.
         * So, the back pointer is explicitly set to `0xdeadb' in
         * getnewvnode after removing it from the freelist to ensure
         * that we do not try to move it here.
         */

        if (vp->v_usecount == 0) {
                simple_lock(&vnode_free_list_slock);
                if (vp->v_holdcnt > 0)
                        panic("vgonel: not clean, vp %p", vp);
                if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
                    TAILQ_FIRST(&vnode_free_list) != vp) {
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                        TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
                }
                simple_unlock(&vnode_free_list_slock);
        }
        vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
        dev_t dev;
        enum vtype type;
        struct vnode **vpp;
{
        struct vnode *vp;
        int rc = 0;

        simple_lock(&spechash_slock);
        for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
                if (dev != vp->v_rdev || type != vp->v_type)
                        continue;
                *vpp = vp;
                rc = 1;
                break;
        }
        simple_unlock(&spechash_slock);
        return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(maj, minl, minh, type)
        int maj, minl, minh;
        enum vtype type;
{
        struct vnode *vp;
        int mn;

        for (mn = minl; mn <= minh; mn++)
                if (vfinddev(makedev(maj, mn), type, &vp))
                        VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
        struct vnode *vp;
{
        struct vnode *vq, *vnext;
        int count;

loop:
        if ((vp->v_flag & VALIASED) == 0)
                return (vp->v_usecount);
        simple_lock(&spechash_slock);
        for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
                vnext = vq->v_specnext;
                if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
                        continue;
                /*
                 * Alias, but not in use, so flush it out.
                 */
                if (vq->v_usecount == 0 && vq != vp &&
                    (vq->v_flag & VXLOCK) == 0) {
                        simple_unlock(&spechash_slock);
                        vgone(vq);
                        goto loop;
                }
                count += vq->v_usecount;
        }
        simple_unlock(&spechash_slock);
        return (count);
}

/*
 * Print out a description of a vnode.
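 * Used by the DIAGNOSTIC checks above and by printlockedvnodes() below.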
 */
static const char * const typename[] =
    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(label, vp)
        char *label;
        struct vnode *vp;
{
        char buf[96];

        if (label != NULL)
                printf("%s: ", label);
        printf("tag %d type %s, usecount %d, writecount %ld, refcount %ld,",
            vp->v_tag, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
            vp->v_holdcnt);
        buf[0] = '\0';
        if (vp->v_flag & VROOT)
                strcat(buf, "|VROOT");
        if (vp->v_flag & VTEXT)
                strcat(buf, "|VTEXT");
        if (vp->v_flag & VEXECMAP)
                strcat(buf, "|VEXECMAP");
        if (vp->v_flag & VSYSTEM)
                strcat(buf, "|VSYSTEM");
        if (vp->v_flag & VXLOCK)
                strcat(buf, "|VXLOCK");
        if (vp->v_flag & VXWANT)
                strcat(buf, "|VXWANT");
        if (vp->v_flag & VBWAIT)
                strcat(buf, "|VBWAIT");
        if (vp->v_flag & VALIASED)
                strcat(buf, "|VALIASED");
        if (buf[0] != '\0')
                printf(" flags (%s)", &buf[1]);
        if (vp->v_data == NULL) {
                printf("\n");
        } else {
                printf("\n\t");
                VOP_PRINT(vp);
        }
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
        struct mount *mp, *nmp;
        struct vnode *vp;

        printf("Locked vnodes\n");
        simple_lock(&mountlist_slock);
        for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
            mp = nmp) {
                if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
                        nmp = CIRCLEQ_NEXT(mp, mnt_list);
                        continue;
                }
                LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
                        if (VOP_ISLOCKED(vp))
                                vprint(NULL, vp);
                }
                simple_lock(&mountlist_slock);
                nmp = CIRCLEQ_NEXT(mp, mnt_list);
                vfs_unbusy(mp);
        }
        simple_unlock(&mountlist_slock);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
        int *name;
        u_int namelen;
        void *oldp;
        size_t *oldlenp;
        void *newp;
        size_t newlen;
        struct proc *p;
{
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
        struct vfsconf vfc;
        extern const char * const mountcompatnames[];
        extern int nmountcompatnames;
#endif
        struct vfsops *vfsp;

        /* all sysctl names at this level are at least name and field */
        if (namelen < 2)
                return (ENOTDIR);		/* overloaded */

        /* Not generic: goes to file system. */
        if (name[0] != VFS_GENERIC) {
                static const struct ctlname vfsnames[VFS_MAXID+1]=CTL_VFS_NAMES;
                const char *vfsname;

                if (name[0] < 0 || name[0] > VFS_MAXID
                    || (vfsname = vfsnames[name[0]].ctl_name) == NULL)
                        return (EOPNOTSUPP);

                vfsp = vfs_getopsbyname(vfsname);
                if (vfsp == NULL || vfsp->vfs_sysctl == NULL)
                        return (EOPNOTSUPP);
                return ((*vfsp->vfs_sysctl)(&name[1], namelen - 1,
                    oldp, oldlenp, newp, newlen, p));
        }

        /* The rest are generic vfs sysctls. */
        switch (name[1]) {
        case VFS_USERMOUNT:
                return sysctl_int(oldp, oldlenp, newp, newlen, &dovfsusermount);
#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
        case VFS_MAXTYPENUM:
                /*
                 * Provided for 4.4BSD-Lite2 compatibility.
                 */
                return (sysctl_rdint(oldp, oldlenp, newp, nmountcompatnames));
        case VFS_CONF:
                /*
                 * Special: a node, next is a file system name.
                 * Provided for 4.4BSD-Lite2 compatibility.
                 */
                if (namelen < 3)
                        return (ENOTDIR);	/* overloaded */
                if (name[2] >= nmountcompatnames || name[2] < 0 ||
                    mountcompatnames[name[2]] == NULL)
                        return (EOPNOTSUPP);
                vfsp = vfs_getopsbyname(mountcompatnames[name[2]]);
                if (vfsp == NULL)
                        return (EOPNOTSUPP);
                vfc.vfc_vfsops = vfsp;
                strncpy(vfc.vfc_name, vfsp->vfs_name, MFSNAMELEN);
                vfc.vfc_typenum = name[2];
                vfc.vfc_refcount = vfsp->vfs_refcount;
                vfc.vfc_flags = 0;
                vfc.vfc_mountroot = vfsp->vfs_mountroot;
                vfc.vfc_next = NULL;
                return (sysctl_rdstruct(oldp, oldlenp, newp, &vfc,
                    sizeof(struct vfsconf)));
#endif
        default:
                break;
        }
        return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep, p)
        char *where;
        size_t *sizep;
        struct proc *p;
{
        struct mount *mp, *nmp;
        struct vnode *nvp, *vp;
        char *bp = where, *savebp;
        char *ewhere;
        int error;

#define VPTRSZ	sizeof(struct vnode *)
#define VNODESZ	sizeof(struct vnode)
        if (where == NULL) {
                *sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
                return (0);
        }
        ewhere = where + *sizep;

        simple_lock(&mountlist_slock);
        for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
            mp = nmp) {
                if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
                        nmp = CIRCLEQ_NEXT(mp, mnt_list);
                        continue;
                }
                savebp = bp;
again:
                simple_lock(&mntvnode_slock);
                for (vp = LIST_FIRST(&mp->mnt_vnodelist);
                    vp != NULL;
                    vp = nvp) {
                        /*
                         * Check that the vp is still associated with
                         * this filesystem.  RACE: could have been
                         * recycled onto the same filesystem.
                         */
                        if (vp->v_mount != mp) {
                                simple_unlock(&mntvnode_slock);
                                if (kinfo_vdebug)
                                        printf("kinfo: vp changed\n");
                                bp = savebp;
                                goto again;
                        }
                        nvp = LIST_NEXT(vp, v_mntvnodes);
                        if (bp + VPTRSZ + VNODESZ > ewhere) {
                                simple_unlock(&mntvnode_slock);
                                *sizep = bp - where;
                                return (ENOMEM);
                        }
                        simple_unlock(&mntvnode_slock);
                        if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
                            (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
                                return (error);
                        bp += VPTRSZ + VNODESZ;
                        simple_lock(&mntvnode_slock);
                }
                simple_unlock(&mntvnode_slock);
                simple_lock(&mountlist_slock);
                nmp = CIRCLEQ_NEXT(mp, mnt_list);
                vfs_unbusy(mp);
        }
        simple_unlock(&mountlist_slock);

        *sizep = bp - where;
        return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
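 * Returns EBUSY if the device, or any alias of it, currently has a
 * filesystem mounted on it, and 0 otherwise.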
 */
int
vfs_mountedon(vp)
        struct vnode *vp;
{
        struct vnode *vq;
        int error = 0;

        if (vp->v_specmountpoint != NULL)
                return (EBUSY);
        if (vp->v_flag & VALIASED) {
                simple_lock(&spechash_slock);
                for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
                        if (vq->v_rdev != vp->v_rdev ||
                            vq->v_type != vp->v_type)
                                continue;
                        if (vq->v_specmountpoint != NULL) {
                                error = EBUSY;
                                break;
                        }
                }
                simple_unlock(&spechash_slock);
        }
        return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
        struct mount *mp;
        struct netexport *nep;
        struct export_args *argp;
{
        struct netcred *np, *enp;
        struct radix_node_head *rnh;
        int i;
        struct radix_node *rn;
        struct sockaddr *saddr, *smask = 0;
        struct domain *dom;
        int error;

        if (argp->ex_addrlen == 0) {
                if (mp->mnt_flag & MNT_DEFEXPORTED)
                        return (EPERM);
                np = &nep->ne_defexported;
                np->netc_exflags = argp->ex_flags;
                crcvt(&np->netc_anon, &argp->ex_anon);
                np->netc_anon.cr_ref = 1;
                mp->mnt_flag |= MNT_DEFEXPORTED;
                return (0);
        }

        if (argp->ex_addrlen > MLEN)
                return (EINVAL);

        i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
        np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
        memset((caddr_t)np, 0, i);
        saddr = (struct sockaddr *)(np + 1);
        error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
        if (error)
                goto out;
        if (saddr->sa_len > argp->ex_addrlen)
                saddr->sa_len = argp->ex_addrlen;
        if (argp->ex_masklen) {
                smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
                error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
                if (error)
                        goto out;
                if (smask->sa_len > argp->ex_masklen)
                        smask->sa_len = argp->ex_masklen;
        }
        i = saddr->sa_family;
        if ((rnh = nep->ne_rtable[i]) == 0) {
                /*
                 * Seems silly to initialize every AF when most are not
                 * used, do so on demand here
                 */
                for (dom = domains; dom; dom = dom->dom_next)
                        if (dom->dom_family == i && dom->dom_rtattach) {
                                dom->dom_rtattach((void **)&nep->ne_rtable[i],
                                    dom->dom_rtoffset);
                                break;
                        }
                if ((rnh = nep->ne_rtable[i]) == 0) {
                        error = ENOBUFS;
                        goto out;
                }
        }
        rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
            np->netc_rnodes);
        if (rn == 0 || np != (struct netcred *)rn) {	/* already exists */
                if (rn == 0) {
                        enp = (struct netcred *)(*rnh->rnh_lookup)(saddr,
                            smask, rnh);
                        if (enp == 0) {
                                error = EPERM;
                                goto out;
                        }
                } else
                        enp = (struct netcred *)rn;

                if (enp->netc_exflags != argp->ex_flags ||
                    enp->netc_anon.cr_uid != argp->ex_anon.cr_uid ||
                    enp->netc_anon.cr_gid != argp->ex_anon.cr_gid ||
                    enp->netc_anon.cr_ngroups !=
                    (uint32_t) argp->ex_anon.cr_ngroups ||
                    memcmp(&enp->netc_anon.cr_groups, &argp->ex_anon.cr_groups,
                        enp->netc_anon.cr_ngroups))
                        error = EPERM;
                else
                        error = 0;
                goto out;
        }
        np->netc_exflags = argp->ex_flags;
        crcvt(&np->netc_anon, &argp->ex_anon);
        np->netc_anon.cr_ref = 1;
        return (0);
out:
        free(np, M_NETADDR);
        return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and RFC 2055).
 */
int
vfs_setpublicfs(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info; the FS is
	 * no longer exported.  May be called from either vfs_export
	 * or unmount, so check whether the work has already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	memset((caddr_t)&nfs_pub.np_handle, 0, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an index file was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    MAXNAMLEN, (size_t *)0);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
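			/*
			 * Illustrative note: presumably the index name
			 * is used as a single lookup component relative
			 * to the public filehandle, so "index.html"
			 * would be acceptable while "pub/index.html"
			 * would not; hence the scan for '/' below.
			 */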
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Look up in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(type, file_mode, uid, gid, acc_mode, cred)
	enum vtype type;
	mode_t file_mode;
	uid_t uid;
	gid_t gid;
	mode_t acc_mode;
	struct ucred *cred;
{
	mode_t mask;

	/*
	 * Super-user always gets read/write access, but execute access
	 * depends on at least one execute bit being set.
	 */
	if (cred->cr_uid == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}

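/*
 * Illustrative worked example of the mask arithmetic in vaccess()
 * above.  For a file with mode 0640 whose owner makes a VREAD|VWRITE
 * request, the owner branch builds mask = S_IRUSR|S_IWUSR = 0600, and
 * 0640 & 0600 == 0600 == mask, so access is granted.  A group member
 * making the same request builds mask = S_IRGRP|S_IWGRP = 0060, and
 * 0640 & 0060 == 0040 != mask: the write bit is missing, so EACCES is
 * returned.
 */
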
/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(p)
	struct proc *p;
{
	struct mount *mp, *nmp;
	int allerror, error;

	for (allerror = 0,
	    mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
#ifdef DEBUG
		printf("unmounting %s (%s)...\n",
		    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_mntfromname);
#endif
		/*
		 * XXX Freeze syncer.  Must do this before locking the
		 * mount point.  See dounmount() for details.
		 */
		lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
		if (vfs_busy(mp, 0, 0)) {
			lockmgr(&syncer_lock, LK_RELEASE, NULL);
			continue;
		}
		if ((error = dounmount(mp, MNT_FORCE, p)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}
	if (allerror)
		printf("WARNING: some file systems would not unmount\n");
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown()
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s;
	struct proc *p = curproc;

	/* XXX we're certainly not running in proc0's context! */
	if (p == NULL)
		p = &proc0;

	printf("syncing disks... ");

	/* remove user processes from the run queue */
	suspendsched();
	(void) spl0();

	/* avoid coming back this way again if we panic. */
	doing_shutdown = 1;

	sys_sync(p, NULL, NULL);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20;) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_vp && bp->b_vp->v_mount
			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
			    && (bp->b_flags & B_DELWRI)) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					printf("softdep ");
					goto fail;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}
	if (nbusy) {
fail:
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (bp = &buf[nbuf]; --bp >= buf; )
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				vprint(NULL, bp->b_vp);

#if defined(DDB) && defined(DEBUG_HALT_BUSY)
		Debugger();
#endif

#else /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		printf("giving up\n");
#endif /* defined(DEBUG) || defined(DEBUG_HALT_BUSY) */
		return;
	} else
		printf("done\n");

	/*
	 * If we've panic'd, don't make the situation potentially
	 * worse by unmounting the file systems.
	 */
	if (panicstr != NULL)
		return;

	/* Release inodes held by texts before update. */
#ifdef notdef
	vnshutdown();
#endif
	/* Unmount file systems. */
	vfs_unmountall(p);
}

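/*
 * Illustrative note on the flush loop in vfs_shutdown() above: the
 * busy-buffer count is printed on each unsuccessful pass, so a clean
 * shutdown looks roughly like
 *
 *	syncing disks... 8 3 1 done
 *
 * on the console (the exact counts depend on outstanding I/O).  The
 * sleep starts at a single tick and backs off to hz / 25 * iter, and
 * iter only advances on passes that make no progress, so up to 20
 * unproductive passes are tolerated before "giving up" is printed.
 */
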
/*
 * Mount the root file system.  If the operator didn't specify a
 * file system to use, try all possible file systems until one
 * succeeds.
 */
int
vfs_mountroot()
{
	struct vfsops *v;

	if (root_device == NULL)
		panic("vfs_mountroot: root device unknown");

	switch (root_device->dv_class) {
	case DV_IFNET:
		if (rootdev != NODEV)
			panic("vfs_mountroot: rootdev set for DV_IFNET "
			    "(0x%08x -> %d,%d)", rootdev,
			    major(rootdev), minor(rootdev));
		break;

	case DV_DISK:
		if (rootdev == NODEV)
			panic("vfs_mountroot: rootdev not set for DV_DISK");
		break;

	default:
		printf("%s: inappropriate for root file system\n",
		    root_device->dv_xname);
		return (ENODEV);
	}

	/*
	 * If the user specified a file system, use it.
	 */
	if (mountroot != NULL)
		return ((*mountroot)());

	/*
	 * Try each file system currently configured into the kernel.
	 */
	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (v->vfs_mountroot == NULL)
			continue;
#ifdef DEBUG
		printf("mountroot: trying %s...\n", v->vfs_name);
#endif
		if ((*v->vfs_mountroot)() == 0) {
			printf("root file system type: %s\n", v->vfs_name);
			break;
		}
	}

	if (v == NULL) {
		printf("no file system for %s", root_device->dv_xname);
		if (root_device->dv_class == DV_DISK)
			printf(" (dev 0x%x)", rootdev);
		printf("\n");
		return (EFTYPE);
	}
	return (0);
}

/*
 * Given a file system name, look up the vfsops for that
 * file system, or return NULL if the file system isn't present
 * in the kernel.
 */
struct vfsops *
vfs_getopsbyname(name)
	const char *name;
{
	struct vfsops *v;

	for (v = LIST_FIRST(&vfs_list); v != NULL; v = LIST_NEXT(v, vfs_list)) {
		if (strcmp(v->vfs_name, name) == 0)
			break;
	}

	return (v);
}

/*
 * Establish a file system and initialize it.
 */
int
vfs_attach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;
	int error = 0;

	/*
	 * Make sure this file system doesn't already exist.
	 */
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (strcmp(vfs->vfs_name, v->vfs_name) == 0) {
			error = EEXIST;
			goto out;
		}
	}

	/*
	 * Initialize the vnode operations for this file system.
	 */
	vfs_opv_init(vfs->vfs_opv_descs);

	/*
	 * Now initialize the file system itself.
	 */
	(*vfs->vfs_init)();

	/*
	 * ...and link it into the kernel's list.
	 */
	LIST_INSERT_HEAD(&vfs_list, vfs, vfs_list);

	/*
	 * Sanity: make sure the reference count is 0.
	 */
	vfs->vfs_refcount = 0;

out:
	return (error);
}

/*
 * Remove a file system from the kernel.
 */
int
vfs_detach(vfs)
	struct vfsops *vfs;
{
	struct vfsops *v;

	/*
	 * Make sure no one is using the filesystem.
	 */
	if (vfs->vfs_refcount != 0)
		return (EBUSY);

	/*
	 * ...and remove it from the kernel's list.
	 */
	LIST_FOREACH(v, &vfs_list, vfs_list) {
		if (v == vfs) {
			LIST_REMOVE(v, vfs_list);
			break;
		}
	}

	if (v == NULL)
		return (ESRCH);

	/*
	 * Now run the file system-specific cleanups.
	 */
	(*vfs->vfs_done)();

	/*
	 * Free the vnode operations vector.
	 */
	vfs_opv_free(vfs->vfs_opv_descs);
	return (0);
}

void
vfs_reinit(void)
{
	struct vfsops *vfs;

	LIST_FOREACH(vfs, &vfs_list, vfs_list) {
		if (vfs->vfs_reinit) {
			(*vfs->vfs_reinit)();
		}
	}
}

#ifdef DDB
const char buf_flagbits[] =
	"\20\1AGE\2NEEDCOMMIT\3ASYNC\4BAD\5BUSY\6SCANNED\7CALL\10DELWRI"
	"\11DIRTY\12DONE\13EINTR\14ERROR\15GATHERED\16INVAL\17LOCKED\20NOCACHE"
	"\21ORDERED\22CACHE\23PHYS\24RAW\25READ\26TAPE\30WANTED"
	"\32XXX\33VFLUSH";

void
vfs_buf_print(bp, full, pr)
	struct buf *bp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[1024];

	(*pr)(" vp %p lblkno 0x%x blkno 0x%x dev 0x%x\n",
	    bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_dev);

	bitmask_snprintf(bp->b_flags, buf_flagbits, buf, sizeof(buf));
	(*pr)(" error %d flags 0x%s\n", bp->b_error, buf);

	(*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx\n",
	    bp->b_bufsize, bp->b_bcount, bp->b_resid);
	(*pr)(" data %p saveaddr %p dep %p\n",
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep));
	(*pr)(" iodone %p\n", bp->b_iodone);
}

const char vnode_flagbits[] =
	"\20\1ROOT\2TEXT\3SYSTEM\4ISTTY\5EXECMAP"
	"\11XLOCK\12XWANT\13BWAIT\14ALIASED"
	"\15DIROP\16LAYER\17ONWORKLIST\20DIRTY";

const char *vnode_types[] = {
	"VNON",
	"VREG",
	"VDIR",
	"VBLK",
	"VCHR",
	"VLNK",
	"VSOCK",
	"VFIFO",
	"VBAD",
};

const char *vnode_tags[] = {
	"VT_NON",
	"VT_UFS",
	"VT_NFS",
	"VT_MFS",
	"VT_MSDOSFS",
	"VT_LFS",
	"VT_LOFS",
	"VT_FDESC",
	"VT_PORTAL",
	"VT_NULL",
	"VT_UMAP",
	"VT_KERNFS",
	"VT_PROCFS",
	"VT_AFS",
	"VT_ISOFS",
	"VT_UNION",
	"VT_ADOSFS",
	"VT_EXT2FS",
	"VT_CODA",
	"VT_FILECORE",
	"VT_NTFS",
	"VT_VFS",
	"VT_OVERLAY"
};

void
vfs_vnode_print(vp, full, pr)
	struct vnode *vp;
	int full;
	void (*pr) __P((const char *, ...));
{
	char buf[256];
	const char *vtype, *vtag;

	uvm_object_printit(&vp->v_uobj, full, pr);
	bitmask_snprintf(vp->v_flag, vnode_flagbits, buf, sizeof(buf));
	(*pr)("\nVNODE flags %s\n", buf);
	(*pr)("mp %p numoutput %d size 0x%llx\n",
	    vp->v_mount, vp->v_numoutput, vp->v_size);

	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	vtype = (vp->v_type >= 0 &&
	    vp->v_type < sizeof(vnode_types) / sizeof(vnode_types[0])) ?
	    vnode_types[vp->v_type] : "UNKNOWN";
	vtag = (vp->v_tag >= 0 &&
	    vp->v_tag < sizeof(vnode_tags) / sizeof(vnode_tags[0])) ?
	    vnode_tags[vp->v_tag] : "UNKNOWN";

	(*pr)("type %s(%d) tag %s(%d) id 0x%lx mount %p typedata %p\n",
	    vtype, vp->v_type, vtag, vp->v_tag,
	    vp->v_id, vp->v_mount, vp->v_mountedhere);

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}
#endif
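
/*
 * Illustrative sketch of the vfs_attach()/vfs_detach() registration
 * protocol above.  "examplefs" is a hypothetical file system, and
 * examplefs_vfsops is assumed to be fully initialized elsewhere:
 *
 *	extern struct vfsops examplefs_vfsops;
 *
 *	void
 *	examplefs_register(void)
 *	{
 *		int error;
 *
 *		error = vfs_attach(&examplefs_vfsops);
 *		if (error == EEXIST)
 *			printf("examplefs: already attached\n");
 *	}
 *
 * After a successful vfs_attach(), vfs_getopsbyname("examplefs")
 * returns the vfsops, and vfs_detach() refuses to remove the entry
 * while vfs_refcount is nonzero (i.e., while instances are mounted).
 */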