/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 * $DragonFly: src/sys/kern/vfs_subr.c,v 1.27 2004/03/07 12:09:04 eirikn Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	insmntque (struct vnode *vp, struct mount *mp);
static void	vclean (struct vnode *vp, lwkt_tokref_t vlock, int flags, struct thread *td);
static unsigned long numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */

static u_long wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
static u_long freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int reassignbufloops;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, "");
static int reassignbufsortgood;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, "");
static int reassignbufsortbad;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, "");
static int reassignbufmethod = 1;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, "");

#ifdef ENABLE_VFS_IOOPT
int vfs_ioopt = 0;
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */
struct lwkt_token mountlist_token;
struct lwkt_token mntvnode_token;
int	nfs_mount_type = -1;
static struct lwkt_token mntid_token;
static struct lwkt_token vnode_free_list_token;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */
static vm_zone_t vnode_zone;

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_INT(_kern, OID_AUTO, syncdelay, CTLFLAG_RW, &syncdelay, 0,
    "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "VFS metadata synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");

static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &minvnodes, 0, "Minimum number of vnodes");
static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
    "Number of times the vnlru process ran without success");

static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				   struct export_args *argp);

#define VSHOULDFREE(vp) \
	(!((vp)->v_flag & (VFREE|VDOOMED)) && \
	 !(vp)->v_holdcnt && !(vp)->v_usecount && \
	 (!(vp)->v_object || \
	  !((vp)->v_object->ref_count || (vp)->v_object->resident_page_count)))

#define VMIGHTFREE(vp) \
	(((vp)->v_flag & (VFREE|VDOOMED|VXLOCK)) == 0 && \
	 cache_leaf_test(vp) == 0 && (vp)->v_usecount == 0)

#define VSHOULDBUSY(vp) \
	(((vp)->v_flag & VFREE) && \
	 ((vp)->v_holdcnt || (vp)->v_usecount))

static void vbusy(struct vnode *vp);
static void vfree(struct vnode *vp);
static void vmaybefree(struct vnode *vp);

/*
 * NOTE: the vnode interlock must be held on call.
 */
static __inline void
vmaybefree(struct vnode *vp)
{
	if (VSHOULDFREE(vp))
		vfree(vp);
}

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	/*
	 * Desired vnodes is a result of the physical page count
	 * and the size of kernel's heap.  It scales in proportion
	 * to the amount of available physical memory.  This can
	 * cause trouble on 64-bit and large memory platforms.
	 */
	/* desiredvnodes = maxproc + vmstats.v_page_count / 4; */
	desiredvnodes =
		min(maxproc + vmstats.v_page_count /4,
		    2 * (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
		    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));

	minvnodes = desiredvnodes / 4;
	lwkt_token_init(&mountlist_token);
	lwkt_token_init(&mntvnode_token);
	lwkt_token_init(&mntid_token);
	lwkt_token_init(&spechash_token);
	TAILQ_INIT(&vnode_free_list);
	lwkt_token_init(&vnode_free_list_token);
	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(struct mount *mp, int flags, lwkt_tokref_t interlkp, struct thread *td)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * note: interlkp is a serializer and thus can be safely
		 * held through any sleep
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp, struct thread *td)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct thread *td = curthread;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, 0, "vfslock", VLKTIMEOUT, LK_NOPAUSE);
	vfs_busy(mp, LK_NOWAIT, NULL, td);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = 0;
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Find an appropriate filesystem to use for the root.
 * If a filesystem has not been preselected, walk through the list of known
 * filesystems trying those that have mountroot routines, and try them until
 * one works or we have tried them all.
 */
#ifdef notdef	/* XXX JH */
int
lite2_vfs_mountroot()
{
	struct vfsconf *vfsp;
	extern int (*lite2_mountroot) (void);
	int error;

	if (lite2_mountroot != NULL)
		return ((*lite2_mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}
#endif

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	struct mount *mp;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&ilock);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	lwkt_tokref ilock;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&ilock, &mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&ilock);
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	lwkt_tokref ilock;
	lwkt_tokref vlock;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * 2 / usevnodes;

	done = 0;
	lwkt_gettoken(&ilock, &mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check.  But we have to check again after obtaining
		 * the vnode interlock.  vp->v_interlock points to stable
		 * storage so it's ok if the vp gets ripped out from
		 * under us while we are blocked.
		 */
		if (vp->v_type == VNON ||
		    vp->v_type == VBAD ||
		    !VMIGHTFREE(vp) ||		/* critical path opt */
		    (vp->v_object &&
		     vp->v_object->resident_page_count >= trigger)
		) {
			TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			--count;
			continue;
		}

		/*
		 * Get the interlock, delay moving the node to the tail so
		 * we don't race against new additions to the mountlist.
		 */
		lwkt_gettoken(&vlock, vp->v_interlock);
		if (TAILQ_FIRST(&mp->mnt_nvnodelist) != vp) {
			lwkt_reltoken(&vlock);
			continue;
		}
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);

		/*
		 * Must check again
		 */
		if (vp->v_type == VNON ||
		    vp->v_type == VBAD ||
		    !VMIGHTFREE(vp) ||		/* critical path opt */
		    (vp->v_object &&
		     vp->v_object->resident_page_count >= trigger)
		) {
			lwkt_reltoken(&vlock);
			--count;
			continue;
		}
		vgonel(vp, &vlock, curthread);
		++done;
		--count;
	}
	lwkt_reltoken(&ilock);
	return done;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	lwkt_tokref ilock;
	int s;
	int done;
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
	    SHUTDOWN_PRI_FIRST);

	s = splbio();
	for (;;) {
		kproc_suspend_loop();
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		done = 0;
		lwkt_gettoken(&ilock, &mountlist_token);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			lwkt_gettokref(&ilock);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		lwkt_reltoken(&ilock);
		if (done == 0) {
			vnlru_nowhere++;
			tsleep(td, 0, "vlrup", hz * 3);
		}
	}
	splx(s);
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Routines having to do with the management of the vnode table.
 */
extern vop_t **dead_vnodeop_p;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	int s;
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = NULL;
	struct vnode *xvp;
	vm_object_t object;
	lwkt_tokref ilock;
	lwkt_tokref vlock;

	s = splbio();

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes) {
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruthread);
		}
		tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
	}

	/*
	 * Attempt to reuse a vnode already on the free list, allocating
	 * a new vnode if we can't find one or if we have not reached a
	 * good minimum for good LRU performance.
	 */
	lwkt_gettoken(&ilock, &vnode_free_list_token);
	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
		int count;

		for (count = 0; count < freevnodes; count++) {
			/*
			 * __VNODESCAN__
			 *
			 * Pull the next vnode off the free list and do some
			 * sanity checks.
			 * Note that regardless of how we block, if
			 * freevnodes is non-zero there had better be
			 * something on the list.
			 */
			vp = TAILQ_FIRST(&vnode_free_list);
			if (vp == NULL)
				panic("getnewvnode: free vnode isn't");

			/*
			 * Move the vnode to the end of the list so other
			 * processes do not double-block trying to recycle
			 * the same vnode (as an optimization), then get
			 * the interlock.
			 */
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);

			/*
			 * Skip vnodes that are in the process of being
			 * held or referenced.  Since the act of adding or
			 * removing a vnode on the freelist requires a token
			 * and may block, the ref count may be adjusted
			 * prior to its addition or removal.
			 */
			if (VSHOULDBUSY(vp)) {
				vp = NULL;
				continue;
			}

			/*
			 * Obtain the vnode interlock and check that the
			 * vnode is still on the free list.
			 *
			 * This normally devolves into a degenerate case so
			 * it is optimal.  Loop up if it isn't.  Note that
			 * the vnode could be in the middle of being moved
			 * off the free list (the VSHOULDBUSY() check) and
			 * must be skipped if so.
			 */
			lwkt_gettoken(&vlock, vp->v_interlock);
			TAILQ_FOREACH_REVERSE(xvp, &vnode_free_list,
			    freelst, v_freelist) {
				if (vp == xvp)
					break;
			}
			if (vp != xvp || VSHOULDBUSY(vp)) {
				vp = NULL;
				continue;
			}

			/*
			 * We now safely own the vnode.  If the vnode has
			 * an object do not recycle it if its VM object
			 * has resident pages or references.
			 */
			if ((VOP_GETVOBJECT(vp, &object) == 0 &&
			     (object->resident_page_count || object->ref_count))
			) {
				lwkt_reltoken(&vlock);
				vp = NULL;
				continue;
			}

			/*
			 * We can almost reuse this vnode.  But we don't want
			 * to recycle it if the vnode has children in the
			 * namecache because that breaks the namecache's
			 * path element chain.  (YYY use nc_refs for the
			 * check?)
			 */
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);

			if (TAILQ_FIRST(&vp->v_namecache) == NULL ||
			    cache_leaf_test(vp) >= 0) {
				/* ok, we can reuse this vnode */
				break;
			}
			lwkt_reltoken(&vlock);
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			vp = NULL;
		}
	}

	/*
	 * If vp is non-NULL we hold its interlock.
777 */ 778 if (vp) { 779 vp->v_flag |= VDOOMED; 780 vp->v_flag &= ~VFREE; 781 freevnodes--; 782 lwkt_reltoken(&ilock); 783 cache_purge(vp); /* YYY may block */ 784 vp->v_lease = NULL; 785 if (vp->v_type != VBAD) { 786 vgonel(vp, &vlock, td); 787 } else { 788 lwkt_reltoken(&vlock); 789 } 790 791 #ifdef INVARIANTS 792 { 793 int s; 794 795 if (vp->v_data) 796 panic("cleaned vnode isn't"); 797 s = splbio(); 798 if (vp->v_numoutput) 799 panic("Clean vnode has pending I/O's"); 800 splx(s); 801 } 802 #endif 803 vp->v_flag = 0; 804 vp->v_lastw = 0; 805 vp->v_lasta = 0; 806 vp->v_cstart = 0; 807 vp->v_clen = 0; 808 vp->v_socket = 0; 809 vp->v_writecount = 0; /* XXX */ 810 } else { 811 lwkt_reltoken(&ilock); 812 vp = zalloc(vnode_zone); 813 bzero(vp, sizeof(*vp)); 814 vp->v_interlock = lwkt_token_pool_get(vp); 815 lwkt_token_init(&vp->v_pollinfo.vpi_token); 816 vp->v_dd = vp; 817 cache_purge(vp); 818 TAILQ_INIT(&vp->v_namecache); 819 numvnodes++; 820 } 821 822 TAILQ_INIT(&vp->v_cleanblkhd); 823 TAILQ_INIT(&vp->v_dirtyblkhd); 824 vp->v_type = VNON; 825 vp->v_tag = tag; 826 vp->v_op = vops; 827 insmntque(vp, mp); 828 *vpp = vp; 829 vp->v_usecount = 1; 830 vp->v_data = 0; 831 splx(s); 832 833 vfs_object_create(vp, td); 834 return (0); 835 } 836 837 /* 838 * Move a vnode from one mount queue to another. 839 */ 840 static void 841 insmntque(vp, mp) 842 struct vnode *vp; 843 struct mount *mp; 844 { 845 lwkt_tokref ilock; 846 847 lwkt_gettoken(&ilock, &mntvnode_token); 848 /* 849 * Delete from old mount point vnode list, if on one. 850 */ 851 if (vp->v_mount != NULL) { 852 KASSERT(vp->v_mount->mnt_nvnodelistsize > 0, 853 ("bad mount point vnode list size")); 854 TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes); 855 vp->v_mount->mnt_nvnodelistsize--; 856 } 857 /* 858 * Insert into list of vnodes for the new mount point, if available. 859 */ 860 if ((vp->v_mount = mp) == NULL) { 861 lwkt_reltoken(&ilock); 862 return; 863 } 864 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 865 mp->mnt_nvnodelistsize++; 866 lwkt_reltoken(&ilock); 867 } 868 869 /* 870 * Update outstanding I/O count and do wakeup if requested. 871 */ 872 void 873 vwakeup(bp) 874 struct buf *bp; 875 { 876 struct vnode *vp; 877 878 bp->b_flags &= ~B_WRITEINPROG; 879 if ((vp = bp->b_vp)) { 880 vp->v_numoutput--; 881 if (vp->v_numoutput < 0) 882 panic("vwakeup: neg numoutput"); 883 if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) { 884 vp->v_flag &= ~VBWAIT; 885 wakeup((caddr_t) &vp->v_numoutput); 886 } 887 } 888 } 889 890 /* 891 * Flush out and invalidate all buffers associated with a vnode. 892 * Called with the underlying object locked. 
893 */ 894 int 895 vinvalbuf(struct vnode *vp, int flags, struct thread *td, 896 int slpflag, int slptimeo) 897 { 898 struct buf *bp; 899 struct buf *nbp, *blist; 900 int s, error; 901 vm_object_t object; 902 lwkt_tokref vlock; 903 904 if (flags & V_SAVE) { 905 s = splbio(); 906 while (vp->v_numoutput) { 907 vp->v_flag |= VBWAIT; 908 error = tsleep((caddr_t)&vp->v_numoutput, 909 slpflag, "vinvlbuf", slptimeo); 910 if (error) { 911 splx(s); 912 return (error); 913 } 914 } 915 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 916 splx(s); 917 if ((error = VOP_FSYNC(vp, MNT_WAIT, td)) != 0) 918 return (error); 919 s = splbio(); 920 if (vp->v_numoutput > 0 || 921 !TAILQ_EMPTY(&vp->v_dirtyblkhd)) 922 panic("vinvalbuf: dirty bufs"); 923 } 924 splx(s); 925 } 926 s = splbio(); 927 for (;;) { 928 blist = TAILQ_FIRST(&vp->v_cleanblkhd); 929 if (!blist) 930 blist = TAILQ_FIRST(&vp->v_dirtyblkhd); 931 if (!blist) 932 break; 933 934 for (bp = blist; bp; bp = nbp) { 935 nbp = TAILQ_NEXT(bp, b_vnbufs); 936 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 937 error = BUF_TIMELOCK(bp, 938 LK_EXCLUSIVE | LK_SLEEPFAIL, 939 "vinvalbuf", slpflag, slptimeo); 940 if (error == ENOLCK) 941 break; 942 splx(s); 943 return (error); 944 } 945 /* 946 * XXX Since there are no node locks for NFS, I 947 * believe there is a slight chance that a delayed 948 * write will occur while sleeping just above, so 949 * check for it. Note that vfs_bio_awrite expects 950 * buffers to reside on a queue, while VOP_BWRITE and 951 * brelse do not. 952 */ 953 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 954 (flags & V_SAVE)) { 955 956 if (bp->b_vp == vp) { 957 if (bp->b_flags & B_CLUSTEROK) { 958 BUF_UNLOCK(bp); 959 vfs_bio_awrite(bp); 960 } else { 961 bremfree(bp); 962 bp->b_flags |= B_ASYNC; 963 VOP_BWRITE(bp->b_vp, bp); 964 } 965 } else { 966 bremfree(bp); 967 (void) VOP_BWRITE(bp->b_vp, bp); 968 } 969 break; 970 } 971 bremfree(bp); 972 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF); 973 bp->b_flags &= ~B_ASYNC; 974 brelse(bp); 975 } 976 } 977 978 /* 979 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 980 * have write I/O in-progress but if there is a VM object then the 981 * VM object can also have read-I/O in-progress. 982 */ 983 do { 984 while (vp->v_numoutput > 0) { 985 vp->v_flag |= VBWAIT; 986 tsleep(&vp->v_numoutput, 0, "vnvlbv", 0); 987 } 988 if (VOP_GETVOBJECT(vp, &object) == 0) { 989 while (object->paging_in_progress) 990 vm_object_pip_sleep(object, "vnvlbx"); 991 } 992 } while (vp->v_numoutput > 0); 993 994 splx(s); 995 996 /* 997 * Destroy the copy in the VM cache, too. 998 */ 999 lwkt_gettoken(&vlock, vp->v_interlock); 1000 if (VOP_GETVOBJECT(vp, &object) == 0) { 1001 vm_object_page_remove(object, 0, 0, 1002 (flags & V_SAVE) ? TRUE : FALSE); 1003 } 1004 lwkt_reltoken(&vlock); 1005 1006 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd)) 1007 panic("vinvalbuf: flush failed"); 1008 return (0); 1009 } 1010 1011 /* 1012 * Truncate a file's buffer and pages to a specified length. This 1013 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1014 * sync activity. 1015 */ 1016 int 1017 vtruncbuf(struct vnode *vp, struct thread *td, off_t length, int blksize) 1018 { 1019 struct buf *bp; 1020 struct buf *nbp; 1021 int s, anyfreed; 1022 int trunclbn; 1023 1024 /* 1025 * Round up to the *next* lbn. 
1026 */ 1027 trunclbn = (length + blksize - 1) / blksize; 1028 1029 s = splbio(); 1030 restart: 1031 anyfreed = 1; 1032 for (;anyfreed;) { 1033 anyfreed = 0; 1034 for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) { 1035 nbp = TAILQ_NEXT(bp, b_vnbufs); 1036 if (bp->b_lblkno >= trunclbn) { 1037 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1038 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1039 goto restart; 1040 } else { 1041 bremfree(bp); 1042 bp->b_flags |= (B_INVAL | B_RELBUF); 1043 bp->b_flags &= ~B_ASYNC; 1044 brelse(bp); 1045 anyfreed = 1; 1046 } 1047 if (nbp && 1048 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1049 (nbp->b_vp != vp) || 1050 (nbp->b_flags & B_DELWRI))) { 1051 goto restart; 1052 } 1053 } 1054 } 1055 1056 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 1057 nbp = TAILQ_NEXT(bp, b_vnbufs); 1058 if (bp->b_lblkno >= trunclbn) { 1059 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1060 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1061 goto restart; 1062 } else { 1063 bremfree(bp); 1064 bp->b_flags |= (B_INVAL | B_RELBUF); 1065 bp->b_flags &= ~B_ASYNC; 1066 brelse(bp); 1067 anyfreed = 1; 1068 } 1069 if (nbp && 1070 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1071 (nbp->b_vp != vp) || 1072 (nbp->b_flags & B_DELWRI) == 0)) { 1073 goto restart; 1074 } 1075 } 1076 } 1077 } 1078 1079 if (length > 0) { 1080 restartsync: 1081 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 1082 nbp = TAILQ_NEXT(bp, b_vnbufs); 1083 if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) { 1084 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1085 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1086 goto restart; 1087 } else { 1088 bremfree(bp); 1089 if (bp->b_vp == vp) { 1090 bp->b_flags |= B_ASYNC; 1091 } else { 1092 bp->b_flags &= ~B_ASYNC; 1093 } 1094 VOP_BWRITE(bp->b_vp, bp); 1095 } 1096 goto restartsync; 1097 } 1098 1099 } 1100 } 1101 1102 while (vp->v_numoutput > 0) { 1103 vp->v_flag |= VBWAIT; 1104 tsleep(&vp->v_numoutput, 0, "vbtrunc", 0); 1105 } 1106 1107 splx(s); 1108 1109 vnode_pager_setsize(vp, length); 1110 1111 return (0); 1112 } 1113 1114 /* 1115 * Associate a buffer with a vnode. 1116 */ 1117 void 1118 bgetvp(vp, bp) 1119 struct vnode *vp; 1120 struct buf *bp; 1121 { 1122 int s; 1123 1124 KASSERT(bp->b_vp == NULL, ("bgetvp: not free")); 1125 1126 vhold(vp); 1127 bp->b_vp = vp; 1128 bp->b_dev = vn_todev(vp); 1129 /* 1130 * Insert onto list for new vnode. 1131 */ 1132 s = splbio(); 1133 bp->b_xflags |= BX_VNCLEAN; 1134 bp->b_xflags &= ~BX_VNDIRTY; 1135 TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs); 1136 splx(s); 1137 } 1138 1139 /* 1140 * Disassociate a buffer from a vnode. 1141 */ 1142 void 1143 brelvp(bp) 1144 struct buf *bp; 1145 { 1146 struct vnode *vp; 1147 struct buflists *listheadp; 1148 int s; 1149 1150 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 1151 1152 /* 1153 * Delete from old vnode list, if on one. 1154 */ 1155 vp = bp->b_vp; 1156 s = splbio(); 1157 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 1158 if (bp->b_xflags & BX_VNDIRTY) 1159 listheadp = &vp->v_dirtyblkhd; 1160 else 1161 listheadp = &vp->v_cleanblkhd; 1162 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 1163 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1164 } 1165 if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 1166 vp->v_flag &= ~VONWORKLST; 1167 LIST_REMOVE(vp, v_synclist); 1168 } 1169 splx(s); 1170 bp->b_vp = (struct vnode *) 0; 1171 vdrop(vp); 1172 } 1173 1174 /* 1175 * The workitem queue. 
1176 * 1177 * It is useful to delay writes of file data and filesystem metadata 1178 * for tens of seconds so that quickly created and deleted files need 1179 * not waste disk bandwidth being created and removed. To realize this, 1180 * we append vnodes to a "workitem" queue. When running with a soft 1181 * updates implementation, most pending metadata dependencies should 1182 * not wait for more than a few seconds. Thus, mounted on block devices 1183 * are delayed only about a half the time that file data is delayed. 1184 * Similarly, directory updates are more critical, so are only delayed 1185 * about a third the time that file data is delayed. Thus, there are 1186 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 1187 * one each second (driven off the filesystem syncer process). The 1188 * syncer_delayno variable indicates the next queue that is to be processed. 1189 * Items that need to be processed soon are placed in this queue: 1190 * 1191 * syncer_workitem_pending[syncer_delayno] 1192 * 1193 * A delay of fifteen seconds is done by placing the request fifteen 1194 * entries later in the queue: 1195 * 1196 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 1197 * 1198 */ 1199 1200 /* 1201 * Add an item to the syncer work queue. 1202 */ 1203 static void 1204 vn_syncer_add_to_worklist(struct vnode *vp, int delay) 1205 { 1206 int s, slot; 1207 1208 s = splbio(); 1209 1210 if (vp->v_flag & VONWORKLST) { 1211 LIST_REMOVE(vp, v_synclist); 1212 } 1213 1214 if (delay > syncer_maxdelay - 2) 1215 delay = syncer_maxdelay - 2; 1216 slot = (syncer_delayno + delay) & syncer_mask; 1217 1218 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist); 1219 vp->v_flag |= VONWORKLST; 1220 splx(s); 1221 } 1222 1223 struct thread *updatethread; 1224 static void sched_sync (void); 1225 static struct kproc_desc up_kp = { 1226 "syncer", 1227 sched_sync, 1228 &updatethread 1229 }; 1230 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp) 1231 1232 /* 1233 * System filesystem synchronizer daemon. 1234 */ 1235 void 1236 sched_sync(void) 1237 { 1238 struct synclist *slp; 1239 struct vnode *vp; 1240 long starttime; 1241 int s; 1242 struct thread *td = curthread; 1243 1244 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td, 1245 SHUTDOWN_PRI_LAST); 1246 1247 for (;;) { 1248 kproc_suspend_loop(); 1249 1250 starttime = time_second; 1251 1252 /* 1253 * Push files whose dirty time has expired. Be careful 1254 * of interrupt race on slp queue. 1255 */ 1256 s = splbio(); 1257 slp = &syncer_workitem_pending[syncer_delayno]; 1258 syncer_delayno += 1; 1259 if (syncer_delayno == syncer_maxdelay) 1260 syncer_delayno = 0; 1261 splx(s); 1262 1263 while ((vp = LIST_FIRST(slp)) != NULL) { 1264 if (VOP_ISLOCKED(vp, NULL) == 0) { 1265 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td); 1266 (void) VOP_FSYNC(vp, MNT_LAZY, td); 1267 VOP_UNLOCK(vp, NULL, 0, td); 1268 } 1269 s = splbio(); 1270 if (LIST_FIRST(slp) == vp) { 1271 /* 1272 * Note: v_tag VT_VFS vps can remain on the 1273 * worklist too with no dirty blocks, but 1274 * since sync_fsync() moves it to a different 1275 * slot we are safe. 1276 */ 1277 if (TAILQ_EMPTY(&vp->v_dirtyblkhd) && 1278 !vn_isdisk(vp, NULL)) 1279 panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag); 1280 /* 1281 * Put us back on the worklist. The worklist 1282 * routine will remove us from our current 1283 * position and then add us back in at a later 1284 * position. 
1285 */ 1286 vn_syncer_add_to_worklist(vp, syncdelay); 1287 } 1288 splx(s); 1289 } 1290 1291 /* 1292 * Do soft update processing. 1293 */ 1294 if (bioops.io_sync) 1295 (*bioops.io_sync)(NULL); 1296 1297 /* 1298 * The variable rushjob allows the kernel to speed up the 1299 * processing of the filesystem syncer process. A rushjob 1300 * value of N tells the filesystem syncer to process the next 1301 * N seconds worth of work on its queue ASAP. Currently rushjob 1302 * is used by the soft update code to speed up the filesystem 1303 * syncer process when the incore state is getting so far 1304 * ahead of the disk that the kernel memory pool is being 1305 * threatened with exhaustion. 1306 */ 1307 if (rushjob > 0) { 1308 rushjob -= 1; 1309 continue; 1310 } 1311 /* 1312 * If it has taken us less than a second to process the 1313 * current work, then wait. Otherwise start right over 1314 * again. We can still lose time if any single round 1315 * takes more than two seconds, but it does not really 1316 * matter as we are just trying to generally pace the 1317 * filesystem activity. 1318 */ 1319 if (time_second == starttime) 1320 tsleep(&lbolt, 0, "syncer", 0); 1321 } 1322 } 1323 1324 /* 1325 * Request the syncer daemon to speed up its work. 1326 * We never push it to speed up more than half of its 1327 * normal turn time, otherwise it could take over the cpu. 1328 * 1329 * YYY wchan field protected by the BGL. 1330 */ 1331 int 1332 speedup_syncer() 1333 { 1334 crit_enter(); 1335 if (updatethread->td_wchan == &lbolt) { /* YYY */ 1336 unsleep(updatethread); 1337 lwkt_schedule(updatethread); 1338 } 1339 crit_exit(); 1340 if (rushjob < syncdelay / 2) { 1341 rushjob += 1; 1342 stat_rush_requests += 1; 1343 return (1); 1344 } 1345 return(0); 1346 } 1347 1348 /* 1349 * Associate a p-buffer with a vnode. 1350 * 1351 * Also sets B_PAGING flag to indicate that vnode is not fully associated 1352 * with the buffer. i.e. the bp has not been linked into the vnode or 1353 * ref-counted. 1354 */ 1355 void 1356 pbgetvp(vp, bp) 1357 struct vnode *vp; 1358 struct buf *bp; 1359 { 1360 1361 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free")); 1362 1363 bp->b_vp = vp; 1364 bp->b_flags |= B_PAGING; 1365 bp->b_dev = vn_todev(vp); 1366 } 1367 1368 /* 1369 * Disassociate a p-buffer from a vnode. 1370 */ 1371 void 1372 pbrelvp(bp) 1373 struct buf *bp; 1374 { 1375 1376 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL")); 1377 1378 /* XXX REMOVE ME */ 1379 if (TAILQ_NEXT(bp, b_vnbufs) != NULL) { 1380 panic( 1381 "relpbuf(): b_vp was probably reassignbuf()d %p %x", 1382 bp, 1383 (int)bp->b_flags 1384 ); 1385 } 1386 bp->b_vp = (struct vnode *) 0; 1387 bp->b_flags &= ~B_PAGING; 1388 } 1389 1390 void 1391 pbreassignbuf(bp, newvp) 1392 struct buf *bp; 1393 struct vnode *newvp; 1394 { 1395 if ((bp->b_flags & B_PAGING) == 0) { 1396 panic( 1397 "pbreassignbuf() on non phys bp %p", 1398 bp 1399 ); 1400 } 1401 bp->b_vp = newvp; 1402 } 1403 1404 /* 1405 * Reassign a buffer from one vnode to another. 1406 * Used to assign file specific control information 1407 * (indirect blocks) to the vnode to which they belong. 1408 */ 1409 void 1410 reassignbuf(bp, newvp) 1411 struct buf *bp; 1412 struct vnode *newvp; 1413 { 1414 struct buflists *listheadp; 1415 int delay; 1416 int s; 1417 1418 if (newvp == NULL) { 1419 printf("reassignbuf: NULL"); 1420 return; 1421 } 1422 ++reassignbufcalls; 1423 1424 /* 1425 * B_PAGING flagged buffers cannot be reassigned because their vp 1426 * is not fully linked in. 
1427 */ 1428 if (bp->b_flags & B_PAGING) 1429 panic("cannot reassign paging buffer"); 1430 1431 s = splbio(); 1432 /* 1433 * Delete from old vnode list, if on one. 1434 */ 1435 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 1436 if (bp->b_xflags & BX_VNDIRTY) 1437 listheadp = &bp->b_vp->v_dirtyblkhd; 1438 else 1439 listheadp = &bp->b_vp->v_cleanblkhd; 1440 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 1441 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1442 if (bp->b_vp != newvp) { 1443 vdrop(bp->b_vp); 1444 bp->b_vp = NULL; /* for clarification */ 1445 } 1446 } 1447 /* 1448 * If dirty, put on list of dirty buffers; otherwise insert onto list 1449 * of clean buffers. 1450 */ 1451 if (bp->b_flags & B_DELWRI) { 1452 struct buf *tbp; 1453 1454 listheadp = &newvp->v_dirtyblkhd; 1455 if ((newvp->v_flag & VONWORKLST) == 0) { 1456 switch (newvp->v_type) { 1457 case VDIR: 1458 delay = dirdelay; 1459 break; 1460 case VCHR: 1461 case VBLK: 1462 if (newvp->v_specmountpoint != NULL) { 1463 delay = metadelay; 1464 break; 1465 } 1466 /* fall through */ 1467 default: 1468 delay = filedelay; 1469 } 1470 vn_syncer_add_to_worklist(newvp, delay); 1471 } 1472 bp->b_xflags |= BX_VNDIRTY; 1473 tbp = TAILQ_FIRST(listheadp); 1474 if (tbp == NULL || 1475 bp->b_lblkno == 0 || 1476 (bp->b_lblkno > 0 && tbp->b_lblkno < 0) || 1477 (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) { 1478 TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs); 1479 ++reassignbufsortgood; 1480 } else if (bp->b_lblkno < 0) { 1481 TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs); 1482 ++reassignbufsortgood; 1483 } else if (reassignbufmethod == 1) { 1484 /* 1485 * New sorting algorithm, only handle sequential case, 1486 * otherwise append to end (but before metadata) 1487 */ 1488 if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL && 1489 (tbp->b_xflags & BX_VNDIRTY)) { 1490 /* 1491 * Found the best place to insert the buffer 1492 */ 1493 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1494 ++reassignbufsortgood; 1495 } else { 1496 /* 1497 * Missed, append to end, but before meta-data. 1498 * We know that the head buffer in the list is 1499 * not meta-data due to prior conditionals. 1500 * 1501 * Indirect effects: NFS second stage write 1502 * tends to wind up here, giving maximum 1503 * distance between the unstable write and the 1504 * commit rpc. 1505 */ 1506 tbp = TAILQ_LAST(listheadp, buflists); 1507 while (tbp && tbp->b_lblkno < 0) 1508 tbp = TAILQ_PREV(tbp, buflists, b_vnbufs); 1509 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1510 ++reassignbufsortbad; 1511 } 1512 } else { 1513 /* 1514 * Old sorting algorithm, scan queue and insert 1515 */ 1516 struct buf *ttbp; 1517 while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) && 1518 (ttbp->b_lblkno < bp->b_lblkno)) { 1519 ++reassignbufloops; 1520 tbp = ttbp; 1521 } 1522 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1523 } 1524 } else { 1525 bp->b_xflags |= BX_VNCLEAN; 1526 TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs); 1527 if ((newvp->v_flag & VONWORKLST) && 1528 TAILQ_EMPTY(&newvp->v_dirtyblkhd)) { 1529 newvp->v_flag &= ~VONWORKLST; 1530 LIST_REMOVE(newvp, v_synclist); 1531 } 1532 } 1533 if (bp->b_vp != newvp) { 1534 bp->b_vp = newvp; 1535 vhold(bp->b_vp); 1536 } 1537 splx(s); 1538 } 1539 1540 /* 1541 * Create a vnode for a block device. 1542 * Used for mounting the root file system. 
1543 */ 1544 int 1545 bdevvp(dev, vpp) 1546 dev_t dev; 1547 struct vnode **vpp; 1548 { 1549 struct vnode *vp; 1550 struct vnode *nvp; 1551 int error; 1552 1553 if (dev == NODEV) { 1554 *vpp = NULLVP; 1555 return (ENXIO); 1556 } 1557 error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp); 1558 if (error) { 1559 *vpp = NULLVP; 1560 return (error); 1561 } 1562 vp = nvp; 1563 vp->v_type = VBLK; 1564 addalias(vp, dev); 1565 *vpp = vp; 1566 return (0); 1567 } 1568 1569 /* 1570 * Add a vnode to the alias list hung off the dev_t. 1571 * 1572 * The reason for this gunk is that multiple vnodes can reference 1573 * the same physical device, so checking vp->v_usecount to see 1574 * how many users there are is inadequate; the v_usecount for 1575 * the vnodes need to be accumulated. vcount() does that. 1576 */ 1577 void 1578 addaliasu(struct vnode *nvp, udev_t nvp_rdev) 1579 { 1580 dev_t dev; 1581 1582 if (nvp->v_type != VBLK && nvp->v_type != VCHR) 1583 panic("addaliasu on non-special vnode"); 1584 dev = udev2dev(nvp_rdev, nvp->v_type == VBLK ? 1 : 0); 1585 if (dev != NODEV) { 1586 nvp->v_rdev = dev; 1587 addalias(nvp, dev); 1588 } else 1589 nvp->v_rdev = NULL; 1590 } 1591 1592 void 1593 addalias(struct vnode *nvp, dev_t dev) 1594 { 1595 lwkt_tokref ilock; 1596 1597 if (nvp->v_type != VBLK && nvp->v_type != VCHR) 1598 panic("addalias on non-special vnode"); 1599 1600 nvp->v_rdev = dev; 1601 lwkt_gettoken(&ilock, &spechash_token); 1602 SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext); 1603 lwkt_reltoken(&ilock); 1604 } 1605 1606 /* 1607 * Grab a particular vnode from the free list, increment its 1608 * reference count and lock it. The vnode lock bit is set if the 1609 * vnode is being eliminated in vgone. The process is awakened 1610 * when the transition is completed, and an error returned to 1611 * indicate that the vnode is no longer usable (possibly having 1612 * been changed to a new file system type). 1613 * 1614 * This code is very sensitive. We are depending on the vnode interlock 1615 * to be maintained through to the vn_lock() call, which means that we 1616 * cannot block which means that we cannot call vbusy() until after vn_lock(). 1617 * If the interlock is not maintained, the VXLOCK check will not properly 1618 * interlock against a vclean()'s LK_DRAIN operation on the lock. 1619 */ 1620 int 1621 vget(struct vnode *vp, lwkt_tokref_t vlock, int flags, thread_t td) 1622 { 1623 int error; 1624 lwkt_tokref vvlock; 1625 1626 /* 1627 * We need the interlock to safely modify the v_ fields. ZZZ it is 1628 * only legal to pass (1) the vnode's interlock and (2) only pass 1629 * NULL w/o LK_INTERLOCK if the vnode is *ALREADY* referenced or 1630 * held. 1631 */ 1632 if ((flags & LK_INTERLOCK) == 0) { 1633 lwkt_gettoken(&vvlock, vp->v_interlock); 1634 vlock = &vvlock; 1635 } 1636 1637 /* 1638 * If the vnode is in the process of being cleaned out for 1639 * another use, we wait for the cleaning to finish and then 1640 * return failure. Cleaning is determined by checking that 1641 * the VXLOCK flag is set. It is possible for the vnode to be 1642 * self-referenced during the cleaning operation. 
1643 */ 1644 if (vp->v_flag & VXLOCK) { 1645 if (vp->v_vxthread == curthread) { 1646 #if 0 1647 /* this can now occur in normal operation */ 1648 log(LOG_INFO, "VXLOCK interlock avoided\n"); 1649 #endif 1650 } else { 1651 vp->v_flag |= VXWANT; 1652 lwkt_reltoken(vlock); 1653 tsleep((caddr_t)vp, 0, "vget", 0); 1654 return (ENOENT); 1655 } 1656 } 1657 1658 /* 1659 * Bump v_usecount to prevent the vnode from being recycled. The 1660 * usecount needs to be bumped before we successfully get our lock. 1661 */ 1662 vp->v_usecount++; 1663 if (flags & LK_TYPE_MASK) { 1664 if ((error = vn_lock(vp, vlock, flags | LK_INTERLOCK, td)) != 0) { 1665 /* 1666 * must expand vrele here because we do not want 1667 * to call VOP_INACTIVE if the reference count 1668 * drops back to zero since it was never really 1669 * active. We must remove it from the free list 1670 * before sleeping so that multiple processes do 1671 * not try to recycle it. 1672 */ 1673 lwkt_gettokref(vlock); 1674 vp->v_usecount--; 1675 vmaybefree(vp); 1676 lwkt_reltoken(vlock); 1677 } 1678 return (error); 1679 } 1680 if (VSHOULDBUSY(vp)) 1681 vbusy(vp); /* interlock must be held on call */ 1682 lwkt_reltoken(vlock); 1683 return (0); 1684 } 1685 1686 void 1687 vref(struct vnode *vp) 1688 { 1689 vp->v_usecount++; /* XXX MP */ 1690 } 1691 1692 /* 1693 * Vnode put/release. 1694 * If count drops to zero, call inactive routine and return to freelist. 1695 */ 1696 void 1697 vrele(struct vnode *vp) 1698 { 1699 struct thread *td = curthread; /* XXX */ 1700 lwkt_tokref vlock; 1701 1702 KASSERT(vp != NULL, ("vrele: null vp")); 1703 1704 lwkt_gettoken(&vlock, vp->v_interlock); 1705 1706 if (vp->v_usecount > 1) { 1707 vp->v_usecount--; 1708 lwkt_reltoken(&vlock); 1709 return; 1710 } 1711 1712 if (vp->v_usecount == 1) { 1713 vp->v_usecount--; 1714 /* 1715 * We must call VOP_INACTIVE with the node locked and the 1716 * usecount 0. If we are doing a vpu, the node is already 1717 * locked, but, in the case of vrele, we must explicitly lock 1718 * the vnode before calling VOP_INACTIVE. 1719 */ 1720 1721 if (vn_lock(vp, NULL, LK_EXCLUSIVE, td) == 0) 1722 VOP_INACTIVE(vp, td); 1723 vmaybefree(vp); 1724 lwkt_reltoken(&vlock); 1725 } else { 1726 #ifdef DIAGNOSTIC 1727 vprint("vrele: negative ref count", vp); 1728 #endif 1729 lwkt_reltoken(&vlock); 1730 panic("vrele: negative ref cnt"); 1731 } 1732 } 1733 1734 void 1735 vput(struct vnode *vp) 1736 { 1737 struct thread *td = curthread; /* XXX */ 1738 lwkt_tokref vlock; 1739 1740 KASSERT(vp != NULL, ("vput: null vp")); 1741 1742 lwkt_gettoken(&vlock, vp->v_interlock); 1743 1744 if (vp->v_usecount > 1) { 1745 vp->v_usecount--; 1746 VOP_UNLOCK(vp, &vlock, LK_INTERLOCK, td); 1747 return; 1748 } 1749 1750 if (vp->v_usecount == 1) { 1751 vp->v_usecount--; 1752 /* 1753 * We must call VOP_INACTIVE with the node locked. 1754 * If we are doing a vpu, the node is already locked, 1755 * so we just need to release the vnode mutex. 1756 */ 1757 VOP_INACTIVE(vp, td); 1758 vmaybefree(vp); 1759 lwkt_reltoken(&vlock); 1760 } else { 1761 #ifdef DIAGNOSTIC 1762 vprint("vput: negative ref count", vp); 1763 #endif 1764 lwkt_reltoken(&vlock); 1765 panic("vput: negative ref cnt"); 1766 } 1767 } 1768 1769 /* 1770 * Somebody doesn't want the vnode recycled. ZZZ vnode interlock should 1771 * be held but isn't. 
1772 */ 1773 void 1774 vhold(vp) 1775 struct vnode *vp; 1776 { 1777 int s; 1778 1779 s = splbio(); 1780 vp->v_holdcnt++; 1781 if (VSHOULDBUSY(vp)) 1782 vbusy(vp); /* interlock must be held on call */ 1783 splx(s); 1784 } 1785 1786 /* 1787 * One less who cares about this vnode. 1788 */ 1789 void 1790 vdrop(vp) 1791 struct vnode *vp; 1792 { 1793 lwkt_tokref vlock; 1794 1795 lwkt_gettoken(&vlock, vp->v_interlock); 1796 if (vp->v_holdcnt <= 0) 1797 panic("vdrop: holdcnt"); 1798 vp->v_holdcnt--; 1799 vmaybefree(vp); 1800 lwkt_reltoken(&vlock); 1801 } 1802 1803 int 1804 vmntvnodescan( 1805 struct mount *mp, 1806 int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data), 1807 int (*slowfunc)(struct mount *mp, struct vnode *vp, lwkt_tokref_t vlock, void *data), 1808 void *data 1809 ) { 1810 lwkt_tokref ilock; 1811 lwkt_tokref vlock; 1812 struct vnode *pvp; 1813 struct vnode *vp; 1814 int r = 0; 1815 1816 /* 1817 * Scan the vnodes on the mount's vnode list. Use a placemarker 1818 */ 1819 pvp = zalloc(vnode_zone); 1820 pvp->v_flag |= VPLACEMARKER; 1821 1822 lwkt_gettoken(&ilock, &mntvnode_token); 1823 TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, pvp, v_nmntvnodes); 1824 1825 while ((vp = TAILQ_NEXT(pvp, v_nmntvnodes)) != NULL) { 1826 /* 1827 * Move the placemarker and skip other placemarkers we 1828 * encounter. The nothing can get in our way so the 1829 * mount point on the vp must be valid. 1830 */ 1831 TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes); 1832 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, pvp, v_nmntvnodes); 1833 if (vp->v_flag & VPLACEMARKER) 1834 continue; 1835 KKASSERT(vp->v_mount == mp); 1836 1837 /* 1838 * Quick test 1839 */ 1840 if (fastfunc) { 1841 if ((r = fastfunc(mp, vp, data)) < 0) 1842 continue; 1843 if (r) 1844 break; 1845 } 1846 1847 /* 1848 * Get the vnodes interlock and make sure it is still on the 1849 * mount list. Skip it if it has moved (we may encounter it 1850 * later). Then do the with-interlock test. The callback 1851 * is responsible for releasing the vnode interlock. 1852 * 1853 * The interlock is type-stable. 1854 */ 1855 if (slowfunc) { 1856 lwkt_gettoken(&vlock, vp->v_interlock); 1857 if (vp != TAILQ_PREV(pvp, vnodelst, v_nmntvnodes)) { 1858 printf("vmntvnodescan (debug info only): f=%p vp=%p vnode ripped out from under us\n", slowfunc, vp); 1859 lwkt_reltoken(&vlock); 1860 continue; 1861 } 1862 if ((r = slowfunc(mp, vp, &vlock, data)) != 0) { 1863 KKASSERT(lwkt_havetokref(&vlock) == 0); 1864 break; 1865 } 1866 KKASSERT(lwkt_havetokref(&vlock) == 0); 1867 } 1868 } 1869 TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes); 1870 zfree(vnode_zone, pvp); 1871 lwkt_reltoken(&ilock); 1872 return(r); 1873 } 1874 1875 /* 1876 * Remove any vnodes in the vnode table belonging to mount point mp. 1877 * 1878 * If FORCECLOSE is not specified, there should not be any active ones, 1879 * return error if any are found (nb: this is a user error, not a 1880 * system error). If FORCECLOSE is specified, detach any active vnodes 1881 * that are found. 1882 * 1883 * If WRITECLOSE is set, only flush out regular file vnodes open for 1884 * writing. 1885 * 1886 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped. 1887 * 1888 * `rootrefs' specifies the base reference count for the root vnode 1889 * of this filesystem. The root vnode is considered busy if its 1890 * v_usecount exceeds this value. On a successful return, vflush() 1891 * will call vrele() on the root vnode exactly rootrefs times. 
1892 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 1893 * be zero. 1894 */ 1895 #ifdef DIAGNOSTIC 1896 static int busyprt = 0; /* print out busy vnodes */ 1897 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 1898 #endif 1899 1900 static int vflush_scan(struct mount *mp, struct vnode *vp, lwkt_tokref_t vlock, void *data); 1901 1902 struct vflush_info { 1903 int flags; 1904 int busy; 1905 thread_t td; 1906 }; 1907 1908 int 1909 vflush(mp, rootrefs, flags) 1910 struct mount *mp; 1911 int rootrefs; 1912 int flags; 1913 { 1914 struct thread *td = curthread; /* XXX */ 1915 struct vnode *rootvp = NULL; 1916 int error; 1917 lwkt_tokref vlock; 1918 struct vflush_info vflush_info; 1919 1920 if (rootrefs > 0) { 1921 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 1922 ("vflush: bad args")); 1923 /* 1924 * Get the filesystem root vnode. We can vput() it 1925 * immediately, since with rootrefs > 0, it won't go away. 1926 */ 1927 if ((error = VFS_ROOT(mp, &rootvp)) != 0) 1928 return (error); 1929 vput(rootvp); 1930 } 1931 1932 vflush_info.busy = 0; 1933 vflush_info.flags = flags; 1934 vflush_info.td = td; 1935 vmntvnodescan(mp, NULL, vflush_scan, &vflush_info); 1936 1937 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 1938 /* 1939 * If just the root vnode is busy, and if its refcount 1940 * is equal to `rootrefs', then go ahead and kill it. 1941 */ 1942 lwkt_gettoken(&vlock, rootvp->v_interlock); 1943 KASSERT(vflush_info.busy > 0, ("vflush: not busy")); 1944 KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs")); 1945 if (vflush_info.busy == 1 && rootvp->v_usecount == rootrefs) { 1946 vgonel(rootvp, &vlock, td); 1947 vflush_info.busy = 0; 1948 } else { 1949 lwkt_reltoken(&vlock); 1950 } 1951 } 1952 if (vflush_info.busy) 1953 return (EBUSY); 1954 for (; rootrefs > 0; rootrefs--) 1955 vrele(rootvp); 1956 return (0); 1957 } 1958 1959 /* 1960 * The scan callback is made with an interlocked vnode. 1961 */ 1962 static int 1963 vflush_scan(struct mount *mp, struct vnode *vp, lwkt_tokref_t vlock, void *data) 1964 { 1965 struct vflush_info *info = data; 1966 struct vattr vattr; 1967 1968 /* 1969 * Skip over a vnodes marked VSYSTEM. 1970 */ 1971 if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { 1972 lwkt_reltoken(vlock); 1973 return(0); 1974 } 1975 1976 /* 1977 * If WRITECLOSE is set, flush out unlinked but still open 1978 * files (even if open only for reading) and regular file 1979 * vnodes open for writing. 1980 */ 1981 if ((info->flags & WRITECLOSE) && 1982 (vp->v_type == VNON || 1983 (VOP_GETATTR(vp, &vattr, info->td) == 0 && 1984 vattr.va_nlink > 0)) && 1985 (vp->v_writecount == 0 || vp->v_type != VREG)) { 1986 lwkt_reltoken(vlock); 1987 return(0); 1988 } 1989 1990 /* 1991 * With v_usecount == 0, all we need to do is clear out the 1992 * vnode data structures and we are done. 1993 */ 1994 if (vp->v_usecount == 0) { 1995 vgonel(vp, vlock, info->td); 1996 return(0); 1997 } 1998 1999 /* 2000 * If FORCECLOSE is set, forcibly close the vnode. For block 2001 * or character devices, revert to an anonymous device. For 2002 * all other files, just kill them. 
2003 */ 2004 if (info->flags & FORCECLOSE) { 2005 if (vp->v_type != VBLK && vp->v_type != VCHR) { 2006 vgonel(vp, vlock, info->td); 2007 } else { 2008 vclean(vp, vlock, 0, info->td); 2009 vp->v_op = spec_vnodeop_p; 2010 insmntque(vp, (struct mount *) 0); 2011 } 2012 return(0); 2013 } 2014 #ifdef DIAGNOSTIC 2015 if (busyprt) 2016 vprint("vflush: busy vnode", vp); 2017 #endif 2018 lwkt_reltoken(vlock); 2019 ++info->busy; 2020 return(0); 2021 } 2022 2023 /* 2024 * Disassociate the underlying file system from a vnode. 2025 */ 2026 static void 2027 vclean(struct vnode *vp, lwkt_tokref_t vlock, int flags, struct thread *td) 2028 { 2029 int active; 2030 2031 /* 2032 * Check to see if the vnode is in use. If so we have to reference it 2033 * before we clean it out so that its count cannot fall to zero and 2034 * generate a race against ourselves to recycle it. 2035 */ 2036 if ((active = vp->v_usecount)) 2037 vp->v_usecount++; 2038 2039 /* 2040 * Prevent the vnode from being recycled or brought into use while we 2041 * clean it out. 2042 */ 2043 if (vp->v_flag & VXLOCK) 2044 panic("vclean: deadlock"); 2045 vp->v_flag |= VXLOCK; 2046 vp->v_vxthread = curthread; 2047 2048 /* 2049 * Even if the count is zero, the VOP_INACTIVE routine may still 2050 * have the object locked while it cleans it out. The VOP_LOCK 2051 * ensures that the VOP_INACTIVE routine is done with its work. 2052 * For active vnodes, it ensures that no other activity can 2053 * occur while the underlying object is being cleaned out. 2054 * 2055 * NOTE: we continue to hold the vnode interlock through to the 2056 * end of vclean(). 2057 */ 2058 VOP_LOCK(vp, NULL, LK_DRAIN, td); 2059 2060 /* 2061 * Clean out any buffers associated with the vnode. 2062 */ 2063 vinvalbuf(vp, V_SAVE, td, 0, 0); 2064 VOP_DESTROYVOBJECT(vp); 2065 2066 /* 2067 * If purging an active vnode, it must be closed and 2068 * deactivated before being reclaimed. Note that the 2069 * VOP_INACTIVE will unlock the vnode. 2070 */ 2071 if (active) { 2072 if (flags & DOCLOSE) 2073 VOP_CLOSE(vp, FNONBLOCK, td); 2074 VOP_INACTIVE(vp, td); 2075 } else { 2076 /* 2077 * Any other processes trying to obtain this lock must first 2078 * wait for VXLOCK to clear, then call the new lock operation. 2079 */ 2080 VOP_UNLOCK(vp, NULL, 0, td); 2081 } 2082 /* 2083 * Reclaim the vnode. 2084 */ 2085 if (VOP_RECLAIM(vp, td)) 2086 panic("vclean: cannot reclaim"); 2087 2088 if (active) { 2089 /* 2090 * Inline copy of vrele() since VOP_INACTIVE 2091 * has already been called. 2092 */ 2093 if (--vp->v_usecount <= 0) { 2094 #ifdef DIAGNOSTIC 2095 if (vp->v_usecount < 0 || vp->v_writecount != 0) { 2096 vprint("vclean: bad ref count", vp); 2097 panic("vclean: ref cnt"); 2098 } 2099 #endif 2100 vfree(vp); 2101 } 2102 } 2103 2104 cache_purge(vp); 2105 vp->v_vnlock = NULL; 2106 vmaybefree(vp); 2107 2108 /* 2109 * Done with purge, notify sleepers of the grim news. 2110 */ 2111 vp->v_op = dead_vnodeop_p; 2112 vn_pollgone(vp); 2113 vp->v_tag = VT_NON; 2114 vp->v_flag &= ~VXLOCK; 2115 vp->v_vxthread = NULL; 2116 if (vp->v_flag & VXWANT) { 2117 vp->v_flag &= ~VXWANT; 2118 wakeup((caddr_t) vp); 2119 } 2120 lwkt_reltoken(vlock); 2121 } 2122 2123 /* 2124 * Eliminate all activity associated with the requested vnode 2125 * and with all vnodes aliased to the requested vnode. 
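 *
 * This is the generic backing for VOP_REVOKE().  A typical caller is
 * the revoke(2) path on a terminal or other special device, which
 * would issue something like:
 *
 *      VOP_REVOKE(vp, REVOKEALL);
 *
 * after which every alias of the underlying device found on the
 * spechash list has been vgone()'d.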
2126 */ 2127 int 2128 vop_revoke(ap) 2129 struct vop_revoke_args /* { 2130 struct vnode *a_vp; 2131 int a_flags; 2132 } */ *ap; 2133 { 2134 struct vnode *vp, *vq; 2135 lwkt_tokref ilock; 2136 dev_t dev; 2137 2138 KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke")); 2139 2140 vp = ap->a_vp; 2141 /* 2142 * If a vgone (or vclean) is already in progress, 2143 * wait until it is done and return. 2144 */ 2145 if (vp->v_flag & VXLOCK) { 2146 vp->v_flag |= VXWANT; 2147 /*lwkt_reltoken(vlock); ZZZ */ 2148 tsleep((caddr_t)vp, 0, "vop_revokeall", 0); 2149 return (0); 2150 } 2151 dev = vp->v_rdev; 2152 for (;;) { 2153 lwkt_gettoken(&ilock, &spechash_token); 2154 vq = SLIST_FIRST(&dev->si_hlist); 2155 lwkt_reltoken(&ilock); 2156 if (!vq) 2157 break; 2158 vgone(vq); 2159 } 2160 return (0); 2161 } 2162 2163 /* 2164 * Recycle an unused vnode to the front of the free list. 2165 * Release the passed interlock if the vnode will be recycled. 2166 */ 2167 int 2168 vrecycle(struct vnode *vp, lwkt_tokref_t inter_lkp, struct thread *td) 2169 { 2170 lwkt_tokref vlock; 2171 2172 lwkt_gettoken(&vlock, vp->v_interlock); 2173 if (vp->v_usecount == 0) { 2174 if (inter_lkp) 2175 lwkt_reltoken(inter_lkp); 2176 vgonel(vp, &vlock, td); 2177 return (1); 2178 } 2179 lwkt_reltoken(&vlock); 2180 return (0); 2181 } 2182 2183 /* 2184 * Eliminate all activity associated with a vnode 2185 * in preparation for reuse. 2186 */ 2187 void 2188 vgone(struct vnode *vp) 2189 { 2190 struct thread *td = curthread; /* XXX */ 2191 lwkt_tokref vlock; 2192 2193 lwkt_gettoken(&vlock, vp->v_interlock); 2194 vgonel(vp, &vlock, td); 2195 } 2196 2197 /* 2198 * vgone, with the vp interlock held. 2199 */ 2200 void 2201 vgonel(struct vnode *vp, lwkt_tokref_t vlock, struct thread *td) 2202 { 2203 lwkt_tokref ilock; 2204 int s; 2205 2206 /* 2207 * If a vgone (or vclean) is already in progress, 2208 * wait until it is done and return. 2209 */ 2210 if (vp->v_flag & VXLOCK) { 2211 vp->v_flag |= VXWANT; 2212 lwkt_reltoken(vlock); 2213 tsleep((caddr_t)vp, 0, "vgone", 0); 2214 return; 2215 } 2216 2217 /* 2218 * Clean out the filesystem specific data. 2219 */ 2220 vclean(vp, vlock, DOCLOSE, td); 2221 lwkt_gettokref(vlock); 2222 2223 /* 2224 * Delete from old mount point vnode list, if on one. 2225 */ 2226 if (vp->v_mount != NULL) 2227 insmntque(vp, (struct mount *)0); 2228 /* 2229 * If special device, remove it from special device alias list 2230 * if it is on one. 2231 */ 2232 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) { 2233 lwkt_gettoken(&ilock, &spechash_token); 2234 SLIST_REMOVE(&vp->v_hashchain, vp, vnode, v_specnext); 2235 freedev(vp->v_rdev); 2236 lwkt_reltoken(&ilock); 2237 vp->v_rdev = NULL; 2238 } 2239 2240 /* 2241 * If it is on the freelist and not already at the head, 2242 * move it to the head of the list. The test of the 2243 * VDOOMED flag and the reference count of zero is because 2244 * it will be removed from the free list by getnewvnode, 2245 * but will not have its reference count incremented until 2246 * after calling vgone. If the reference count were 2247 * incremented first, vgone would (incorrectly) try to 2248 * close the previous instance of the underlying object. 
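 *
 * Moving the vnode to the head of the free list therefore makes it the
 * preferred candidate for immediate reuse by getnewvnode(), which is
 * safe because the vnode is about to be marked VBAD and carries no
 * remaining filesystem state.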
2249 */ 2250 if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) { 2251 s = splbio(); 2252 lwkt_gettoken(&ilock, &vnode_free_list_token); 2253 if (vp->v_flag & VFREE) 2254 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2255 else 2256 freevnodes++; 2257 vp->v_flag |= VFREE; 2258 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2259 lwkt_reltoken(&ilock); 2260 splx(s); 2261 } 2262 vp->v_type = VBAD; 2263 lwkt_reltoken(vlock); 2264 } 2265 2266 /* 2267 * Lookup a vnode by device number. 2268 */ 2269 int 2270 vfinddev(dev, type, vpp) 2271 dev_t dev; 2272 enum vtype type; 2273 struct vnode **vpp; 2274 { 2275 lwkt_tokref ilock; 2276 struct vnode *vp; 2277 2278 lwkt_gettoken(&ilock, &spechash_token); 2279 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) { 2280 if (type == vp->v_type) { 2281 *vpp = vp; 2282 lwkt_reltoken(&ilock); 2283 return (1); 2284 } 2285 } 2286 lwkt_reltoken(&ilock); 2287 return (0); 2288 } 2289 2290 /* 2291 * Calculate the total number of references to a special device. 2292 */ 2293 int 2294 vcount(vp) 2295 struct vnode *vp; 2296 { 2297 lwkt_tokref ilock; 2298 struct vnode *vq; 2299 int count; 2300 2301 count = 0; 2302 lwkt_gettoken(&ilock, &spechash_token); 2303 SLIST_FOREACH(vq, &vp->v_hashchain, v_specnext) 2304 count += vq->v_usecount; 2305 lwkt_reltoken(&ilock); 2306 return (count); 2307 } 2308 2309 /* 2310 * Same as above, but using the dev_t as argument 2311 */ 2312 2313 int 2314 count_dev(dev) 2315 dev_t dev; 2316 { 2317 struct vnode *vp; 2318 2319 vp = SLIST_FIRST(&dev->si_hlist); 2320 if (vp == NULL) 2321 return (0); 2322 return(vcount(vp)); 2323 } 2324 2325 /* 2326 * Print out a description of a vnode. 2327 */ 2328 static char *typename[] = 2329 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 2330 2331 void 2332 vprint(label, vp) 2333 char *label; 2334 struct vnode *vp; 2335 { 2336 char buf[96]; 2337 2338 if (label != NULL) 2339 printf("%s: %p: ", label, (void *)vp); 2340 else 2341 printf("%p: ", (void *)vp); 2342 printf("type %s, usecount %d, writecount %d, refcount %d,", 2343 typename[vp->v_type], vp->v_usecount, vp->v_writecount, 2344 vp->v_holdcnt); 2345 buf[0] = '\0'; 2346 if (vp->v_flag & VROOT) 2347 strcat(buf, "|VROOT"); 2348 if (vp->v_flag & VTEXT) 2349 strcat(buf, "|VTEXT"); 2350 if (vp->v_flag & VSYSTEM) 2351 strcat(buf, "|VSYSTEM"); 2352 if (vp->v_flag & VXLOCK) 2353 strcat(buf, "|VXLOCK"); 2354 if (vp->v_flag & VXWANT) 2355 strcat(buf, "|VXWANT"); 2356 if (vp->v_flag & VBWAIT) 2357 strcat(buf, "|VBWAIT"); 2358 if (vp->v_flag & VDOOMED) 2359 strcat(buf, "|VDOOMED"); 2360 if (vp->v_flag & VFREE) 2361 strcat(buf, "|VFREE"); 2362 if (vp->v_flag & VOBJBUF) 2363 strcat(buf, "|VOBJBUF"); 2364 if (buf[0] != '\0') 2365 printf(" flags (%s)", &buf[1]); 2366 if (vp->v_data == NULL) { 2367 printf("\n"); 2368 } else { 2369 printf("\n\t"); 2370 VOP_PRINT(vp); 2371 } 2372 } 2373 2374 #ifdef DDB 2375 #include <ddb/ddb.h> 2376 /* 2377 * List all of the locked vnodes in the system. 2378 * Called when debugging the kernel. 
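 *
 * With DDB compiled in this is reached from the debugger prompt as
 * "show lockedvnodes"; it takes no arguments and vprint()s every vnode
 * that reports itself locked via VOP_ISLOCKED().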
2379 */ 2380 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes) 2381 { 2382 struct thread *td = curthread; /* XXX */ 2383 lwkt_tokref ilock; 2384 struct mount *mp, *nmp; 2385 struct vnode *vp; 2386 2387 printf("Locked vnodes\n"); 2388 lwkt_gettoken(&ilock, &mountlist_token); 2389 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2390 if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) { 2391 nmp = TAILQ_NEXT(mp, mnt_list); 2392 continue; 2393 } 2394 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2395 if (VOP_ISLOCKED(vp, NULL)) 2396 vprint((char *)0, vp); 2397 } 2398 lwkt_gettokref(&ilock); 2399 nmp = TAILQ_NEXT(mp, mnt_list); 2400 vfs_unbusy(mp, td); 2401 } 2402 lwkt_reltoken(&ilock); 2403 } 2404 #endif 2405 2406 /* 2407 * Top level filesystem related information gathering. 2408 */ 2409 static int sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS); 2410 2411 static int 2412 vfs_sysctl(SYSCTL_HANDLER_ARGS) 2413 { 2414 int *name = (int *)arg1 - 1; /* XXX */ 2415 u_int namelen = arg2 + 1; /* XXX */ 2416 struct vfsconf *vfsp; 2417 2418 #if 1 || defined(COMPAT_PRELITE2) 2419 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 2420 if (namelen == 1) 2421 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 2422 #endif 2423 2424 #ifdef notyet 2425 /* all sysctl names at this level are at least name and field */ 2426 if (namelen < 2) 2427 return (ENOTDIR); /* overloaded */ 2428 if (name[0] != VFS_GENERIC) { 2429 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2430 if (vfsp->vfc_typenum == name[0]) 2431 break; 2432 if (vfsp == NULL) 2433 return (EOPNOTSUPP); 2434 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 2435 oldp, oldlenp, newp, newlen, p)); 2436 } 2437 #endif 2438 switch (name[1]) { 2439 case VFS_MAXTYPENUM: 2440 if (namelen != 2) 2441 return (ENOTDIR); 2442 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 2443 case VFS_CONF: 2444 if (namelen != 3) 2445 return (ENOTDIR); /* overloaded */ 2446 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2447 if (vfsp->vfc_typenum == name[2]) 2448 break; 2449 if (vfsp == NULL) 2450 return (EOPNOTSUPP); 2451 return (SYSCTL_OUT(req, vfsp, sizeof *vfsp)); 2452 } 2453 return (EOPNOTSUPP); 2454 } 2455 2456 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl, 2457 "Generic filesystem"); 2458 2459 #if 1 || defined(COMPAT_PRELITE2) 2460 2461 static int 2462 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 2463 { 2464 int error; 2465 struct vfsconf *vfsp; 2466 struct ovfsconf ovfs; 2467 2468 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 2469 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 2470 strcpy(ovfs.vfc_name, vfsp->vfc_name); 2471 ovfs.vfc_index = vfsp->vfc_typenum; 2472 ovfs.vfc_refcount = vfsp->vfc_refcount; 2473 ovfs.vfc_flags = vfsp->vfc_flags; 2474 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 2475 if (error) 2476 return error; 2477 } 2478 return 0; 2479 } 2480 2481 #endif /* 1 || COMPAT_PRELITE2 */ 2482 2483 #if 0 2484 #define KINFO_VNODESLOP 10 2485 /* 2486 * Dump vnode list (via sysctl). 2487 * Copyout address of vnode followed by vnode. 
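 *
 * The exported image is a flat sequence of (struct vnode *, struct vnode)
 * pairs, one per vnode, with KINFO_VNODESLOP extra entries allowed for
 * when only a size estimate is requested.  The handler is compiled out
 * (#if 0) because exporting raw vnodes this way is unsafe, as noted
 * further below.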
2488 */ 2489 /* ARGSUSED */ 2490 static int 2491 sysctl_vnode(SYSCTL_HANDLER_ARGS) 2492 { 2493 struct proc *p = curproc; /* XXX */ 2494 struct mount *mp, *nmp; 2495 struct vnode *nvp, *vp; 2496 lwkt_tokref ilock; 2497 lwkt_tokref jlock; 2498 int error; 2499 2500 #define VPTRSZ sizeof (struct vnode *) 2501 #define VNODESZ sizeof (struct vnode) 2502 2503 req->lock = 0; 2504 if (!req->oldptr) /* Make an estimate */ 2505 return (SYSCTL_OUT(req, 0, 2506 (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ))); 2507 2508 lwkt_gettoken(&ilock, &mountlist_token); 2509 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2510 if (vfs_busy(mp, LK_NOWAIT, &ilock, p)) { 2511 nmp = TAILQ_NEXT(mp, mnt_list); 2512 continue; 2513 } 2514 lwkt_gettoken(&jlock, &mntvnode_token); 2515 again: 2516 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 2517 vp != NULL; 2518 vp = nvp) { 2519 /* 2520 * Check that the vp is still associated with 2521 * this filesystem. RACE: could have been 2522 * recycled onto the same filesystem. 2523 */ 2524 if (vp->v_mount != mp) 2525 goto again; 2526 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 2527 if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) || 2528 (error = SYSCTL_OUT(req, vp, VNODESZ))) { 2529 lwkt_reltoken(&jlock); 2530 return (error); 2531 } 2532 } 2533 lwkt_reltoken(&jlock); 2534 lwkt_gettokref(&ilock); 2535 nmp = TAILQ_NEXT(mp, mnt_list); /* ZZZ */ 2536 vfs_unbusy(mp, p); 2537 } 2538 lwkt_reltoken(&ilock); 2539 2540 return (0); 2541 } 2542 #endif 2543 2544 /* 2545 * XXX 2546 * Exporting the vnode list on large systems causes them to crash. 2547 * Exporting the vnode list on medium systems causes sysctl to coredump. 2548 */ 2549 #if 0 2550 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 2551 0, 0, sysctl_vnode, "S,vnode", ""); 2552 #endif 2553 2554 /* 2555 * Check to see if a filesystem is mounted on a block device. 2556 */ 2557 int 2558 vfs_mountedon(vp) 2559 struct vnode *vp; 2560 { 2561 2562 if (vp->v_specmountpoint != NULL) 2563 return (EBUSY); 2564 return (0); 2565 } 2566 2567 /* 2568 * Unmount all filesystems. The list is traversed in reverse order 2569 * of mounting to avoid dependencies. 2570 */ 2571 void 2572 vfs_unmountall() 2573 { 2574 struct mount *mp; 2575 struct thread *td = curthread; 2576 int error; 2577 2578 if (td->td_proc == NULL) 2579 td = initproc->p_thread; /* XXX XXX use proc0 instead? */ 2580 2581 /* 2582 * Since this only runs when rebooting, it is not interlocked. 2583 */ 2584 while(!TAILQ_EMPTY(&mountlist)) { 2585 mp = TAILQ_LAST(&mountlist, mntlist); 2586 error = dounmount(mp, MNT_FORCE, td); 2587 if (error) { 2588 TAILQ_REMOVE(&mountlist, mp, mnt_list); 2589 printf("unmount of %s failed (", 2590 mp->mnt_stat.f_mntonname); 2591 if (error == EBUSY) 2592 printf("BUSY)\n"); 2593 else 2594 printf("%d)\n", error); 2595 } else { 2596 /* The unmount has removed mp from the mountlist */ 2597 } 2598 } 2599 } 2600 2601 /* 2602 * Build hash lists of net addresses and hang them off the mount point. 2603 * Called by ufs_mount() to set up the lists of export addresses. 
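 *
 * In rough outline (descriptive only): an export update arrives as a
 * struct export_args.  An entry with ex_addrlen == 0 becomes the default
 * export for the filesystem; an entry carrying an address (and optional
 * mask) is copied in behind a struct netcred and inserted into the
 * per-address-family radix tree, where vfs_export_lookup() can later
 * match incoming client addresses against it.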
2604 */ 2605 static int 2606 vfs_hang_addrlist(mp, nep, argp) 2607 struct mount *mp; 2608 struct netexport *nep; 2609 struct export_args *argp; 2610 { 2611 struct netcred *np; 2612 struct radix_node_head *rnh; 2613 int i; 2614 struct radix_node *rn; 2615 struct sockaddr *saddr, *smask = 0; 2616 struct domain *dom; 2617 int error; 2618 2619 if (argp->ex_addrlen == 0) { 2620 if (mp->mnt_flag & MNT_DEFEXPORTED) 2621 return (EPERM); 2622 np = &nep->ne_defexported; 2623 np->netc_exflags = argp->ex_flags; 2624 np->netc_anon = argp->ex_anon; 2625 np->netc_anon.cr_ref = 1; 2626 mp->mnt_flag |= MNT_DEFEXPORTED; 2627 return (0); 2628 } 2629 2630 if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN) 2631 return (EINVAL); 2632 if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN) 2633 return (EINVAL); 2634 2635 i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen; 2636 np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK); 2637 bzero((caddr_t) np, i); 2638 saddr = (struct sockaddr *) (np + 1); 2639 if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen))) 2640 goto out; 2641 if (saddr->sa_len > argp->ex_addrlen) 2642 saddr->sa_len = argp->ex_addrlen; 2643 if (argp->ex_masklen) { 2644 smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen); 2645 error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen); 2646 if (error) 2647 goto out; 2648 if (smask->sa_len > argp->ex_masklen) 2649 smask->sa_len = argp->ex_masklen; 2650 } 2651 i = saddr->sa_family; 2652 if ((rnh = nep->ne_rtable[i]) == 0) { 2653 /* 2654 * Seems silly to initialize every AF when most are not used, 2655 * do so on demand here 2656 */ 2657 for (dom = domains; dom; dom = dom->dom_next) 2658 if (dom->dom_family == i && dom->dom_rtattach) { 2659 dom->dom_rtattach((void **) &nep->ne_rtable[i], 2660 dom->dom_rtoffset); 2661 break; 2662 } 2663 if ((rnh = nep->ne_rtable[i]) == 0) { 2664 error = ENOBUFS; 2665 goto out; 2666 } 2667 } 2668 rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh, 2669 np->netc_rnodes); 2670 if (rn == 0 || np != (struct netcred *) rn) { /* already exists */ 2671 error = EPERM; 2672 goto out; 2673 } 2674 np->netc_exflags = argp->ex_flags; 2675 np->netc_anon = argp->ex_anon; 2676 np->netc_anon.cr_ref = 1; 2677 return (0); 2678 out: 2679 free(np, M_NETADDR); 2680 return (error); 2681 } 2682 2683 /* ARGSUSED */ 2684 static int 2685 vfs_free_netcred(rn, w) 2686 struct radix_node *rn; 2687 void *w; 2688 { 2689 struct radix_node_head *rnh = (struct radix_node_head *) w; 2690 2691 (*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh); 2692 free((caddr_t) rn, M_NETADDR); 2693 return (0); 2694 } 2695 2696 /* 2697 * Free the net address hash lists that are hanging off the mount points. 
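 *
 * Each per-address-family radix tree is walked with vfs_free_netcred()
 * so the individual netcred entries are deleted and freed before the
 * tree head itself is released and the ne_rtable[] slot is cleared.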
2698 */ 2699 static void 2700 vfs_free_addrlist(nep) 2701 struct netexport *nep; 2702 { 2703 int i; 2704 struct radix_node_head *rnh; 2705 2706 for (i = 0; i <= AF_MAX; i++) 2707 if ((rnh = nep->ne_rtable[i])) { 2708 (*rnh->rnh_walktree) (rnh, vfs_free_netcred, 2709 (caddr_t) rnh); 2710 free((caddr_t) rnh, M_RTABLE); 2711 nep->ne_rtable[i] = 0; 2712 } 2713 } 2714 2715 int 2716 vfs_export(mp, nep, argp) 2717 struct mount *mp; 2718 struct netexport *nep; 2719 struct export_args *argp; 2720 { 2721 int error; 2722 2723 if (argp->ex_flags & MNT_DELEXPORT) { 2724 if (mp->mnt_flag & MNT_EXPUBLIC) { 2725 vfs_setpublicfs(NULL, NULL, NULL); 2726 mp->mnt_flag &= ~MNT_EXPUBLIC; 2727 } 2728 vfs_free_addrlist(nep); 2729 mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED); 2730 } 2731 if (argp->ex_flags & MNT_EXPORTED) { 2732 if (argp->ex_flags & MNT_EXPUBLIC) { 2733 if ((error = vfs_setpublicfs(mp, nep, argp)) != 0) 2734 return (error); 2735 mp->mnt_flag |= MNT_EXPUBLIC; 2736 } 2737 if ((error = vfs_hang_addrlist(mp, nep, argp))) 2738 return (error); 2739 mp->mnt_flag |= MNT_EXPORTED; 2740 } 2741 return (0); 2742 } 2743 2744 2745 /* 2746 * Set the publicly exported filesystem (WebNFS). Currently, only 2747 * one public filesystem is possible in the spec (RFC 2054 and 2055) 2748 */ 2749 int 2750 vfs_setpublicfs(mp, nep, argp) 2751 struct mount *mp; 2752 struct netexport *nep; 2753 struct export_args *argp; 2754 { 2755 int error; 2756 struct vnode *rvp; 2757 char *cp; 2758 2759 /* 2760 * mp == NULL -> invalidate the current info, the FS is 2761 * no longer exported. May be called from either vfs_export 2762 * or unmount, so check if it hasn't already been done. 2763 */ 2764 if (mp == NULL) { 2765 if (nfs_pub.np_valid) { 2766 nfs_pub.np_valid = 0; 2767 if (nfs_pub.np_index != NULL) { 2768 FREE(nfs_pub.np_index, M_TEMP); 2769 nfs_pub.np_index = NULL; 2770 } 2771 } 2772 return (0); 2773 } 2774 2775 /* 2776 * Only one allowed at a time. 2777 */ 2778 if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount) 2779 return (EBUSY); 2780 2781 /* 2782 * Get real filehandle for root of exported FS. 2783 */ 2784 bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle)); 2785 nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid; 2786 2787 if ((error = VFS_ROOT(mp, &rvp))) 2788 return (error); 2789 2790 if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) 2791 return (error); 2792 2793 vput(rvp); 2794 2795 /* 2796 * If an indexfile was specified, pull it in. 2797 */ 2798 if (argp->ex_indexfile != NULL) { 2799 MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP, 2800 M_WAITOK); 2801 error = copyinstr(argp->ex_indexfile, nfs_pub.np_index, 2802 MAXNAMLEN, (size_t *)0); 2803 if (!error) { 2804 /* 2805 * Check for illegal filenames. 2806 */ 2807 for (cp = nfs_pub.np_index; *cp; cp++) { 2808 if (*cp == '/') { 2809 error = EINVAL; 2810 break; 2811 } 2812 } 2813 } 2814 if (error) { 2815 FREE(nfs_pub.np_index, M_TEMP); 2816 return (error); 2817 } 2818 } 2819 2820 nfs_pub.np_mount = mp; 2821 nfs_pub.np_valid = 1; 2822 return (0); 2823 } 2824 2825 struct netcred * 2826 vfs_export_lookup(mp, nep, nam) 2827 struct mount *mp; 2828 struct netexport *nep; 2829 struct sockaddr *nam; 2830 { 2831 struct netcred *np; 2832 struct radix_node_head *rnh; 2833 struct sockaddr *saddr; 2834 2835 np = NULL; 2836 if (mp->mnt_flag & MNT_EXPORTED) { 2837 /* 2838 * Lookup in the export list first. 
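 *
 * (An NFS request handler, for example, would pass the client's
 * sockaddr as `nam' and refuse service when NULL comes back, meaning
 * the address matched neither a specific entry nor a default export.)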
2839 */ 2840 if (nam != NULL) { 2841 saddr = nam; 2842 rnh = nep->ne_rtable[saddr->sa_family]; 2843 if (rnh != NULL) { 2844 np = (struct netcred *) 2845 (*rnh->rnh_matchaddr)((caddr_t)saddr, 2846 rnh); 2847 if (np && np->netc_rnodes->rn_flags & RNF_ROOT) 2848 np = NULL; 2849 } 2850 } 2851 /* 2852 * If no address match, use the default if it exists. 2853 */ 2854 if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED) 2855 np = &nep->ne_defexported; 2856 } 2857 return (np); 2858 } 2859 2860 /* 2861 * perform msync on all vnodes under a mount point. The mount point must 2862 * be locked. This code is also responsible for lazy-freeing unreferenced 2863 * vnodes whos VM objects no longer contain pages. 2864 * 2865 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state. 2866 */ 2867 static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data); 2868 static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, 2869 lwkt_tokref_t vlock, void *data); 2870 2871 void 2872 vfs_msync(struct mount *mp, int flags) 2873 { 2874 vmntvnodescan(mp, vfs_msync_scan1, vfs_msync_scan2, (void *)flags); 2875 } 2876 2877 /* 2878 * scan1 is a fast pre-check. There could be hundreds of thousands of 2879 * vnodes, we cannot afford to do anything heavy weight until we have a 2880 * fairly good indication that there is work to do. 2881 */ 2882 static 2883 int 2884 vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data) 2885 { 2886 int flags = (int)data; 2887 2888 if ((vp->v_flag & VXLOCK) == 0) { 2889 if (VSHOULDFREE(vp)) 2890 return(0); 2891 if ((mp->mnt_flag & MNT_RDONLY) == 0 && 2892 (vp->v_flag & VOBJDIRTY) && 2893 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) { 2894 return(0); 2895 } 2896 } 2897 return(-1); 2898 } 2899 2900 static 2901 int 2902 vfs_msync_scan2(struct mount *mp, struct vnode *vp, lwkt_tokref_t vlock, void *data) 2903 { 2904 vm_object_t obj; 2905 int error; 2906 int flags = (int)data; 2907 2908 if (vp->v_flag & VXLOCK) 2909 return(0); 2910 2911 if ((mp->mnt_flag & MNT_RDONLY) == 0 && 2912 (vp->v_flag & VOBJDIRTY) && 2913 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) { 2914 error = vget(vp, vlock, LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ | LK_INTERLOCK, curthread); 2915 if (error == 0) { 2916 if (VOP_GETVOBJECT(vp, &obj) == 0) { 2917 vm_object_page_clean(obj, 0, 0, 2918 flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC); 2919 } 2920 vput(vp); 2921 } 2922 return(0); 2923 } 2924 vmaybefree(vp); 2925 lwkt_reltoken(vlock); 2926 return(0); 2927 } 2928 2929 /* 2930 * Create the VM object needed for VMIO and mmap support. This 2931 * is done for all VREG files in the system. Some filesystems might 2932 * afford the additional metadata buffering capability of the 2933 * VMIO code by making the device node be VMIO mode also. 2934 * 2935 * vp must be locked when vfs_object_create is called. 2936 */ 2937 int 2938 vfs_object_create(struct vnode *vp, struct thread *td) 2939 { 2940 return (VOP_CREATEVOBJECT(vp, td)); 2941 } 2942 2943 /* 2944 * NOTE: the vnode interlock must be held during the call. We have to recheck 2945 * the VFREE flag since the vnode may have been removed from the free list 2946 * while we were blocked on vnode_free_list_token. The use or hold count 2947 * must have already been bumped by the caller. 
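 *
 * vhold() above reaches this routine through VSHOULDBUSY() once it has
 * bumped v_holdcnt; as the inline note there says, the vnode interlock
 * must already be held when that call is made.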
2948 */ 2949 static void 2950 vbusy(struct vnode *vp) 2951 { 2952 lwkt_tokref ilock; 2953 2954 lwkt_gettoken(&ilock, &vnode_free_list_token); 2955 if ((vp->v_flag & VFREE) != 0) { 2956 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2957 freevnodes--; 2958 vp->v_flag &= ~(VFREE|VAGE); 2959 } 2960 lwkt_reltoken(&ilock); 2961 } 2962 2963 /* 2964 * NOTE: the vnode interlock must be held during the call. The use or hold 2965 * count must have already been bumped by the caller. We use a VINFREE to 2966 * interlock against other calls to vfree() which might occur while we 2967 * are blocked. The vnode cannot be reused until it has actually been 2968 * placed on the free list, so there are no other races even though the 2969 * use and hold counts are 0. 2970 */ 2971 static void 2972 vfree(struct vnode *vp) 2973 { 2974 lwkt_tokref ilock; 2975 2976 if ((vp->v_flag & VINFREE) == 0) { 2977 vp->v_flag |= VINFREE; 2978 lwkt_gettoken(&ilock, &vnode_free_list_token); /* can block */ 2979 KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free")); 2980 if (vp->v_flag & VAGE) { 2981 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2982 } else { 2983 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 2984 } 2985 freevnodes++; 2986 vp->v_flag &= ~(VAGE|VINFREE); 2987 vp->v_flag |= VFREE; 2988 lwkt_reltoken(&ilock); /* can block */ 2989 } 2990 } 2991 2992 2993 /* 2994 * Record a process's interest in events which might happen to 2995 * a vnode. Because poll uses the historic select-style interface 2996 * internally, this routine serves as both the ``check for any 2997 * pending events'' and the ``record my interest in future events'' 2998 * functions. (These are done together, while the lock is held, 2999 * to avoid race conditions.) 3000 */ 3001 int 3002 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 3003 { 3004 lwkt_tokref ilock; 3005 3006 lwkt_gettoken(&ilock, &vp->v_pollinfo.vpi_token); 3007 if (vp->v_pollinfo.vpi_revents & events) { 3008 /* 3009 * This leaves events we are not interested 3010 * in available for the other process which 3011 * which presumably had requested them 3012 * (otherwise they would never have been 3013 * recorded). 3014 */ 3015 events &= vp->v_pollinfo.vpi_revents; 3016 vp->v_pollinfo.vpi_revents &= ~events; 3017 3018 lwkt_reltoken(&ilock); 3019 return events; 3020 } 3021 vp->v_pollinfo.vpi_events |= events; 3022 selrecord(td, &vp->v_pollinfo.vpi_selinfo); 3023 lwkt_reltoken(&ilock); 3024 return 0; 3025 } 3026 3027 /* 3028 * Note the occurrence of an event. If the VN_POLLEVENT macro is used, 3029 * it is possible for us to miss an event due to race conditions, but 3030 * that condition is expected to be rare, so for the moment it is the 3031 * preferred interface. 3032 */ 3033 void 3034 vn_pollevent(vp, events) 3035 struct vnode *vp; 3036 short events; 3037 { 3038 lwkt_tokref ilock; 3039 3040 lwkt_gettoken(&ilock, &vp->v_pollinfo.vpi_token); 3041 if (vp->v_pollinfo.vpi_events & events) { 3042 /* 3043 * We clear vpi_events so that we don't 3044 * call selwakeup() twice if two events are 3045 * posted before the polling process(es) is 3046 * awakened. This also ensures that we take at 3047 * most one selwakeup() if the polling process 3048 * is no longer interested. However, it does 3049 * mean that only one event can be noticed at 3050 * a time. (Perhaps we should only clear those 3051 * event bits which we note?) XXX 3052 */ 3053 vp->v_pollinfo.vpi_events = 0; /* &= ~events ??? 
*/ 3054 vp->v_pollinfo.vpi_revents |= events; 3055 selwakeup(&vp->v_pollinfo.vpi_selinfo); 3056 } 3057 lwkt_reltoken(&ilock); 3058 } 3059 3060 /* 3061 * Wake up anyone polling on vp because it is being revoked. 3062 * This depends on dead_poll() returning POLLHUP for correct 3063 * behavior. 3064 */ 3065 void 3066 vn_pollgone(vp) 3067 struct vnode *vp; 3068 { 3069 lwkt_tokref ilock; 3070 3071 lwkt_gettoken(&ilock, &vp->v_pollinfo.vpi_token); 3072 if (vp->v_pollinfo.vpi_events) { 3073 vp->v_pollinfo.vpi_events = 0; 3074 selwakeup(&vp->v_pollinfo.vpi_selinfo); 3075 } 3076 lwkt_reltoken(&ilock); 3077 } 3078 3079 3080 3081 /* 3082 * Routine to create and manage a filesystem syncer vnode. 3083 */ 3084 #define sync_close ((int (*) (struct vop_close_args *))nullop) 3085 static int sync_fsync (struct vop_fsync_args *); 3086 static int sync_inactive (struct vop_inactive_args *); 3087 static int sync_reclaim (struct vop_reclaim_args *); 3088 #define sync_lock ((int (*) (struct vop_lock_args *))vop_nolock) 3089 #define sync_unlock ((int (*) (struct vop_unlock_args *))vop_nounlock) 3090 static int sync_print (struct vop_print_args *); 3091 #define sync_islocked ((int(*) (struct vop_islocked_args *))vop_noislocked) 3092 3093 static vop_t **sync_vnodeop_p; 3094 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = { 3095 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 3096 { &vop_close_desc, (vop_t *) sync_close }, /* close */ 3097 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */ 3098 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */ 3099 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */ 3100 { &vop_lock_desc, (vop_t *) sync_lock }, /* lock */ 3101 { &vop_unlock_desc, (vop_t *) sync_unlock }, /* unlock */ 3102 { &vop_print_desc, (vop_t *) sync_print }, /* print */ 3103 { &vop_islocked_desc, (vop_t *) sync_islocked }, /* islocked */ 3104 { NULL, NULL } 3105 }; 3106 static struct vnodeopv_desc sync_vnodeop_opv_desc = 3107 { &sync_vnodeop_p, sync_vnodeop_entries }; 3108 3109 VNODEOP_SET(sync_vnodeop_opv_desc); 3110 3111 /* 3112 * Create a new filesystem syncer vnode for the specified mount point. 3113 * This vnode is placed on the worklist and is responsible for sync'ing 3114 * the filesystem. 3115 * 3116 * NOTE: read-only mounts are also placed on the worklist. The filesystem 3117 * sync code is also responsible for cleaning up vnodes. 3118 */ 3119 int 3120 vfs_allocate_syncvnode(struct mount *mp) 3121 { 3122 struct vnode *vp; 3123 static long start, incr, next; 3124 int error; 3125 3126 /* Allocate a new vnode */ 3127 if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) { 3128 mp->mnt_syncer = NULL; 3129 return (error); 3130 } 3131 vp->v_type = VNON; 3132 /* 3133 * Place the vnode onto the syncer worklist. We attempt to 3134 * scatter them about on the list so that they will go off 3135 * at evenly distributed times even if all the filesystems 3136 * are mounted at once. 3137 */ 3138 next += incr; 3139 if (next == 0 || next > syncer_maxdelay) { 3140 start /= 2; 3141 incr /= 2; 3142 if (start == 0) { 3143 start = syncer_maxdelay / 2; 3144 incr = syncer_maxdelay; 3145 } 3146 next = start; 3147 } 3148 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0); 3149 mp->mnt_syncer = vp; 3150 return (0); 3151 } 3152 3153 /* 3154 * Do a lazy sync of the filesystem. 
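 *
 * The syncer thread fsyncs the per-mount syncer vnode with MNT_LAZY
 * when its slot in the worklist comes up; any other waitfor value makes
 * this routine a no-op.  Each lazy pass also re-queues the syncer vnode
 * syncdelay seconds into the future.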
 */
static int
sync_fsync(ap)
        struct vop_fsync_args /* {
                struct vnode *a_vp;
                struct ucred *a_cred;
                int a_waitfor;
                struct thread *a_td;
        } */ *ap;
{
        struct vnode *syncvp = ap->a_vp;
        struct mount *mp = syncvp->v_mount;
        struct thread *td = ap->a_td;
        lwkt_tokref ilock;
        int asyncflag;

        /*
         * We only need to do something if this is a lazy evaluation.
         */
        if (ap->a_waitfor != MNT_LAZY)
                return (0);

        /*
         * Move ourselves to the back of the sync list.
         */
        vn_syncer_add_to_worklist(syncvp, syncdelay);

        /*
         * Walk the list of vnodes pushing all that are dirty and
         * not already on the sync list, and freeing vnodes which have
         * no refs and whose VM objects are empty.  vfs_msync() handles
         * the VM issues and must be called whether the mount is readonly
         * or not.
         */
        lwkt_gettoken(&ilock, &mountlist_token);
        if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &ilock, td) != 0) {
                lwkt_reltoken(&ilock);
                return (0);
        }
        if (mp->mnt_flag & MNT_RDONLY) {
                vfs_msync(mp, MNT_NOWAIT);
        } else {
                asyncflag = mp->mnt_flag & MNT_ASYNC;
                mp->mnt_flag &= ~MNT_ASYNC;     /* ZZZ hack */
                vfs_msync(mp, MNT_NOWAIT);
                VFS_SYNC(mp, MNT_LAZY, td);
                if (asyncflag)
                        mp->mnt_flag |= MNT_ASYNC;
        }
        vfs_unbusy(mp, td);
        return (0);
}

/*
 * The syncer vnode is no longer referenced.
 */
static int
sync_inactive(ap)
        struct vop_inactive_args /* {
                struct vnode *a_vp;
                struct proc *a_p;
        } */ *ap;
{

        vgone(ap->a_vp);
        return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 *
 * Modifications to the worklist must be protected at splbio().
 */
static int
sync_reclaim(ap)
        struct vop_reclaim_args /* {
                struct vnode *a_vp;
        } */ *ap;
{
        struct vnode *vp = ap->a_vp;
        int s;

        s = splbio();
        vp->v_mount->mnt_syncer = NULL;
        if (vp->v_flag & VONWORKLST) {
                LIST_REMOVE(vp, v_synclist);
                vp->v_flag &= ~VONWORKLST;
        }
        splx(s);

        return (0);
}

/*
 * Print out a syncer vnode.
3250 */ 3251 static int 3252 sync_print(ap) 3253 struct vop_print_args /* { 3254 struct vnode *a_vp; 3255 } */ *ap; 3256 { 3257 struct vnode *vp = ap->a_vp; 3258 3259 printf("syncer vnode"); 3260 if (vp->v_vnlock != NULL) 3261 lockmgr_printinfo(vp->v_vnlock); 3262 printf("\n"); 3263 return (0); 3264 } 3265 3266 /* 3267 * extract the dev_t from a VBLK or VCHR 3268 */ 3269 dev_t 3270 vn_todev(vp) 3271 struct vnode *vp; 3272 { 3273 if (vp->v_type != VBLK && vp->v_type != VCHR) 3274 return (NODEV); 3275 return (vp->v_rdev); 3276 } 3277 3278 /* 3279 * Check if vnode represents a disk device 3280 */ 3281 int 3282 vn_isdisk(vp, errp) 3283 struct vnode *vp; 3284 int *errp; 3285 { 3286 if (vp->v_type != VBLK && vp->v_type != VCHR) { 3287 if (errp != NULL) 3288 *errp = ENOTBLK; 3289 return (0); 3290 } 3291 if (vp->v_rdev == NULL) { 3292 if (errp != NULL) 3293 *errp = ENXIO; 3294 return (0); 3295 } 3296 if (!dev_dport(vp->v_rdev)) { 3297 if (errp != NULL) 3298 *errp = ENXIO; 3299 return (0); 3300 } 3301 if (!(dev_dflags(vp->v_rdev) & D_DISK)) { 3302 if (errp != NULL) 3303 *errp = ENOTBLK; 3304 return (0); 3305 } 3306 if (errp != NULL) 3307 *errp = 0; 3308 return (1); 3309 } 3310 3311 void 3312 NDFREE(ndp, flags) 3313 struct nameidata *ndp; 3314 const uint flags; 3315 { 3316 if (!(flags & NDF_NO_FREE_PNBUF) && 3317 (ndp->ni_cnd.cn_flags & CNP_HASBUF)) { 3318 zfree(namei_zone, ndp->ni_cnd.cn_pnbuf); 3319 ndp->ni_cnd.cn_flags &= ~CNP_HASBUF; 3320 } 3321 if (!(flags & NDF_NO_DNCP_RELE) && 3322 (ndp->ni_cnd.cn_flags & CNP_WANTDNCP) && 3323 ndp->ni_dncp) { 3324 cache_drop(ndp->ni_dncp); 3325 ndp->ni_dncp = NULL; 3326 } 3327 if (!(flags & NDF_NO_NCP_RELE) && 3328 (ndp->ni_cnd.cn_flags & CNP_WANTNCP) && 3329 ndp->ni_ncp) { 3330 cache_drop(ndp->ni_ncp); 3331 ndp->ni_ncp = NULL; 3332 } 3333 if (!(flags & NDF_NO_DVP_UNLOCK) && 3334 (ndp->ni_cnd.cn_flags & CNP_LOCKPARENT) && 3335 ndp->ni_dvp != ndp->ni_vp) { 3336 VOP_UNLOCK(ndp->ni_dvp, NULL, 0, ndp->ni_cnd.cn_td); 3337 } 3338 if (!(flags & NDF_NO_DVP_RELE) && 3339 (ndp->ni_cnd.cn_flags & (CNP_LOCKPARENT|CNP_WANTPARENT))) { 3340 vrele(ndp->ni_dvp); 3341 ndp->ni_dvp = NULL; 3342 } 3343 if (!(flags & NDF_NO_VP_UNLOCK) && 3344 (ndp->ni_cnd.cn_flags & CNP_LOCKLEAF) && ndp->ni_vp) { 3345 VOP_UNLOCK(ndp->ni_vp, NULL, 0, ndp->ni_cnd.cn_td); 3346 } 3347 if (!(flags & NDF_NO_VP_RELE) && 3348 ndp->ni_vp) { 3349 vrele(ndp->ni_vp); 3350 ndp->ni_vp = NULL; 3351 } 3352 if (!(flags & NDF_NO_STARTDIR_RELE) && 3353 (ndp->ni_cnd.cn_flags & CNP_SAVESTART)) { 3354 vrele(ndp->ni_startdir); 3355 ndp->ni_startdir = NULL; 3356 } 3357 } 3358 3359 #ifdef DEBUG_VFS_LOCKS 3360 3361 void 3362 assert_vop_locked(struct vnode *vp, const char *str) 3363 { 3364 3365 if (vp && IS_LOCKING_VFS(vp) && !VOP_ISLOCKED(vp, NULL)) { 3366 panic("%s: %p is not locked shared but should be", str, vp); 3367 } 3368 } 3369 3370 void 3371 assert_vop_unlocked(struct vnode *vp, const char *str) 3372 { 3373 3374 if (vp && IS_LOCKING_VFS(vp)) { 3375 if (VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE) { 3376 panic("%s: %p is locked but should not be", str, vp); 3377 } 3378 } 3379 } 3380 3381 #endif 3382
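
/*
 * Illustrative usage sketch (not part of the original source): a typical
 * consumer of NDFREE() is code that performed a namei() lookup and wants
 * to release everything the lookup left behind in one call.  The exact
 * NDINIT() arguments below are assumptions for illustration only:
 *
 *      struct nameidata nd;
 *      int error;
 *
 *      NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF,
 *          UIO_USERSPACE, upath, td);
 *      if ((error = namei(&nd)) != 0)
 *              return (error);
 *      ...operate on nd.ni_vp while it is locked...
 *      NDFREE(&nd, 0);                 (flags == 0 releases everything)
 */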