/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

int numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes allocated");
int verbose_reclaims;
SYSCTL_INT(_debug, OID_AUTO, verbose_reclaims, CTLFLAG_RD, &verbose_reclaims, 0,
    "Output filename of reclaimed vnode(s)");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls,
    0, "Number of times buffers have been reassigned to the proper list");

static int check_buf_overlap = 2;	/* invasive check */
SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW, &check_buf_overlap,
    0, "Enable overlapping buffer checks");

int	nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");

static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				   const struct export_args *argp);

/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}
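
/*
 * Illustrative sketch (not compiled): the per-vnode RB trees are keyed
 * on b_loffset, so ranged walks are expressed through RB_SCAN with a
 * compare callback returning -1 (before the range), 0 (inside, visit)
 * or +1 (past the range, stop).  The callback name is hypothetical;
 * vtruncbuf_bp_trunc_cmp() further below is a real instance of the
 * pattern.
 */
#if 0
static int
example_range_cmp(struct buf *bp, void *data)
{
	off_t *endoff = data;

	if (bp->b_loffset >= *endoff)
		return(1);	/* beyond the range, stop the scan */
	return(0);		/* inside the range, visit the buffer */
}
#endif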

/*
 * Returns non-zero if the vnode is a candidate for lazy msyncing.
 *
 * NOTE: v_object is not stable (this scan can race), however the
 *	 mntvnodescan code holds vmobj_token so any VM object we
 *	 do find will remain stable storage.
 */
static __inline int
vshouldmsync(struct vnode *vp)
{
	vm_object_t object;

	if (vp->v_auxrefs != 0 || vp->v_sysref.refcnt > 0)
		return (0);		/* other holders */
	object = vp->v_object;
	cpu_ccfence();
	if (object && (object->ref_count || object->resident_page_count))
		return(0);
	return (1);
}

/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	int factor1;
	int factor2;

	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM, which is capped on 32 bit
	 * systems, to ~80K vnodes or so.
	 *
	 * WARNING!  For machines with 64-256M of ram we have to be sure
	 *	     that the default limit scales down well due to HAMMER
	 *	     taking up significantly more memory per-vnode vs UFS.
	 *	     We want around ~5800 on a 128M machine.
	 */
	factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode));
	factor2 = 25 * (sizeof(struct vm_object) + sizeof(struct vnode));
	desiredvnodes =
		imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
		     KvaSize / factor2);
	desiredvnodes = imax(desiredvnodes, maxproc * 8);

	lwkt_token_init(&spechash_token, "spechash");
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "Precision of file timestamps");

/*
 * Get a current timestamp.
 *
 * MPSAFE
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	/* va_*_uuid fields are only valid if related flags are set */
}
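
/*
 * Illustrative sketch (not compiled): callers normally vattr_null() a
 * vattr and fill in only the fields a given operation changes, leaving
 * the rest VNOVAL so the VFS can distinguish "unset" from a real value.
 * example_set_size() is a hypothetical helper.
 */
#if 0
static int
example_set_size(struct vnode *vp, off_t newsize, struct ucred *cred)
{
	struct vattr va;

	vattr_null(&va);
	va.va_size = newsize;	/* the only attribute being changed */
	return (VOP_SETATTR(vp, &va, cred));
}
#endif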

/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
	int clean;
};

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	vm_object_t object;
	int error;

	lwkt_gettoken(&vp->v_token);

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
	 */
	if (flags & V_SAVE) {
		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
		if (error)
			goto done;
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
				goto done;
#if 0
			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.  Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (bio_track_active(&vp->v_track_write) ||
			     !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
#endif
		}
	}
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left, wait for all I/O
	 * to complete.  At least one pass is required.  We might block
	 * in the pip code so we have to re-check.  Order is important.
	 */
	do {
		/*
		 * Flush buffer cache
		 */
		if (!RB_EMPTY(&vp->v_rbclean_tree)) {
			info.clean = 1;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
					NULL, vinvalbuf_bp, &info);
		}
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			info.clean = 0;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					NULL, vinvalbuf_bp, &info);
		}

		/*
		 * Wait for I/O completion.
		 */
		bio_track_wait(&vp->v_track_write, 0, 0);
		if ((object = vp->v_object) != NULL)
			refcount_wait(&object->paging_in_progress, "vnvlbx");
	} while (bio_track_active(&vp->v_track_write) ||
		 !RB_EMPTY(&vp->v_rbclean_tree) ||
		 !RB_EMPTY(&vp->v_rbdirty_tree));

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
				      (flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	error = 0;
done:
	lwkt_reltoken(&vp->v_token);
	return (error);
}
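
/*
 * Illustrative sketch (not compiled): typical vinvalbuf() usage from a
 * filesystem tearing down a vnode's cached data.  V_SAVE writes dirty
 * buffers out before invalidation; a zero flags argument discards
 * everything.  The vnode must be locked by the caller.
 */
#if 0
	error = vinvalbuf(vp, V_SAVE, 0, 0);	/* write back, then toss */
	error = vinvalbuf(vp, 0, PCATCH, 0);	/* discard, signalable */
#endif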

static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		atomic_subtract_int(&bp->b_refs, 1);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}
	KKASSERT(bp->b_vp == info->vp);

	/*
	 * Must check clean/dirty status after successfully locking as
	 * it may race.
	 */
	if ((info->clean && (bp->b_flags & B_DELWRI)) ||
	    (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * NOTE: NO B_LOCKED CHECK.  Also no buf_checkwrite() check.
	 *	 This code will write out the buffer, period.
	 */
	bremfree(bp);
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		cluster_awrite(bp);
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
		 */
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
	} else {
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		brelse(bp);
	}
	return(0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);

struct vtruncbuf_info {
	struct vnode *vp;
	off_t	truncloffset;
	int	clean;
};

int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	struct vtruncbuf_info info;
	const char *filename;
	int count;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		info.truncloffset = length + (blksize - count);
	else
		info.truncloffset = length;
	info.vp = vp;

	lwkt_gettoken(&vp->v_token);
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, &info);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 *
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	vnode_pager_setsize(vp, length);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only
	 */
	spin_lock(&vp->v_spin);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock(&vp->v_spin);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		if (count) {
			kprintf("Warning: vtruncbuf():  Had to re-clean %d "
			       "left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);

	return (0);
}
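
/*
 * Illustrative sketch (not compiled): a filesystem's truncation path
 * calls vtruncbuf() after updating the inode size so that buffers and
 * VM pages beyond the new EOF go away.  fs_blksize stands in for the
 * filesystem's block size and is hypothetical here.
 */
#if 0
	error = vtruncbuf(vp, newsize, fs_blksize);
#endif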

/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (bp->b_loffset >= info->truncloffset)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 *
	 * We must always revalidate the buffer after locking it to deal
	 * with MP races.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_trunc_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
 * blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((bp->b_flags & B_DELWRI) == 0 ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_metasync_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		if (bp->b_vp == info->vp)
			bawrite(bp);
		else
			bwrite(bp);
	}
	return(1);
}

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
 */
static int vfsync_wait_output(struct vnode *vp,
			    int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused);
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
	int (*cmpfunc)(struct buf *, void *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
       int (*checkdef)(struct buf *),
       int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vp->v_token);

	switch(waitfor) {
	case MNT_LAZY | MNT_NOWAIT:
	case MNT_LAZY:
		/*
		 * Lazy (filesystem syncer typical).  Asynchronous, plus limit
		 * the number of data (not meta) pages we try to flush to 1MB.
		 * A non-zero return means that the lazy limit was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		info.cmpfunc = vfsync_lazy_range_cmp;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
		 */
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			info.cmpfunc = vfsync_dummy_cmp;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs) {
				kprintf("Warning: vfsync skipped %d dirty "
					"bufs in pass2!\n", info.skippedbufs);
			}
		}
		while (error == 0 && passes > 0 &&
		       !RB_EMPTY(&vp->v_rbdirty_tree)
		) {
			if (--passes == 0) {
				info.synchronous = 1;
				info.syncdeps = 1;
			}
			info.cmpfunc = vfsync_dummy_cmp;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vfsync_bp, &info);
			if (error < 0)
				error = -error;
			info.syncdeps = 1;
			if (error == 0)
				error = vfsync_wait_output(vp, waitoutput);
		}
		break;
	}
	lwkt_reltoken(&vp->v_token);
	return(error);
}
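
/*
 * Illustrative sketch (not compiled): a minimal VOP_FSYNC built on
 * vfsync().  Passing a NULL checkdef disables dependency handling; a
 * filesystem with ordered metadata would instead supply a callback
 * returning non-zero for buffers that cannot be written yet.
 */
#if 0
static int
example_fsync(struct vop_fsync_args *ap)
{
	return (vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL));
}
#endif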

static int
vfsync_wait_output(struct vnode *vp,
		   int (*waitoutput)(struct vnode *, struct thread *))
{
	int error;

	error = bio_track_wait(&vp->v_track_write, 0, 0);
	if (waitoutput)
		error = waitoutput(vp, curthread);
	return(error);
}

static int
vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused)
{
	return(0);
}

static int
vfsync_data_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(-1);
	return(0);
}

static int
vfsync_meta_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vfsync_lazy_range_cmp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;

	if (bp->b_loffset < info->vp->v_lazyw)
		return(-1);
	return(0);
}

static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	/*
	 * Ignore buffers that we cannot immediately lock.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		++info->skippedbufs;
		return(0);
	}

	/*
	 * We must revalidate the buffer after locking.
	 */
	if ((bp->b_flags & B_DELWRI) == 0 ||
	    bp->b_vp != info->vp ||
	    info->cmpfunc(bp, data)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * If syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync.  If not the VFS may have
	 * set B_LOCKED so we have to cycle the buffer.
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flushing.  An error may be returned.
		 */
		bremfree(bp);
		error = bwrite(bp);
	} else {
		/*
		 * Asynchronous flushing.  A negative return value simply
		 * stops the scan and is not considered an error.  We use
		 * this to support limited MNT_LAZY flushes.
		 */
		vp->v_lazyw = bp->b_loffset;
		bremfree(bp);
		info->lazycount += cluster_awrite(bp);
		waitrunningbufspace();
		vm_wait_nominal();
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}

/*
 * Associate a buffer with a vnode.
 *
 * MPSAFE
 */
int
bgetvp(struct vnode *vp, struct buf *bp, int testsize)
{
	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	/*
	 * Insert onto list for new vnode.
	 */
	lwkt_gettoken(&vp->v_token);

	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
		lwkt_reltoken(&vp->v_token);
		return (EEXIST);
	}

	/*
	 * Diagnostics (mainly for HAMMER debugging).  Check for
	 * overlapping buffers.
	 */
	if (check_buf_overlap) {
		struct buf *bx;
		bx = buf_rb_hash_RB_PREV(bp);
		if (bx) {
			if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) {
				kprintf("bgetvp: overlapl %016jx/%d %016jx "
					"bx %p bp %p\n",
					(intmax_t)bx->b_loffset,
					bx->b_bufsize,
					(intmax_t)bp->b_loffset,
					bx, bp);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
		bx = buf_rb_hash_RB_NEXT(bp);
		if (bx) {
			if (bp->b_loffset + testsize > bx->b_loffset) {
				kprintf("bgetvp: overlapr %016jx/%d %016jx "
					"bp %p bx %p\n",
					(intmax_t)bp->b_loffset,
					testsize,
					(intmax_t)bx->b_loffset,
					bp, bx);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
	}
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	/*vhold(vp);*/
	lwkt_reltoken(&vp->v_token);
	return(0);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * MPSAFE
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	lwkt_gettoken(&vp->v_token);
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}
	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree))
		vn_syncer_remove(vp);
	bp->b_vp = NULL;

	lwkt_reltoken(&vp->v_token);

	/*vdrop(vp);*/
}
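
/*
 * Illustrative sketch (not compiled): the buffer cache brackets a
 * buffer's life on a vnode with these two calls.  bgetvp() returns
 * EEXIST if another buffer already occupies that logical offset.
 */
#if 0
	bp->b_loffset = loffset;
	error = bgetvp(vp, bp, size);	/* associate at b_loffset */
	if (error == 0) {
		/* ... use the buffer ... */
		brelvp(bp);		/* disassociate before reuse */
	}
#endif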

/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 *
 * Must be called with vp->v_token held.
 * MPSAFE
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	int delay;

	ASSERT_LWKT_TOKEN_HELD(&vp->v_token);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}
		if ((vp->v_flag & VONWORKLST) &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vn_syncer_remove(vp);
		}
	}
}

/*
 * Create a vnode for a block device.  Used for mounting the root file
 * system.
 *
 * A vref()'d vnode is returned.
 */
extern struct vop_ops *devfs_vnode_dev_vops_p;
int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
				&nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
#if 0
	vp->v_rdev = dev;
#endif
	v_associate_rdev(vp, dev);
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}
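
/*
 * Illustrative sketch (not compiled): early mountroot code is
 * essentially the only consumer of bdevvp(), turning the root cdev
 * into a vref'd vnode that filesystem mount code can issue I/O on.
 */
#if 0
	struct vnode *rootvp;

	if (bdevvp(rootdev, &rootvp) != 0)
		panic("mountroot: cannot get vnode for root device");
#endif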

int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&spechash_token);
	return(0);
}

void
v_release_rdev(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&spechash_token);
	}
}

/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}

/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (sysref_isactive(&vp->v_sysref) == 0)
		vgone_vxlocked(vp);
	vx_put(vp);
}
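
/*
 * Illustrative sketch (not compiled): a filesystem trying to shed an
 * idle in-memory inode can poke its vnode with vclean_unlocked() and
 * then test whether the inode is still attached.  The ip back-pointer
 * and the helper are hypothetical names.
 */
#if 0
	vclean_unlocked(vp);
	if (ip->i_vnode == NULL)	/* reclaimed, inode detached */
		example_free_inode(ip);
#endif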

/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;
	struct namecache *ncp;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;
	vsetflags(vp, VRECLAIMED);

	if (verbose_reclaims) {
		if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL)
			kprintf("Debug: reclaim %p %s\n", vp, ncp->nc_name);
	}

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution "
			"race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = sysref_isactive(&vp->v_sysref);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK);
			else
				VOP_CLOSE(vp, FNONBLOCK);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
					" vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 *
	 * If the vnode is already dead don't try to deactivate it.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * If the vnode has an object, destroy it.
	 */
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if (object == vp->v_object)
			break;
		vm_object_drop(object);
	}

	if (object != NULL) {
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
			vm_object_drop(object);
			vclrflags(vp, VOBJBUF);
		} else {
			vm_pager_deallocate(object);
			vclrflags(vp, VOBJBUF);
			vm_object_drop(object);
		}
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	/*
	 * Reclaim the vnode if not already dead.
	 */
	if (vp->v_mount && VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_gone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vclrflags(vp, VINACTIVE | VRECLAIMED);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	struct vnode *vqn;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 *
	 * Releasing the vnode's rdev here can mess up specfs's call to
	 * device close, so don't do it.  The vnode has been disassociated
	 * and the device will be closed after the last ref on the related
	 * fp goes away (if not still open by e.g. the kernel).
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		return(0);
	}
	reference_dev(dev);
	lwkt_gettoken(&spechash_token);

restart:
	vqn = SLIST_FIRST(&dev->si_hlist);
	if (vqn)
		vhold(vqn);
	while ((vq = vqn) != NULL) {
		if (sysref_isactive(&vq->v_sysref)) {
			vref(vq);
			fdrevoke(vq, DTYPE_VNODE, cred);
			/*v_release_rdev(vq);*/
			vrele(vq);
			if (vq->v_rdev != dev) {
				vdrop(vq);
				goto restart;
			}
		}
		vqn = SLIST_NEXT(vq, v_cdevnext);
		if (vqn)
			vhold(vqn);
		vdrop(vq);
	}
	lwkt_reltoken(&spechash_token);
	dev_drevoke(dev);
	release_dev(dev);
	return (0);
}

/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
 */
int
vrecycle(struct vnode *vp)
{
	if (vp->v_sysref.refcnt <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}

/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 *
 * The returned value is clamped at MAXPHYS as most callers cannot use
 * buffers larger than that size.
 */
int
vmaxiosize(struct vnode *vp)
{
	int maxiosize;

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		maxiosize = vp->v_rdev->si_iosize_max;
	else
		maxiosize = vp->v_mount->mnt_iosize_max;

	if (maxiosize > MAXPHYS)
		maxiosize = MAXPHYS;
	return (maxiosize);
}
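
/*
 * Illustrative sketch (not compiled): I/O path code clamps its chunk
 * size against vmaxiosize() so a single strategy call never exceeds
 * what the underlying device or mount can handle.
 */
#if 0
	int maxiosize = vmaxiosize(vp);

	if (iosize > maxiosize)
		iosize = maxiosize;
#endif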

/*
 * Eliminate all activity associated with a vnode in preparation for reuse.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 *	 deactivated (VOP_INACTIVE), or on a vnode which has already been
 *	 reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(vp->v_lock.lk_exclusivecount == 1);

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;

	lwkt_gettoken(&spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&spechash_token);
			return (1);
		}
	}
	lwkt_reltoken(&spechash_token);
	return (0);
}

/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
 */
int
count_dev(cdev_t dev)
{
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			count += vp->v_opencount;
		}
		lwkt_reltoken(&spechash_token);
	}
	return(count);
}

int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}

/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff)
{
	vm_object_t object;
	int error = 0;

retry:
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if (object == vp->v_object)
			break;
		vm_object_drop(object);
	}

	if (object == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff);

		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		vm_object_hold(object);
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			vn_unlock(vp);
			if (vp->v_object == object)
				vm_object_dead_sleep(object, "vodead");
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			vm_object_drop(object);
			goto retry;
		}
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vsetflags(vp, VOBJBUF);
	vm_object_drop(object);

	return (error);
}
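
/*
 * Illustrative sketch (not compiled): a VFS wires up VMIO while
 * instantiating a vnode from its on-disk inode, before any buffer
 * cache operations.  ip->i_size and fs_blksize are hypothetical names.
 */
#if 0
	if (vp->v_type == VREG) {
		error = vinitvmio(vp, ip->i_size, fs_blksize, -1);
		if (error)
			return (error);
	}
#endif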

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, sysrefs %d, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_sysref.refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
	mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;
	int ismember;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
	 */
	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	ismember = groupmember(gid, cred);
	if (cred->cr_svgid == gid || ismember) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}
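
/*
 * Illustrative sketch (not compiled): a typical VOP_ACCESS forwards
 * the inode's mode/uid/gid into vaccess().  VTOI() is a hypothetical
 * vnode-to-inode macro.
 */
#if 0
static int
example_access(struct vop_access_args *ap)
{
	struct inode *ip = VTOI(ap->a_vp);

	return (vaccess(ap->a_vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
			ap->a_mode, ap->a_cred));
}
#endif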

#ifdef DDB
#include <ddb/ddb.h>

static int db_show_locked_vnodes(struct mount *mp, void *data);

/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	kprintf("Locked vnodes\n");
	mountlist_scan(db_show_locked_vnodes, NULL,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
}

static int
db_show_locked_vnodes(struct mount *mp, void *data __unused)
{
	struct vnode *vp;

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		if (vn_islocked(vp))
			vprint(NULL, vp);
	}
	return(0);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	int maxtypenum;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		vfsp = vfsconf_find_by_typenum(name[0]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		maxtypenum = vfsconf_get_maxtypenum();
		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		vfsp = vfsconf_find_by_typenum(name[2]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
{
	int error;
	struct ovfsconf ovfs;
	struct sysctl_req *req = (struct sysctl_req*) data;

	bzero(&ovfs, sizeof(ovfs));
	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
	strcpy(ovfs.vfc_name, vfsp->vfc_name);
	ovfs.vfc_index = vfsp->vfc_typenum;
	ovfs.vfc_refcount = vfsp->vfc_refcount;
	ovfs.vfc_flags = vfsp->vfc_flags;
	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
	if (error)
		return error;	/* abort iteration with error code */
	else
		return 0;	/* continue iterating with next element */
}

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
}

#endif /* 1 || COMPAT_PRELITE2 */

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL) {
/*		if (vp->v_type != VBLK)
			dev = get_dev(vp->v_uminor, vp->v_umajor); */
	}
	if (dev != NULL && dev->si_mountpoint)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems.  The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */

static int vfs_umountall_callback(struct mount *mp, void *data);

void
vfs_unmountall(void)
{
	int count;

	do {
		count = mountlist_scan(vfs_umountall_callback,
				       NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
	} while (count);
}

static
int
vfs_umountall_callback(struct mount *mp, void *data)
{
	int error;

	error = dounmount(mp, MNT_FORCE);
	if (error) {
		mountlist_remove(mp);
		kprintf("unmount of filesystem mounted from %s failed (",
			mp->mnt_stat.f_mntfromname);
		if (error == EBUSY)
			kprintf("BUSY)\n");
		else
			kprintf("%d)\n", error);
	}
	return(1);
}

/*
 * Checks the mount flags for parameter mp and puts the names comma-separated
 * into a string buffer buf with a size limit specified by len.
 *
 * It returns the number of bytes written into buf, and (*errorp) will be
 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was
 * not large enough).  The buffer will be 0-terminated if len was not 0.
 */
size_t
vfs_flagstostr(int flags, const struct mountctl_opt *optp,
	       char *buf, size_t len, int *errorp)
{
	static const struct mountctl_opt optnames[] = {
		{ MNT_ASYNC,		"asynchronous" },
		{ MNT_EXPORTED,		"NFS exported" },
		{ MNT_LOCAL,		"local" },
		{ MNT_NOATIME,		"noatime" },
		{ MNT_NODEV,		"nodev" },
		{ MNT_NOEXEC,		"noexec" },
		{ MNT_NOSUID,		"nosuid" },
		{ MNT_NOSYMFOLLOW,	"nosymfollow" },
		{ MNT_QUOTA,		"with-quotas" },
		{ MNT_RDONLY,		"read-only" },
		{ MNT_SYNCHRONOUS,	"synchronous" },
		{ MNT_UNION,		"union" },
		{ MNT_NOCLUSTERR,	"noclusterr" },
		{ MNT_NOCLUSTERW,	"noclusterw" },
		{ MNT_SUIDDIR,		"suiddir" },
		{ MNT_SOFTDEP,		"soft-updates" },
		{ MNT_IGNORE,		"ignore" },
		{ 0,			NULL}
	};
	int bwritten;
	int bleft;
	int optlen;
	int actsize;

	*errorp = 0;
	bwritten = 0;
	bleft = len - 1;	/* leave room for trailing \0 */

	/*
	 * Checks the size of the string.  If it contains
	 * any data, then we will append the new flags to
	 * it.
	 */
	actsize = strlen(buf);
	if (actsize > 0)
		buf += actsize;

	/* Default flags if no flags passed */
	if (optp == NULL)
		optp = optnames;

	if (bleft < 0) {	/* degenerate case, 0-length buffer */
		*errorp = EINVAL;
		return(0);
	}

	for (; flags && optp->o_opt; ++optp) {
		if ((flags & optp->o_opt) == 0)
			continue;
		optlen = strlen(optp->o_name);
		if (bwritten || actsize > 0) {
			if (bleft < 2) {
				*errorp = ENOSPC;
				break;
			}
			buf[bwritten++] = ',';
			buf[bwritten++] = ' ';
			bleft -= 2;
		}
		if (bleft < optlen) {
			*errorp = ENOSPC;
			break;
		}
		bcopy(optp->o_name, buf + bwritten, optlen);
		bwritten += optlen;
		bleft -= optlen;
		flags &= ~optp->o_opt;
	}

	/*
	 * Space already reserved for trailing \0
	 */
	buf[bwritten] = 0;
	return (bwritten);
}
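
/*
 * Illustrative sketch (not compiled): formatting a mount's flags for
 * display.  The buffer must start out as a valid string because
 * vfs_flagstostr() appends to any existing contents; a NULL optp
 * selects the built-in name table.
 */
#if 0
	char buf[128];
	int error;

	buf[0] = '\0';
	vfs_flagstostr(mp->mnt_flag, NULL, buf, sizeof(buf), &error);
	if (error == 0)
		kprintf("mount flags: %s\n", buf);
#endif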

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
		  const struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = NULL;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
		return (EINVAL);
	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == NULL) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		SLIST_FOREACH(dom, &domains, dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == NULL) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh,
	    np->netc_rnodes);
	if (rn == NULL || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	kfree(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			kfree((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep,
	   const struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
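
/*
 * Illustrative sketch (not compiled): a filesystem's VFS_MOUNT update
 * path usually forwards userland export requests straight to
 * vfs_export(), which maintains the netexport lists on its behalf.
 * The mount-args layout and fs_netexport are hypothetical names.
 */
#if 0
	if (args.export.ex_flags)
		error = vfs_export(mp, &fs_netexport, &args.export);
#endif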
/*
 * Perform an msync on all vnodes under a mount point.  The mount point
 * must be locked.  This code is also responsible for lazy-freeing
 * unreferenced vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 *	 but vnode_pager_putpages() doesn't lock the vnode.  We have to
 *	 do it way up here in this high-level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
	int vmsc_flags;

	/*
	 * tmpfs sets this flag to prevent msync(), sync, and the
	 * filesystem periodic syncer from trying to flush VM pages
	 * to swap.  Only pure memory pressure flushes tmpfs VM pages
	 * to swap.
	 */
	if (mp->mnt_kern_flag & MNTK_NOMSYNC)
		return;

	/*
	 * Ok, scan the vnodes for work.
	 */
	vmsc_flags = VMSC_GETVP;
	if (flags != MNT_WAIT)
		vmsc_flags |= VMSC_NOWAIT;
	vmntvnodescan(mp, vmsc_flags,
		      vfs_msync_scan1, vfs_msync_scan2,
		      (void *)(intptr_t)flags);
}

/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes, so we cannot afford to do anything heavy-weight until we have
 * a fairly good indication that there is work to do.
 */
static
int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	int flags = (int)(intptr_t)data;

	if ((vp->v_flag & VRECLAIMED) == 0) {
		if (vshouldmsync(vp))
			return(0);	/* call scan2 */
		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
		    (vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
			return(0);	/* call scan2 */
		}
	}

	/*
	 * Do not call scan2; continue the loop.
	 */
	return(-1);
}

/*
 * This callback is handed a locked vnode.
 */
static
int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	vm_object_t obj;
	int flags = (int)(intptr_t)data;

	if (vp->v_flag & VRECLAIMED)
		return(0);

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
		if ((obj = vp->v_object) != NULL) {
			vm_object_page_clean(obj, 0, 0,
			    flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
		}
	}
	return(0);
}
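
/*
 * Editor's sketch (not part of the original source): the two typical
 * ways vfs_msync() above is invoked.  The periodic syncer flushes
 * asynchronously, while unmount/sync(2)-style paths wait for the pages
 * to reach stable storage.
 */
#if 0
	vfs_msync(mp, MNT_NOWAIT);	/* periodic syncer: don't block */
	vfs_msync(mp, MNT_WAIT);	/* unmount path: wait for completion */
#endif
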
/*
 * Wake up anyone interested in vp because it is being revoked.
 */
void
vn_gone(struct vnode *vp)
{
	lwkt_gettoken(&vp->v_token);
	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE);
	lwkt_reltoken(&vp->v_token);
}

/*
 * Extract the cdev_t from a VBLK or VCHR vnode.  The vnode must have
 * been opened, otherwise v_rdev may be NULL.
 */
cdev_t
vn_todev(struct vnode *vp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (NULL);
	KKASSERT(vp->v_rdev != NULL);
	return (vp->v_rdev);
}

/*
 * Check whether the vnode represents a disk device.  The vnode does not
 * need to be opened.
 *
 * MPALMOSTSAFE
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	cdev_t dev;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}

	dev = vp->v_rdev;

	if (dev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (dev_is_good(dev) == 0) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

int
vn_get_namelen(struct vnode *vp, int *namelen)
{
	int error;
	register_t retval[2];

	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
	if (error)
		return (error);
	*namelen = (int)retval[0];
	return (0);
}

int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
		 uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}

void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
		VOP_MARKATIME(vp, cred);
	}
}
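
/*
 * Editor's sketch (not part of the original source): the emit loop a
 * filesystem readdir implementation typically builds around
 * vop_write_dirent() above.  example_next_entry() and its out
 * parameters are hypothetical stand-ins for the filesystem's own
 * directory iteration.
 */
#if 0
	ino_t ino;
	uint8_t type;
	uint16_t namlen;
	const char *name;
	int error = 0;

	while (example_next_entry(&ino, &type, &name, &namlen)) {
		/*
		 * A non-zero return means the entry did not fit in the
		 * remaining uio space; stop here and let the caller
		 * resume from this offset on the next VOP_READDIR call.
		 */
		if (vop_write_dirent(&error, ap->a_uio, ino, type,
				     namlen, name))
			break;
		if (error)
			break;
	}
#endif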