/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

int numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes allocated");
int verbose_reclaims;
SYSCTL_INT(_debug, OID_AUTO, verbose_reclaims, CTLFLAG_RD, &verbose_reclaims, 0,
    "Output filename of reclaimed vnode(s)");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls,
    0, "Number of times buffers have been reassigned to the proper list");

static int check_buf_overlap = 2;	/* invasive check */
SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW, &check_buf_overlap,
    0, "Enable overlapping buffer checks");

int nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");

static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				       const struct export_args *argp);

/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}

/*
 * Returns non-zero if the vnode is a candidate for lazy msyncing.
 *
 * NOTE: v_object is not stable (this scan can race), however the
 *	 mntvnodescan code holds vmobj_token so any VM object we
 *	 do find will remain stable storage.
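 *
 * (Added note, not in the original sources: the test below is
 * deliberately cheap and lock-free -- two reference counts plus the
 * object's resident page count -- so callers such as the periodic
 * syncer's vfs_msync_scan1(), later in this file, can reject most
 * vnodes before doing anything heavyweight.)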
 */
static __inline int
vshouldmsync(struct vnode *vp)
{
	vm_object_t object;

	if (vp->v_auxrefs != 0 || vp->v_sysref.refcnt > 0)
		return (0);		/* other holders */
	object = vp->v_object;
	cpu_ccfence();
	if (object && (object->ref_count || object->resident_page_count))
		return(0);
	return (1);
}

/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	int factor1;
	int factor2;

	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM, which is capped on 32 bit
	 * systems.
	 *
	 * WARNING!  For machines with 64-256M of ram we have to be sure
	 *	     that the default limit scales down well due to HAMMER
	 *	     taking up significantly more memory per-vnode vs UFS.
	 *	     We want around ~5800 on a 128M machine.
	 */
	factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode));
	factor2 = 22 * (sizeof(struct vm_object) + sizeof(struct vnode));
	desiredvnodes =
		imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
		     KvaSize / factor2);
	desiredvnodes = imax(desiredvnodes, maxproc * 8);

	lwkt_token_init(&spechash_token, "spechash");
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "Precision of file timestamps");

/*
 * Get a current timestamp.
 *
 * MPSAFE
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	/* va_*_uuid fields are only valid if related flags are set */
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
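 *
 * Typical usage within this file (see vclean_vxlocked() below), where
 * V_SAVE requests that dirty buffers be synced out before everything
 * is invalidated:
 *
 *	vinvalbuf(vp, V_SAVE, 0, 0);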
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
	int clean;
};

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	vm_object_t object;
	int error;

	lwkt_gettoken(&vp->v_token);

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
	 */
	if (flags & V_SAVE) {
		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
		if (error)
			goto done;
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
				goto done;

			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.  Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (bio_track_active(&vp->v_track_write) ||
			     !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
		}
	}
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left.
	 */
	while (!RB_EMPTY(&vp->v_rbclean_tree) ||
	       !RB_EMPTY(&vp->v_rbdirty_tree)) {
		info.clean = 1;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, NULL,
				vinvalbuf_bp, &info);
		if (error == 0) {
			info.clean = 0;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vinvalbuf_bp, &info);
		}
	}

	/*
	 * Wait for I/O completion.  We may block in the pip code so we have
	 * to re-check.
	 */
	do {
		bio_track_wait(&vp->v_track_write, 0, 0);
		if ((object = vp->v_object) != NULL) {
			refcount_wait(&object->paging_in_progress, "vnvlbx");
		}
	} while (bio_track_active(&vp->v_track_write));

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
				      (flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	error = 0;
done:
	lwkt_reltoken(&vp->v_token);
	return (error);
}

static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		atomic_subtract_int(&bp->b_refs, 1);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}
	KKASSERT(bp->b_vp == info->vp);

	/*
	 * Must check clean/dirty status after successfully locking as
	 * it may race.
	 */
	if ((info->clean && (bp->b_flags & B_DELWRI)) ||
	    (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Note that vfs_bio_awrite expects buffers to reside
	 * on a queue, while bwrite() and brelse() do not.
	 *
	 * NOTE: NO B_LOCKED CHECK.  Also no buf_checkwrite()
	 * check.  This code will write out the buffer, period.
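	 *
	 * (Summary of the three cases below, added for clarity: a dirty
	 * buffer with V_SAVE is written out; a clean buffer with V_SAVE
	 * is invalidated but its VM backing store is left alone; without
	 * V_SAVE the buffer and its backing store are both tossed via
	 * B_NOCACHE.)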
	 */
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		if (bp->b_flags & B_CLUSTEROK) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
		 */
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		brelse(bp);
	}
	return(0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);

struct vtruncbuf_info {
	struct vnode *vp;
	off_t truncloffset;
	int clean;
};

int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	struct vtruncbuf_info info;
	const char *filename;
	int count;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		info.truncloffset = length + (blksize - count);
	else
		info.truncloffset = length;
	info.vp = vp;

	lwkt_gettoken(&vp->v_token);
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, &info);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 *
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	vnode_pager_setsize(vp, length);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only
	 */
	spin_lock(&vp->v_spin);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock(&vp->v_spin);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
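	 *
	 * (Added note: flushing such a page can instantiate a new
	 * buffer, which is why the scan below repeats until a full
	 * pass finds nothing left to clean.)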
	 */
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		if (count) {
			kprintf("Warning: vtruncbuf(): Had to re-clean %d "
			       "left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);

	return (0);
}

/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (bp->b_loffset >= info->truncloffset)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 *
	 * We must always revalidate the buffer after locking it to deal
	 * with MP races.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_trunc_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
 * blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((bp->b_flags & B_DELWRI) == 0 ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_metasync_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		if (bp->b_vp == info->vp)
			bawrite(bp);
		else
			bwrite(bp);
	}
	return(1);
}

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
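 *
 * Illustrative call only (not taken from this file): a filesystem's
 * VOP_FSYNC method might invoke
 *
 *	error = vfsync(vp, MNT_WAIT, 1, NULL, NULL);
 *
 * supplying its own checkdef/waitoutput callbacks instead of NULL when
 * it tracks buffer dependencies.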
 */
static int vfsync_wait_output(struct vnode *vp,
			    int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused);
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
	int (*cmpfunc)(struct buf *, void *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
       int (*checkdef)(struct buf *),
       int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vp->v_token);

	switch(waitfor) {
	case MNT_LAZY | MNT_NOWAIT:
	case MNT_LAZY:
		/*
		 * Lazy (typical of the filesystem syncer).  Asynchronous,
		 * plus limit the number of data (not meta) pages we try
		 * to flush to 1MB.  A non-zero return means the lazy limit
		 * was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		info.cmpfunc = vfsync_lazy_range_cmp;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
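		 *
		 * (Pass structure, summarized: data-only pass, wait,
		 * full pass, wait, then up to 'passes' additional full
		 * passes, the last of which is forced fully synchronous.)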
		 */
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			info.cmpfunc = vfsync_dummy_cmp;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs) {
				kprintf("Warning: vfsync skipped %d dirty "
					"bufs in pass2!\n", info.skippedbufs);
			}
		}
		while (error == 0 && passes > 0 &&
		       !RB_EMPTY(&vp->v_rbdirty_tree)) {
			if (--passes == 0) {
				info.synchronous = 1;
				info.syncdeps = 1;
			}
			info.cmpfunc = vfsync_dummy_cmp;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vfsync_bp, &info);
			if (error < 0)
				error = -error;
			info.syncdeps = 1;
			if (error == 0)
				error = vfsync_wait_output(vp, waitoutput);
		}
		break;
	}
	lwkt_reltoken(&vp->v_token);
	return(error);
}

static int
vfsync_wait_output(struct vnode *vp,
		   int (*waitoutput)(struct vnode *, struct thread *))
{
	int error;

	error = bio_track_wait(&vp->v_track_write, 0, 0);
	if (waitoutput)
		error = waitoutput(vp, curthread);
	return(error);
}

/*
 * Compare function which matches every buffer; used for the full
 * (data + metadata) passes.
 */
static int
vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused)
{
	return(0);
}

/*
 * Match only data buffers; negative loffsets (metadata) sort below
 * the scan range and are skipped.
 */
static int
vfsync_data_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(-1);
	return(0);
}

/*
 * Match only metadata buffers (negative loffsets); the scan stops
 * once it reaches data.
 */
static int
vfsync_meta_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

/*
 * Start the lazy scan at the vnode's current lazy write offset
 * (v_lazyw) so successive lazy passes resume where the last one
 * left off.
 */
static int
vfsync_lazy_range_cmp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;

	if (bp->b_loffset < info->vp->v_lazyw)
		return(-1);
	return(0);
}

static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	/*
	 * Ignore buffers that we cannot immediately lock.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		++info->skippedbufs;
		return(0);
	}

	/*
	 * We must revalidate the buffer after locking.
	 */
	if ((bp->b_flags & B_DELWRI) == 0 ||
	    bp->b_vp != info->vp ||
	    info->cmpfunc(bp, data)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * If syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync.  If not the VFS may have
	 * set B_LOCKED so we have to cycle the buffer.
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flushing.  An error may be returned.
		 */
		bremfree(bp);
		error = bwrite(bp);
	} else {
		/*
		 * Asynchronous flushing.  A negative return value simply
		 * stops the scan and is not considered an error.  We use
		 * this to support limited MNT_LAZY flushes.
		 */
		vp->v_lazyw = bp->b_loffset;
		if ((vp->v_flag & VOBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			info->lazycount += vfs_bio_awrite(bp);
		} else {
			info->lazycount += bp->b_bufsize;
			bremfree(bp);
			bawrite(bp);
		}
		waitrunningbufspace();
		vm_wait_nominal();
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}

/*
 * Associate a buffer with a vnode.
 *
 * MPSAFE
 */
int
bgetvp(struct vnode *vp, struct buf *bp, int testsize)
{
	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	/*
	 * Insert onto list for new vnode.
	 */
	lwkt_gettoken(&vp->v_token);

	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
		lwkt_reltoken(&vp->v_token);
		return (EEXIST);
	}

	/*
	 * Diagnostics (mainly for HAMMER debugging).  Check for
	 * overlapping buffers.
	 */
	if (check_buf_overlap) {
		struct buf *bx;
		bx = buf_rb_hash_RB_PREV(bp);
		if (bx) {
			if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) {
				kprintf("bgetvp: overlapl %016jx/%d %016jx "
					"bx %p bp %p\n",
					(intmax_t)bx->b_loffset,
					bx->b_bufsize,
					(intmax_t)bp->b_loffset,
					bx, bp);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
		bx = buf_rb_hash_RB_NEXT(bp);
		if (bx) {
			if (bp->b_loffset + testsize > bx->b_loffset) {
				kprintf("bgetvp: overlapr %016jx/%d %016jx "
					"bp %p bx %p\n",
					(intmax_t)bp->b_loffset,
					testsize,
					(intmax_t)bx->b_loffset,
					bp, bx);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
	}
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	vhold(vp);
	lwkt_reltoken(&vp->v_token);
	return(0);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * MPSAFE
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	lwkt_gettoken(&vp->v_token);
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}
	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree))
		vn_syncer_remove(vp);
	bp->b_vp = NULL;

	lwkt_reltoken(&vp->v_token);

	vdrop(vp);
}

/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 *
 * Must be called with vp->v_token held.
 * MPSAFE
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	int delay;

	ASSERT_LWKT_TOKEN_HELD(&vp->v_token);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}
		if ((vp->v_flag & VONWORKLST) &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vn_syncer_remove(vp);
		}
	}
}

/*
 * Create a vnode for a block device.  Used for mounting the root file
 * system.
 *
 * A vref()'d vnode is returned.
 */
extern struct vop_ops *devfs_vnode_dev_vops_p;
int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
				&nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
#if 0
	vp->v_rdev = dev;
#endif
	v_associate_rdev(vp, dev);
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}

int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&spechash_token);
	return(0);
}

void
v_release_rdev(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&spechash_token);
	}
}

/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}

/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (sysref_isactive(&vp->v_sysref) == 0)
		vgone_vxlocked(vp);
	vx_put(vp);
}

/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;
	struct namecache *ncp;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;
	vsetflags(vp, VRECLAIMED);

	if (verbose_reclaims) {
		if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL)
			kprintf("Debug: reclaim %p %s\n", vp, ncp->nc_name);
	}

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution "
			"race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = sysref_isactive(&vp->v_sysref);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK);
			else
				VOP_CLOSE(vp, FNONBLOCK);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
				       " vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 *
	 * If the vnode is already dead don't try to deactivate it.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * If the vnode has an object, destroy it.
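	 *
	 * (The loop below is the usual hold-then-recheck idiom:
	 * vm_object_hold() can block, so v_object is re-sampled after
	 * the hold to be sure we still hold the object actually
	 * associated with the vnode.  vinitvmio() uses the same
	 * pattern.)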
	 */
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if (object == vp->v_object)
			break;
		vm_object_drop(object);
	}

	if (object != NULL) {
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
			vm_object_drop(object);
			vclrflags(vp, VOBJBUF);
		} else {
			vm_pager_deallocate(object);
			vclrflags(vp, VOBJBUF);
			vm_object_drop(object);
		}
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	/*
	 * Reclaim the vnode if not already dead.
	 */
	if (vp->v_mount && VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_gone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vclrflags(vp, VINACTIVE | VRECLAIMED);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	struct vnode *vqn;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 *
	 * Releasing the vnode's rdev here can mess up specfs's call to
	 * device close, so don't do it.  The vnode has been disassociated
	 * and the device will be closed after the last ref on the related
	 * fp goes away (if not still open by e.g. the kernel).
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		return(0);
	}
	reference_dev(dev);
	lwkt_gettoken(&spechash_token);

restart:
	vqn = SLIST_FIRST(&dev->si_hlist);
	if (vqn)
		vhold(vqn);
	while ((vq = vqn) != NULL) {
		if (sysref_isactive(&vq->v_sysref)) {
			vref(vq);
			fdrevoke(vq, DTYPE_VNODE, cred);
			/*v_release_rdev(vq);*/
			vrele(vq);
			if (vq->v_rdev != dev) {
				vdrop(vq);
				goto restart;
			}
		}
		vqn = SLIST_NEXT(vq, v_cdevnext);
		if (vqn)
			vhold(vqn);
		vdrop(vq);
	}
	lwkt_reltoken(&spechash_token);
	dev_drevoke(dev);
	release_dev(dev);
	return (0);
}

/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
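 *
 * (Return value, noted for clarity: 1 if the vnode was recycled,
 * 0 if it was busy or already being reclaimed.)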
 */
int
vrecycle(struct vnode *vp)
{
	if (vp->v_sysref.refcnt <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}

/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 */
int
vmaxiosize(struct vnode *vp)
{
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		return(vp->v_rdev->si_iosize_max);
	} else {
		return(vp->v_mount->mnt_iosize_max);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for reuse.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 *	 deactivated (VOP_INACTIVE), or on a vnode which has already
 *	 been reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(vp->v_lock.lk_exclusivecount == 1);

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;

	lwkt_gettoken(&spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&spechash_token);
			return (1);
		}
	}
	lwkt_reltoken(&spechash_token);
	return (0);
}

/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
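 *
 * (Added note: the count sums v_opencount over every vnode aliased
 * to the device, so it reflects opens rather than vnode references.)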
 */
int
count_dev(cdev_t dev)
{
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			count += vp->v_opencount;
		}
		lwkt_reltoken(&spechash_token);
	}
	return(count);
}

int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}

/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff)
{
	vm_object_t object;
	int error = 0;

retry:
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if (object == vp->v_object)
			break;
		vm_object_drop(object);
	}

	if (object == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff);

		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		vm_object_hold(object);
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			vn_unlock(vp);
			if (vp->v_object == object)
				vm_object_dead_sleep(object, "vodead");
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			vm_object_drop(object);
			goto retry;
		}
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vsetflags(vp, VOBJBUF);
	vm_object_drop(object);

	return (error);
}


/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, sysrefs %d, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_sysref.refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
	mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;
	int ismember;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
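	 *
	 * (Worked example, added for clarity: for a non-root owner
	 * requesting acc_mode = VREAD|VWRITE, mask below becomes
	 * S_IRUSR|S_IWUSR and access is granted only if both bits are
	 * set in file_mode.)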
	 */
	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	ismember = groupmember(gid, cred);
	if (cred->cr_svgid == gid || ismember) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}

#ifdef DDB
#include <ddb/ddb.h>

static int db_show_locked_vnodes(struct mount *mp, void *data);

/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	kprintf("Locked vnodes\n");
	mountlist_scan(db_show_locked_vnodes, NULL,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
}

static int
db_show_locked_vnodes(struct mount *mp, void *data __unused)
{
	struct vnode *vp;

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		if (vn_islocked(vp))
			vprint(NULL, vp);
	}
	return(0);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	int maxtypenum;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		vfsp = vfsconf_find_by_typenum(name[0]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		maxtypenum = vfsconf_get_maxtypenum();
		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		vfsp = vfsconf_find_by_typenum(name[2]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
{
	int error;
	struct ovfsconf ovfs;
	struct sysctl_req *req = (struct sysctl_req*) data;

	bzero(&ovfs, sizeof(ovfs));
	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
	strcpy(ovfs.vfc_name, vfsp->vfc_name);
	ovfs.vfc_index = vfsp->vfc_typenum;
	ovfs.vfc_refcount = vfsp->vfc_refcount;
	ovfs.vfc_flags = vfsp->vfc_flags;
	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
	if (error)
		return error;	/* abort iteration with error code */
	else
		return 0;	/* continue iterating with next element */
}

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
}

#endif /* 1 || COMPAT_PRELITE2 */

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL) {
/*		if (vp->v_type != VBLK)
			dev = get_dev(vp->v_uminor, vp->v_umajor); */
	}
	if (dev != NULL && dev->si_mountpoint)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems.  The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */

static int vfs_umountall_callback(struct mount *mp, void *data);

void
vfs_unmountall(void)
{
	int count;

	do {
		count = mountlist_scan(vfs_umountall_callback,
				       NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
	} while (count);
}

static
int
vfs_umountall_callback(struct mount *mp, void *data)
{
	int error;

	error = dounmount(mp, MNT_FORCE);
	if (error) {
		mountlist_remove(mp);
		kprintf("unmount of filesystem mounted from %s failed (",
			mp->mnt_stat.f_mntfromname);
		if (error == EBUSY)
			kprintf("BUSY)\n");
		else
			kprintf("%d)\n", error);
	}
	return(1);
}

/*
 * Checks the mount flags for parameter mp and puts the names comma-separated
 * into a string buffer buf with a size limit specified by len.
 *
 * It returns the number of bytes written into buf, and (*errorp) will be
 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was
 * not large enough).
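 *
 * (Illustrative output, assuming the default option table below:
 * flags = MNT_LOCAL | MNT_RDONLY yields the string "local, read-only".)
 *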
 * The buffer will be 0-terminated if len was not 0.
 */
size_t
vfs_flagstostr(int flags, const struct mountctl_opt *optp,
	       char *buf, size_t len, int *errorp)
{
	static const struct mountctl_opt optnames[] = {
		{ MNT_ASYNC,		"asynchronous" },
		{ MNT_EXPORTED,		"NFS exported" },
		{ MNT_LOCAL,		"local" },
		{ MNT_NOATIME,		"noatime" },
		{ MNT_NODEV,		"nodev" },
		{ MNT_NOEXEC,		"noexec" },
		{ MNT_NOSUID,		"nosuid" },
		{ MNT_NOSYMFOLLOW,	"nosymfollow" },
		{ MNT_QUOTA,		"with-quotas" },
		{ MNT_RDONLY,		"read-only" },
		{ MNT_SYNCHRONOUS,	"synchronous" },
		{ MNT_UNION,		"union" },
		{ MNT_NOCLUSTERR,	"noclusterr" },
		{ MNT_NOCLUSTERW,	"noclusterw" },
		{ MNT_SUIDDIR,		"suiddir" },
		{ MNT_SOFTDEP,		"soft-updates" },
		{ MNT_IGNORE,		"ignore" },
		{ 0,			NULL }
	};
	int bwritten;
	int bleft;
	int optlen;
	int actsize;

	*errorp = 0;
	bwritten = 0;
	bleft = len - 1;	/* leave room for trailing \0 */

	/*
	 * Checks the size of the string.  If it contains
	 * any data, then we will append the new flags to
	 * it.
	 */
	actsize = strlen(buf);
	if (actsize > 0)
		buf += actsize;

	/* Default flags if no flags passed */
	if (optp == NULL)
		optp = optnames;

	if (bleft < 0) {	/* degenerate case, 0-length buffer */
		*errorp = EINVAL;
		return(0);
	}

	for (; flags && optp->o_opt; ++optp) {
		if ((flags & optp->o_opt) == 0)
			continue;
		optlen = strlen(optp->o_name);
		if (bwritten || actsize > 0) {
			if (bleft < 2) {
				*errorp = ENOSPC;
				break;
			}
			buf[bwritten++] = ',';
			buf[bwritten++] = ' ';
			bleft -= 2;
		}
		if (bleft < optlen) {
			*errorp = ENOSPC;
			break;
		}
		bcopy(optp->o_name, buf + bwritten, optlen);
		bwritten += optlen;
		bleft -= optlen;
		flags &= ~optp->o_opt;
	}

	/*
	 * Space already reserved for trailing \0
	 */
	buf[bwritten] = 0;
	return (bwritten);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
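 *
 * (Added note: per-address-family radix trees rooted at
 * nep->ne_rtable[] hold the individual netcred entries; a
 * zero-length address installs the default export in
 * nep->ne_defexported instead.)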
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
		  const struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = NULL;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
		return (EINVAL);
	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == NULL) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		SLIST_FOREACH(dom, &domains, dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == NULL) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh,
	    np->netc_rnodes);
	if (rn == NULL || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	kfree(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			kfree((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep,
	   const struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}


/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055)
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
		const struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info, the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				kfree(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		int namelen;

		error = vn_get_namelen(rvp, &namelen);
		if (error)
			return (error);
		nfs_pub.np_index = kmalloc(namelen, M_TEMP, M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    namelen, NULL);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			kfree(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		  struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055).
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
		const struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info; the FS is
	 * no longer exported.  May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				kfree(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) {
		vput(rvp);
		return (error);
	}

	/*
	 * If an indexfile was specified, pull it in.  Keep the root
	 * vnode until we are done with it; vn_get_namelen() needs it.
	 */
	if (argp->ex_indexfile != NULL) {
		int namelen;

		error = vn_get_namelen(rvp, &namelen);
		if (error) {
			vput(rvp);
			return (error);
		}
		nfs_pub.np_index = kmalloc(namelen, M_TEMP, M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
				  namelen, NULL);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			kfree(nfs_pub.np_index, M_TEMP);
			nfs_pub.np_index = NULL;
			vput(rvp);
			return (error);
		}
	}
	vput(rvp);

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		  struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Look up the address in the export list first.
		 */
		if (nam != NULL) {
			saddr = nam;
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((char *)saddr,
							      rnh);
				if (np && (np->netc_rnodes->rn_flags & RNF_ROOT))
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && (mp->mnt_flag & MNT_DEFEXPORTED))
			np = &nep->ne_defexported;
	}
	return (np);
}
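/*
 * Illustrative sketch only: roughly how an NFS request handler maps a
 * client's socket address to the credentials it should run with.
 * 'example_export_cred' is hypothetical; the real server also checks
 * the filehandle and per-request flags.
 */
#if 0
static struct ucred *
example_export_cred(struct mount *mp, struct netexport *nep,
		    struct sockaddr *nam, int *exflagsp)
{
	struct netcred *np;

	np = vfs_export_lookup(mp, nep, nam);
	if (np == NULL)
		return (NULL);		/* filesystem not exported to client */
	*exflagsp = np->netc_exflags;
	return (&np->netc_anon);	/* e.g. the mapped anonymous cred */
}
#endif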
/*
 * Perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 *	 but vnode_pager_putpages() doesn't lock the vnode.  We have to
 *	 do it way up in this high-level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
	int vmsc_flags;

	/*
	 * tmpfs sets this flag to prevent msync(), sync, and the
	 * filesystem periodic syncer from trying to flush VM pages
	 * to swap.  Only pure memory pressure flushes tmpfs VM pages
	 * to swap.
	 */
	if (mp->mnt_kern_flag & MNTK_NOMSYNC)
		return;

	/*
	 * Ok, scan the vnodes for work.
	 */
	vmsc_flags = VMSC_GETVP;
	if (flags != MNT_WAIT)
		vmsc_flags |= VMSC_NOWAIT;
	vmntvnodescan(mp, vmsc_flags,
		      vfs_msync_scan1, vfs_msync_scan2,
		      (void *)(intptr_t)flags);
}

/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes; we cannot afford to do anything heavy-weight until we have a
 * fairly good indication that there is work to do.
 */
static int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	int flags = (int)(intptr_t)data;

	if ((vp->v_flag & VRECLAIMED) == 0) {
		if (vshouldmsync(vp))
			return(0);	/* call scan2 */
		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
		    (vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
			return(0);	/* call scan2 */
		}
	}

	/*
	 * do not call scan2, continue the loop
	 */
	return(-1);
}

/*
 * This callback is handed a locked vnode.
 */
static int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	vm_object_t obj;
	int flags = (int)(intptr_t)data;

	if (vp->v_flag & VRECLAIMED)
		return(0);

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
		if ((obj = vp->v_object) != NULL) {
			vm_object_page_clean(obj, 0, 0,
			    flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
		}
	}
	return(0);
}

/*
 * Wake up anyone interested in vp because it is being revoked.
 */
void
vn_gone(struct vnode *vp)
{
	lwkt_gettoken(&vp->v_token);
	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE);
	lwkt_reltoken(&vp->v_token);
}

/*
 * Extract the cdev_t from a VBLK or VCHR.  The vnode must have been
 * opened (or v_rdev might be NULL).
 */
cdev_t
vn_todev(struct vnode *vp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (NULL);
	KKASSERT(vp->v_rdev != NULL);
	return (vp->v_rdev);
}

/*
 * Check if vnode represents a disk device.  The vnode does not need to
 * be opened.
 *
 * MPALMOSTSAFE
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	cdev_t dev;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}

	dev = vp->v_rdev;

	if (dev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (dev_is_good(dev) == 0) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

/*
 * Retrieve the maximum filename length for the filesystem backing vp
 * via VOP_PATHCONF(_PC_NAME_MAX).
 */
int
vn_get_namelen(struct vnode *vp, int *namelen)
{
	int error;
	register_t retval[2];

	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
	if (error)
		return (error);
	*namelen = (int)retval[0];
	return (0);
}

/*
 * Helper for VOP_READDIR implementations: format one directory entry
 * into the uio.  Returns non-zero (without setting *error) if the
 * entry does not fit in the remaining uio space, otherwise returns 0
 * with *error set to the uiomove() result.
 */
int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
		 uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}

/*
 * Mark the vnode for an access-time update unless the filesystem is
 * mounted read-only or noatime.
 */
void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
		VOP_MARKATIME(vp, cred);
	}
}
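/*
 * Illustrative sketch only (hypothetical 'examplefs' helper): the
 * usual vop_write_dirent() pattern inside a VOP_READDIR loop.  A
 * non-zero return means the entry did not fit in the remaining uio
 * space; the scan stops without error and resumes on a later call.
 */
#if 0
static int
examplefs_emit_dirent(struct uio *uio, ino_t ino, const char *name,
		      int *fullp)
{
	int error = 0;

	if (vop_write_dirent(&error, uio, ino, DT_REG,
			     (uint16_t)strlen(name), name))
		*fullp = 1;	/* out of space, stop the directory scan */
	return (error);		/* otherwise the uiomove() result */
}
#endif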