/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/mplock2.h>
#include <vm/vm_page2.h>

#include <netinet/in.h>

static MALLOC_DEFINE(M_NETCRED, "Export Host", "Export host address structure");

int numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes allocated");
int verbose_reclaims;
SYSCTL_INT(_debug, OID_AUTO, verbose_reclaims, CTLFLAG_RD, &verbose_reclaims, 0,
    "Output filename of reclaimed vnode(s)");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls,
    0, "Number of times buffers have been reassigned to the proper list");

static int check_buf_overlap = 2;	/* invasive check */
SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW, &check_buf_overlap,
    0, "Enable overlapping buffer checks");

int nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */

int maxvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &maxvnodes, 0, "Maximum number of vnodes");

static struct radix_node_head *vfs_create_addrlist_af(int af,
			struct netexport *nep);
static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static void	vfs_free_addrlist_af (struct radix_node_head **prnh);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
			const struct export_args *argp);

int prtactive = 0;	/* 1 => print out reclaim of active vnodes */

/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}
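
/*
 * Example (hypothetical caller, for illustration only): RB_SCAN uses a
 * compare callback returning <0 / 0 / >0 to prune the traversal and a
 * per-buffer callback applied to every buffer the compare accepts.  A
 * scan over a vnode's dirty tree might look like:
 *
 *	struct my_info info = { .vp = vp };
 *	RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
 *		my_cmp, my_callback, &info);
 *
 * my_cmp() and my_callback() are placeholders for callbacks such as the
 * vtruncbuf_bp_*() and vfsync_*() functions defined later in this file.
 */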

/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	int factor1;
	int factor2;

	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM.
	 *
	 * WARNING!  For machines with 64-256M of ram we have to be sure
	 *	     that the default limit scales down well due to HAMMER
	 *	     taking up significantly more memory per-vnode vs UFS.
	 *	     We want around ~5800 on a 128M machine.
	 *
	 * WARNING!  Now that KVM is substantially larger (e.g. 8TB+),
	 *	     also limit maxvnodes based on a 128GB metric.  This
	 *	     gives us something like ~3 million vnodes.  sysctl
	 *	     can be used to increase it further if desired.
	 *
	 *	     For disk caching purposes, filesystems like HAMMER1
	 *	     and HAMMER2 will or can be told to cache file data
	 *	     via the block device instead of excessively in vnodes.
	 */
	factor1 = 25 * (sizeof(struct vm_object) + sizeof(struct vnode));
	factor2 = 30 * (sizeof(struct vm_object) + sizeof(struct vnode));
	maxvnodes = imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
			 KvaSize / factor2);
	maxvnodes = imax(maxvnodes, maxproc * 8);
	maxvnodes = imin(maxvnodes, 64LL * 1024 * 1024 * 1024 / factor2);

	lwkt_token_init(&spechash_token, "spechash");
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "Precision of file timestamps");

/*
 * Get a current timestamp.
 *
 * MPSAFE
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	/* va_*_uuid fields are only valid if related flags are set */
}
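
/*
 * Example (hypothetical caller, for illustration only): a filesystem
 * updating an inode's mtime takes whatever precision the
 * vfs.timestamp_precision sysctl allows:
 *
 *	struct timespec ts;
 *
 *	vfs_timestamp(&ts);
 *	ip->i_mtime = ts;		(ip is a placeholder inode)
 *
 * Similarly, a VOP_SETATTR() caller starts from a clean slate with
 * vattr_null(&va) and fills in only the fields it intends to change;
 * everything left at VNOVAL is ignored by the filesystem.
 */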

/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
	int clean;
};

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	vm_object_t object;
	int error;

	lwkt_gettoken(&vp->v_token);

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
	 */
	if (flags & V_SAVE) {
		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
		if (error)
			goto done;
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
				goto done;
#if 0
			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.  Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (bio_track_active(&vp->v_track_write) ||
			    !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
#endif
		}
	}
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left, wait for all I/O
	 * to complete.  At least one pass is required.  We might block
	 * in the pip code so we have to re-check.  Order is important.
	 */
	do {
		/*
		 * Flush buffer cache
		 */
		if (!RB_EMPTY(&vp->v_rbclean_tree)) {
			info.clean = 1;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
					NULL, vinvalbuf_bp, &info);
		}
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			info.clean = 0;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					NULL, vinvalbuf_bp, &info);
		}

		/*
		 * Wait for I/O completion.
		 */
		bio_track_wait(&vp->v_track_write, 0, 0);
		if ((object = vp->v_object) != NULL)
			refcount_wait(&object->paging_in_progress, "vnvlbx");
	} while (bio_track_active(&vp->v_track_write) ||
		 !RB_EMPTY(&vp->v_rbclean_tree) ||
		 !RB_EMPTY(&vp->v_rbdirty_tree));

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
				      (flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	error = 0;
done:
	lwkt_reltoken(&vp->v_token);
	return (error);
}
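
/*
 * Example (hypothetical caller, for illustration only): reclaim paths
 * use V_SAVE to fsync dirty data before invalidation, while a revoke
 * path can simply throw everything away:
 *
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);	(flush, then invalidate)
 *	error = vinvalbuf(vp, 0, 0, 0);		(discard outright)
 *
 * The vnode must be locked by the caller in both cases.
 */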

static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		atomic_subtract_int(&bp->b_refs, 1);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}
	KKASSERT(bp->b_vp == info->vp);

	/*
	 * Must check clean/dirty status after successfully locking as
	 * it may race.
	 */
	if ((info->clean && (bp->b_flags & B_DELWRI)) ||
	    (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * NOTE:  NO B_LOCKED CHECK.  Also no buf_checkwrite()
	 * check.  This code will write out the buffer, period.
	 */
	bremfree(bp);
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		cluster_awrite(bp);
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
		 */
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
	} else {
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		brelse(bp);
	}
	return(0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);

struct vtruncbuf_info {
	struct vnode *vp;
	off_t truncloffset;
	int clean;
};

int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	struct vtruncbuf_info info;
	const char *filename;
	int count;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		info.truncloffset = length + (blksize - count);
	else
		info.truncloffset = length;
	info.vp = vp;

	lwkt_gettoken(&vp->v_token);
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, &info);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 *
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	vnode_pager_setsize(vp, length);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only
	 */
	spin_lock(&vp->v_spin);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock(&vp->v_spin);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		if (count) {
			kprintf("Warning: vtruncbuf(): Had to re-clean %d "
				"left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);

	return (0);
}

/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (bp->b_loffset >= info->truncloffset)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 *
	 * We must always revalidate the buffer after locking it to deal
	 * with MP races.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_trunc_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
 * blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((bp->b_flags & B_DELWRI) == 0 ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_metasync_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		if (bp->b_vp == info->vp)
			bawrite(bp);
		else
			bwrite(bp);
	}
	return(1);
}
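
/*
 * Example (hypothetical caller, for illustration only): a filesystem's
 * truncate path typically adjusts the inode size first and then throws
 * away the buffers and pages beyond the new EOF:
 *
 *	ip->i_size = new_size;		(ip/new_size are placeholders)
 *	error = vtruncbuf(vp, new_size, blksize);
 *
 * Buffers are destroyed in whole-block units, so new_size is rounded
 * up to the next block boundary internally.
 */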

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
 *
 * Caller must ref the vnode but does not have to lock it.
 */
static int vfsync_wait_output(struct vnode *vp,
			    int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused);
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int fastpass;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
	int (*cmpfunc)(struct buf *, void *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
       int (*checkdef)(struct buf *),
       int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vp->v_token);

	switch(waitfor) {
	case MNT_LAZY | MNT_NOWAIT:
	case MNT_LAZY:
		/*
		 * Lazy (filesystem syncer typical).  Asynchronous, plus limit
		 * the number of data (not meta) pages we try to flush to 1MB.
		 * A non-zero return means that lazy limit was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		info.cmpfunc = vfsync_lazy_range_cmp;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
		 */
		info.cmpfunc = vfsync_data_only_cmp;
		info.fastpass = 1;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		info.fastpass = 0;
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			info.cmpfunc = vfsync_dummy_cmp;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs) {
				kprintf("Warning: vfsync skipped %d dirty "
					"buf%s in pass2!\n",
					info.skippedbufs,
					((info.skippedbufs > 1) ?
"s" : "")); 717 } 718 } 719 while (error == 0 && passes > 0 && 720 !RB_EMPTY(&vp->v_rbdirty_tree) 721 ) { 722 info.skippedbufs = 0; 723 if (--passes == 0) { 724 info.synchronous = 1; 725 info.syncdeps = 1; 726 } 727 info.cmpfunc = vfsync_dummy_cmp; 728 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 729 vfsync_bp, &info); 730 if (error < 0) 731 error = -error; 732 info.syncdeps = 1; 733 if (error == 0) 734 error = vfsync_wait_output(vp, waitoutput); 735 if (info.skippedbufs && passes == 0) { 736 kprintf("Warning: vfsync skipped %d dirty " 737 "buf%s in final pass!\n", 738 info.skippedbufs, 739 ((info.skippedbufs > 1) ? "s" : "")); 740 } 741 } 742 #if 0 743 /* 744 * This case can occur normally because vnode lock might 745 * not be held. 746 */ 747 if (!RB_EMPTY(&vp->v_rbdirty_tree)) 748 kprintf("dirty bufs left after final pass\n"); 749 #endif 750 break; 751 } 752 lwkt_reltoken(&vp->v_token); 753 754 return(error); 755 } 756 757 static int 758 vfsync_wait_output(struct vnode *vp, 759 int (*waitoutput)(struct vnode *, struct thread *)) 760 { 761 int error; 762 763 error = bio_track_wait(&vp->v_track_write, 0, 0); 764 if (waitoutput) 765 error = waitoutput(vp, curthread); 766 return(error); 767 } 768 769 static int 770 vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused) 771 { 772 return(0); 773 } 774 775 static int 776 vfsync_data_only_cmp(struct buf *bp, void *data) 777 { 778 if (bp->b_loffset < 0) 779 return(-1); 780 return(0); 781 } 782 783 static int 784 vfsync_meta_only_cmp(struct buf *bp, void *data) 785 { 786 if (bp->b_loffset < 0) 787 return(0); 788 return(1); 789 } 790 791 static int 792 vfsync_lazy_range_cmp(struct buf *bp, void *data) 793 { 794 struct vfsync_info *info = data; 795 796 if (bp->b_loffset < info->vp->v_lazyw) 797 return(-1); 798 return(0); 799 } 800 801 static int 802 vfsync_bp(struct buf *bp, void *data) 803 { 804 struct vfsync_info *info = data; 805 struct vnode *vp = info->vp; 806 int error; 807 808 if (info->fastpass) { 809 /* 810 * Ignore buffers that we cannot immediately lock. 811 */ 812 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 813 /* 814 * Removed BUF_TIMELOCK(..., 1), even a 1-tick 815 * delay can mess up performance 816 * 817 * Another reason is that during a dirty-buffer 818 * scan a clustered write can start I/O on buffers 819 * ahead of the scan, causing the scan to not 820 * get a lock here. Usually this means the write 821 * is already in progress so, in fact, we *want* 822 * to skip the buffer. 823 */ 824 ++info->skippedbufs; 825 return(0); 826 } 827 } else if (info->synchronous == 0) { 828 /* 829 * Normal pass, give the buffer a little time to become 830 * available to us. 831 */ 832 if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst2", hz / 10)) { 833 ++info->skippedbufs; 834 return(0); 835 } 836 } else { 837 /* 838 * Synchronous pass, give the buffer a lot of time before 839 * giving up. 840 */ 841 if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst3", hz * 10)) { 842 ++info->skippedbufs; 843 return(0); 844 } 845 } 846 847 /* 848 * We must revalidate the buffer after locking. 849 */ 850 if ((bp->b_flags & B_DELWRI) == 0 || 851 bp->b_vp != info->vp || 852 info->cmpfunc(bp, data)) { 853 BUF_UNLOCK(bp); 854 return(0); 855 } 856 857 /* 858 * If syncdeps is not set we do not try to write buffers which have 859 * dependancies. 

static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	if (info->fastpass) {
		/*
		 * Ignore buffers that we cannot immediately lock.
		 */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			/*
			 * Removed BUF_TIMELOCK(..., 1), even a 1-tick
			 * delay can mess up performance
			 *
			 * Another reason is that during a dirty-buffer
			 * scan a clustered write can start I/O on buffers
			 * ahead of the scan, causing the scan to not
			 * get a lock here.  Usually this means the write
			 * is already in progress so, in fact, we *want*
			 * to skip the buffer.
			 */
			++info->skippedbufs;
			return(0);
		}
	} else if (info->synchronous == 0) {
		/*
		 * Normal pass, give the buffer a little time to become
		 * available to us.
		 */
		if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst2", hz / 10)) {
			++info->skippedbufs;
			return(0);
		}
	} else {
		/*
		 * Synchronous pass, give the buffer a lot of time before
		 * giving up.
		 */
		if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst3", hz * 10)) {
			++info->skippedbufs;
			return(0);
		}
	}

	/*
	 * We must revalidate the buffer after locking.
	 */
	if ((bp->b_flags & B_DELWRI) == 0 ||
	    bp->b_vp != info->vp ||
	    info->cmpfunc(bp, data)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * If syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync.  If not the VFS may have
	 * set B_LOCKED so we have to cycle the buffer.
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flush.  An error may be returned and will
		 * stop the scan.
		 */
		bremfree(bp);
		error = bwrite(bp);
	} else {
		/*
		 * Asynchronous flush.  We use the error return to support
		 * MNT_LAZY flushes.
		 *
		 * In low-memory situations we revert to synchronous
		 * operation.  This should theoretically prevent the I/O
		 * path from exhausting memory in a non-recoverable way.
		 */
		vp->v_lazyw = bp->b_loffset;
		bremfree(bp);
		if (vm_page_count_min(0)) {
			/* low memory */
			info->lazycount += bp->b_bufsize;
			bwrite(bp);
		} else {
			/* normal */
			info->lazycount += cluster_awrite(bp);
			waitrunningbufspace();
			/*vm_wait_nominal();*/
		}
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}

/*
 * Associate a buffer with a vnode.
 *
 * MPSAFE
 */
int
bgetvp(struct vnode *vp, struct buf *bp, int testsize)
{
	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	/*
	 * Insert onto list for new vnode.
	 */
	lwkt_gettoken(&vp->v_token);

	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
		lwkt_reltoken(&vp->v_token);
		return (EEXIST);
	}

	/*
	 * Diagnostics (mainly for HAMMER debugging).  Check for
	 * overlapping buffers.
	 */
	if (check_buf_overlap) {
		struct buf *bx;
		bx = buf_rb_hash_RB_PREV(bp);
		if (bx) {
			if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) {
				kprintf("bgetvp: overlapl %016jx/%d %016jx "
					"bx %p bp %p\n",
					(intmax_t)bx->b_loffset,
					bx->b_bufsize,
					(intmax_t)bp->b_loffset,
					bx, bp);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
		bx = buf_rb_hash_RB_NEXT(bp);
		if (bx) {
			if (bp->b_loffset + testsize > bx->b_loffset) {
				kprintf("bgetvp: overlapr %016jx/%d %016jx "
					"bp %p bx %p\n",
					(intmax_t)bp->b_loffset,
					testsize,
					(intmax_t)bx->b_loffset,
					bp, bx);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
	}
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	/*vhold(vp);*/
	lwkt_reltoken(&vp->v_token);
	return(0);
}
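
/*
 * Example (hypothetical caller, for illustration only): buffer-cache
 * code associates a buffer when it is instantiated and disassociates
 * it when the buffer is torn down:
 *
 *	if ((error = bgetvp(vp, bp, size)) != 0)
 *		...a buffer already exists at that loffset...
 *	...
 *	brelvp(bp);		(removes bp from vp's RB trees)
 */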

/*
 * Disassociate a buffer from a vnode.
 *
 * MPSAFE
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	lwkt_gettoken(&vp->v_token);
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}

	/*
	 * Only remove from synclist when no dirty buffers are left AND
	 * the VFS has not flagged the vnode's inode as being dirty.
	 */
	if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vn_syncer_remove(vp, 0);
	}
	bp->b_vp = NULL;

	lwkt_reltoken(&vp->v_token);

	/*vdrop(vp);*/
}

/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 *
 * Must be called with vp->v_token held.
 * MPSAFE
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	int delay;

	ASSERT_LWKT_TOKEN_HELD(&vp->v_token);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}

		/*
		 * Only remove from synclist when no dirty buffers are left
		 * AND the VFS has not flagged the vnode's inode as being
		 * dirty.
		 */
		if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) ==
		    VONWORKLST &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vn_syncer_remove(vp, 0);
		}
	}
}
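
/*
 * Example (hypothetical caller, for illustration only): code that sets
 * or clears B_DELWRI must migrate the buffer between the clean and
 * dirty RB trees under the vnode token, roughly:
 *
 *	lwkt_gettoken(&vp->v_token);
 *	bp->b_flags |= B_DELWRI;	(or &= ~B_DELWRI)
 *	reassignbuf(bp);
 *	lwkt_reltoken(&vp->v_token);
 *
 * In practice this is done by helpers such as bdirty()/bundirty().
 */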

/*
 * Create a vnode for a block device.  Used for mounting the root file
 * system.
 *
 * A vref()'d vnode is returned.
 */
extern struct vop_ops *devfs_vnode_dev_vops_p;
int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
				&nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
#if 0
	vp->v_rdev = dev;
#endif
	v_associate_rdev(vp, dev);
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}

int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&spechash_token);
	return(0);
}

void
v_release_rdev(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&spechash_token);
	}
}

/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}

/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 *
 * May only be called if the vnode is in a known state (i.e. being prevented
 * from being deallocated by some other condition such as a vfs inode hold).
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (VREFCNT(vp) <= 1)
		vgone_vxlocked(vp);
	vx_put(vp);
}
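
/*
 * Example (hypothetical caller, for illustration only): a filesystem
 * discarding an in-memory inode can attempt an opportunistic clean of
 * the associated vnode:
 *
 *	vclean_unlocked(ip->i_vnode);	(ip is a placeholder inode)
 *
 * The call silently does nothing if anyone else still references the
 * vnode; per the comment above, the filesystem can then check whether
 * its inode still references the vp.
 */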

/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;
	struct namecache *ncp;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;

	/*
	 * Set flag to interlock operation, flag finalization to ensure
	 * that the vnode winds up on the inactive list, and set v_act to 0.
	 */
	vsetflags(vp, VRECLAIMED);
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
	vp->v_act = 0;

	if (verbose_reclaims) {
		if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL)
			kprintf("Debug: reclaim %p %s\n", vp, ncp->nc_name);
	}

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution "
			"race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = (VREFCNT(vp) > 0);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK, NULL);
			else
				VOP_CLOSE(vp, FNONBLOCK, NULL);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
					" vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 *
	 * If the vnode is already dead don't try to deactivate it.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * If the vnode has an object, destroy it.
	 */
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if (object == vp->v_object)
			break;
		vm_object_drop(object);
	}

	if (object != NULL) {
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
			vm_object_drop(object);
			vclrflags(vp, VOBJBUF);
		} else {
			vm_pager_deallocate(object);
			vclrflags(vp, VOBJBUF);
			vm_object_drop(object);
		}
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	if (vp->v_flag & VOBJDIRTY)
		vclrobjdirty(vp);

	/*
	 * Reclaim the vnode if not already dead.
	 */
	if (vp->v_mount && VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_gone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vclrflags(vp, VINACTIVE | VRECLAIMED);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	struct vnode *vqn;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 *
	 * Releasing the vnode's rdev here can mess up specfs's call to
	 * device close, so don't do it.  The vnode has been disassociated
	 * and the device will be closed after the last ref on the related
	 * fp goes away (if not still open by e.g. the kernel).
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		return(0);
	}
	reference_dev(dev);
	lwkt_gettoken(&spechash_token);

restart:
	vqn = SLIST_FIRST(&dev->si_hlist);
	if (vqn)
		vhold(vqn);
	while ((vq = vqn) != NULL) {
		if (VREFCNT(vq) > 0) {
			vref(vq);
			fdrevoke(vq, DTYPE_VNODE, cred);
			/*v_release_rdev(vq);*/
			vrele(vq);
			if (vq->v_rdev != dev) {
				vdrop(vq);
				goto restart;
			}
		}
		vqn = SLIST_NEXT(vq, v_cdevnext);
		if (vqn)
			vhold(vqn);
		vdrop(vq);
	}
	lwkt_reltoken(&spechash_token);
	dev_drevoke(dev);
	release_dev(dev);
	return (0);
}

/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
 */
int
vrecycle(struct vnode *vp)
{
	if (VREFCNT(vp) <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}

/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 *
 * The returned value is clamped at MAXPHYS as most callers cannot use
 * buffers larger than that size.
 */
int
vmaxiosize(struct vnode *vp)
{
	int maxiosize;

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		maxiosize = vp->v_rdev->si_iosize_max;
	else
		maxiosize = vp->v_mount->mnt_iosize_max;

	if (maxiosize > MAXPHYS)
		maxiosize = MAXPHYS;
	return (maxiosize);
}
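
/*
 * Example (hypothetical caller, for illustration only): cluster and
 * strategy code sizes its I/O against this limit, e.g.:
 *
 *	int maxio = vmaxiosize(vp);
 *
 *	if (bp->b_bcount > maxio)
 *		...split the request or clamp the transfer size...
 */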

/*
 * Eliminate all activity associated with a vnode in preparation for
 * destruction.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 * deactivated (VOP_INACTIVE), or on a vnode which has already been
 * reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(lockinuse(&vp->v_lock));

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 *
	 * The vnode should have automatically been removed from the syncer
	 * list as syncer/dirty flags cleared during the cleaning.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Normally panic if the vnode is still dirty, unless we are doing
	 * a forced unmount (tmpfs typically).
	 */
	if (vp->v_flag & VONWORKLST) {
		if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
			/* force removal */
			vn_syncer_remove(vp, 1);
		} else {
			panic("vp %p still dirty in vgone after flush", vp);
		}
	}

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;

	lwkt_gettoken(&spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&spechash_token);
			return (1);
		}
	}
	lwkt_reltoken(&spechash_token);
	return (0);
}

/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
 */
int
count_dev(cdev_t dev)
{
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			count += vp->v_opencount;
		}
		lwkt_reltoken(&spechash_token);
	}
	return(count);
}

int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}

/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff)
{
	vm_object_t object;
	int error = 0;

	object = vp->v_object;
	if (object) {
		vm_object_hold(object);
		KKASSERT(vp->v_object == object);
	}

	if (object == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff);

		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.  Allow it to
		 * have zero refs.  It cannot be destroyed as long as it
		 * is associated with the vnode.
		 */
		vm_object_hold(object);
		atomic_add_int(&object->ref_count, -1);
		vrele(vp);
	} else {
		KKASSERT((object->flags & OBJ_DEAD) == 0);
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vsetflags(vp, VOBJBUF);
	vm_object_drop(object);

	return (error);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, refcnt %08x, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
	mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;
	int ismember;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
	 */
	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	ismember = groupmember(gid, cred);
	if (cred->cr_svgid == gid || ismember) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}
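
/*
 * Example (hypothetical caller, for illustration only): a VOP_ACCESS
 * implementation with plain POSIX semantics can usually delegate
 * directly, using attributes from its inode:
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *			ap->a_mode, ap->a_cred));
 *
 * ip and the a_* fields are placeholders for the filesystem's inode
 * and the VOP_ACCESS argument structure.
 */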

#ifdef DDB
#include <ddb/ddb.h>

static int db_show_locked_vnodes(struct mount *mp, void *data);

/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	kprintf("Locked vnodes\n");
	mountlist_scan(db_show_locked_vnodes, NULL,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
}

static int
db_show_locked_vnodes(struct mount *mp, void *data __unused)
{
	struct vnode *vp;

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		if (vn_islocked(vp))
			vprint(NULL, vp);
	}
	return(0);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	int maxtypenum;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		vfsp = vfsconf_find_by_typenum(name[0]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		maxtypenum = vfsconf_get_maxtypenum();
		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		vfsp = vfsconf_find_by_typenum(name[2]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
{
	int error;
	struct ovfsconf ovfs;
	struct sysctl_req *req = (struct sysctl_req*) data;

	bzero(&ovfs, sizeof(ovfs));
	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
	strcpy(ovfs.vfc_name, vfsp->vfc_name);
	ovfs.vfc_index = vfsp->vfc_typenum;
	ovfs.vfc_refcount = vfsp->vfc_refcount;
	ovfs.vfc_flags = vfsp->vfc_flags;
	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
	if (error)
		return error;	/* abort iteration with error code */
	else
		return 0;	/* continue iterating with next element */
}

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
}

#endif /* 1 || COMPAT_PRELITE2 */

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL) {
/*		if (vp->v_type != VBLK)
			dev = get_dev(vp->v_uminor, vp->v_umajor); */
	}
	if (dev != NULL && dev->si_mountpoint)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems.  The list is traversed in reverse order
 * of mounting to avoid dependencies.
 *
 * We want the umountall to be able to break out of its loop if a
 * failure occurs, after scanning all possible mounts, so the callback
 * returns 0 on error.
 *
 * NOTE: Do not call mountlist_remove(mp) on error any more, this will
 *	 confuse mountlist_scan()'s unbusy check.
 */
static int vfs_umountall_callback(struct mount *mp, void *data);

void
vfs_unmountall(int halting)
{
	int count;

	do {
		count = mountlist_scan(vfs_umountall_callback, &halting,
				       MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
	} while (count);
}

static
int
vfs_umountall_callback(struct mount *mp, void *data)
{
	int error;
	int halting = *(int *)data;

	/*
	 * NOTE: When halting, dounmount will disconnect but leave
	 *	 certain mount points intact.  e.g. devfs.
	 */
	error = dounmount(mp, MNT_FORCE, halting);
	if (error) {
		kprintf("unmount of filesystem mounted from %s failed (",
			mp->mnt_stat.f_mntfromname);
		if (error == EBUSY)
			kprintf("BUSY)\n");
		else
			kprintf("%d)\n", error);
		return 0;
	} else {
		return 1;
	}
}
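
/*
 * Example (hypothetical caller, for illustration only): the shutdown
 * path forcibly unmounts everything once processes are gone, roughly:
 *
 *	vfs_unmountall(1);	(halting != 0: leave e.g. devfs intact)
 *
 * The loop in vfs_unmountall() keeps rescanning until a full pass
 * unmounts nothing, which handles inter-mount dependencies.
 */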

/*
 * Checks the mount flags for parameter mp and puts the names comma-separated
 * into a string buffer buf with a size limit specified by len.
 *
 * It returns the number of bytes written into buf, and (*errorp) will be
 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was
 * not large enough).  The buffer will be 0-terminated if len was not 0.
 */
size_t
vfs_flagstostr(int flags, const struct mountctl_opt *optp,
	       char *buf, size_t len, int *errorp)
{
	static const struct mountctl_opt optnames[] = {
		{ MNT_RDONLY,		"read-only" },
		{ MNT_SYNCHRONOUS,	"synchronous" },
		{ MNT_NOEXEC,		"noexec" },
		{ MNT_NOSUID,		"nosuid" },
		{ MNT_NODEV,		"nodev" },
		{ MNT_AUTOMOUNTED,	"automounted" },
		{ MNT_ASYNC,		"asynchronous" },
		{ MNT_SUIDDIR,		"suiddir" },
		{ MNT_SOFTDEP,		"soft-updates" },
		{ MNT_NOSYMFOLLOW,	"nosymfollow" },
		{ MNT_TRIM,		"trim" },
		{ MNT_NOATIME,		"noatime" },
		{ MNT_NOCLUSTERR,	"noclusterr" },
		{ MNT_NOCLUSTERW,	"noclusterw" },
		{ MNT_EXRDONLY,		"NFS read-only" },
		{ MNT_EXPORTED,		"NFS exported" },
		/* Remaining NFS flags could come here */
		{ MNT_LOCAL,		"local" },
		{ MNT_QUOTA,		"with-quotas" },
		/* { MNT_ROOTFS,	"rootfs" }, */
		/* { MNT_IGNORE,	"ignore" }, */
		{ 0,			NULL}
	};
	int bwritten;
	int bleft;
	int optlen;
	int actsize;

	*errorp = 0;
	bwritten = 0;
	bleft = len - 1;	/* leave room for trailing \0 */

	/*
	 * Checks the size of the string.  If it contains
	 * any data, then we will append the new flags to
	 * it.
	 */
	actsize = strlen(buf);
	if (actsize > 0)
		buf += actsize;

	/* Default flags if no flags passed */
	if (optp == NULL)
		optp = optnames;

	if (bleft < 0) {	/* degenerate case, 0-length buffer */
		*errorp = EINVAL;
		return(0);
	}

	for (; flags && optp->o_opt; ++optp) {
		if ((flags & optp->o_opt) == 0)
			continue;
		optlen = strlen(optp->o_name);
		if (bwritten || actsize > 0) {
			if (bleft < 2) {
				*errorp = ENOSPC;
				break;
			}
			buf[bwritten++] = ',';
			buf[bwritten++] = ' ';
			bleft -= 2;
		}
		if (bleft < optlen) {
			*errorp = ENOSPC;
			break;
		}
		bcopy(optp->o_name, buf + bwritten, optlen);
		bwritten += optlen;
		bleft -= optlen;
		flags &= ~optp->o_opt;
	}

	/*
	 * Space already reserved for trailing \0
	 */
	buf[bwritten] = 0;
	return (bwritten);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
		  const struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = NULL;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
		return (EINVAL);
	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)kmalloc(i, M_NETCRED, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	NE_LOCK(nep);
	if (nep->ne_maskhead == NULL) {
		if (!rn_inithead((void **)&nep->ne_maskhead, NULL, 0)) {
			error = ENOBUFS;
			goto out;
		}
	}
	if ((rnh = vfs_create_addrlist_af(saddr->sa_family, nep)) == NULL) {
		error = ENOBUFS;
		goto out;
	}
	rn = (*rnh->rnh_addaddr)((char *)saddr, (char *)smask, rnh,
				 np->netc_rnodes);
	NE_UNLOCK(nep);
	if (rn == NULL || np != (struct netcred *)rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);

out:
	kfree(np, M_NETCRED);
	return (error);
}

/*
 * Free netcred structures installed in the netexport
 */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree(rn, M_NETCRED);

	return (0);
}
/*
 * Callback to free an element of the mask table installed in the
 * netexport.  These may be created indirectly and are not netcred
 * structures.
 */
static int
vfs_free_netcred_mask(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree(rn, M_RTABLE);

	return (0);
}

static struct radix_node_head *
vfs_create_addrlist_af(int af, struct netexport *nep)
{
	struct radix_node_head *rnh = NULL;
#if defined(INET) || defined(INET6)
	struct radix_node_head *maskhead = nep->ne_maskhead;
	int off;
#endif

	NE_ASSERT_LOCKED(nep);
#if defined(INET) || defined(INET6)
	KKASSERT(maskhead != NULL);
#endif
	switch (af) {
#ifdef INET
	case AF_INET:
		if ((rnh = nep->ne_inethead) == NULL) {
			off = offsetof(struct sockaddr_in, sin_addr) << 3;
			if (!rn_inithead((void **)&rnh, maskhead, off))
				return (NULL);
			nep->ne_inethead = rnh;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if ((rnh = nep->ne_inet6head) == NULL) {
			off = offsetof(struct sockaddr_in6, sin6_addr) << 3;
			if (!rn_inithead((void **)&rnh, maskhead, off))
				return (NULL);
			nep->ne_inet6head = rnh;
		}
		break;
#endif
	}
	return (rnh);
}

/*
 * Helper function for freeing netcred elements
 */
static void
vfs_free_addrlist_af(struct radix_node_head **prnh)
{
	struct radix_node_head *rnh = *prnh;

	(*rnh->rnh_walktree) (rnh, vfs_free_netcred, rnh);
	kfree(rnh, M_RTABLE);
	*prnh = NULL;
}

/*
 * Helper function for freeing mask elements
 */
static void
vfs_free_addrlist_masks(struct radix_node_head **prnh)
{
	struct radix_node_head *rnh = *prnh;

	(*rnh->rnh_walktree) (rnh, vfs_free_netcred_mask, rnh);
	kfree(rnh, M_RTABLE);
	*prnh = NULL;
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	NE_LOCK(nep);
	if (nep->ne_inethead != NULL)
		vfs_free_addrlist_af(&nep->ne_inethead);
	if (nep->ne_inet6head != NULL)
		vfs_free_addrlist_af(&nep->ne_inet6head);
	if (nep->ne_maskhead)
		vfs_free_addrlist_masks(&nep->ne_maskhead);
	NE_UNLOCK(nep);
}

int
vfs_export(struct mount *mp, struct netexport *nep,
	   const struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
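/*
 * Example (hypothetical sketch, not compiled in): a filesystem's
 * VFS_MOUNT update path typically just forwards the export_args it
 * received to vfs_export().  "examplefs_mount" and "um_export" are
 * illustrative names, not part of this file.
 */
#if 0
static int
examplefs_export(struct mount *mp, const struct export_args *argp)
{
	struct examplefs_mount *ump = (struct examplefs_mount *)mp->mnt_data;

	/* vfs_export() handles both MNT_DELEXPORT and MNT_EXPORTED */
	return (vfs_export(mp, &ump->um_export, argp));
}
#endif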
/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055).
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
		const struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info; the FS is
	 * no longer exported.  May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				kfree(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) {
		vput(rvp);
		return (error);
	}

	/*
	 * If an indexfile was specified, pull it in.  Hold onto the
	 * root vnode until we are done querying it.
	 */
	if (argp->ex_indexfile != NULL) {
		int namelen;

		error = vn_get_namelen(rvp, &namelen);
		if (error) {
			vput(rvp);
			return (error);
		}
		nfs_pub.np_index = kmalloc(namelen, M_TEMP, M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
				  namelen, NULL);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			kfree(nfs_pub.np_index, M_TEMP);
			nfs_pub.np_index = NULL;
			vput(rvp);
			return (error);
		}
	}
	vput(rvp);

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		  struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		NE_LOCK(nep);
		if (nam != NULL) {
			saddr = nam;
			switch (saddr->sa_family) {
#ifdef INET
			case AF_INET:
				rnh = nep->ne_inethead;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				rnh = nep->ne_inet6head;
				break;
#endif
			default:
				rnh = NULL;
				break;
			}
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((char *)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		NE_UNLOCK(nep);
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && (mp->mnt_flag & MNT_DEFEXPORTED))
			np = &nep->ne_defexported;
	}
	return (np);
}
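/*
 * Example (hypothetical sketch, not compiled in): an NFS-style server
 * would consult vfs_export_lookup() with the client's socket address
 * before honoring a request.  The function and variable names here are
 * illustrative, not part of this file.
 */
#if 0
static int
example_nfs_access(struct mount *mp, struct netexport *nep,
		   struct sockaddr *client, int wantwrite)
{
	struct netcred *np;

	np = vfs_export_lookup(mp, nep, client);
	if (np == NULL)
		return (EACCES);	/* address is not exported to */
	if ((np->netc_exflags & MNT_EXRDONLY) && wantwrite)
		return (EROFS);		/* read-only export */
	/* np->netc_anon supplies the anonymous/squashed credential */
	return (0);
}
#endif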
/*
 * Perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 *	 but vnode_pager_putpages() doesn't lock the vnode.  We have to
 *	 do it way up in this high level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
	int vmsc_flags;

	/*
	 * tmpfs sets this flag to prevent msync(), sync, and the
	 * filesystem periodic syncer from trying to flush VM pages
	 * to swap.  Only pure memory pressure flushes tmpfs VM pages
	 * to swap.
	 */
	if (mp->mnt_kern_flag & MNTK_NOMSYNC)
		return;

	/*
	 * Ok, scan the vnodes for work.  If the filesystem is using the
	 * syncer thread feature we can use vsyncscan() instead of
	 * vmntvnodescan(), which is much faster.
	 */
	vmsc_flags = VMSC_GETVP;
	if (flags != MNT_WAIT)
		vmsc_flags |= VMSC_NOWAIT;

	if (mp->mnt_kern_flag & MNTK_THR_SYNC) {
		vsyncscan(mp, vmsc_flags, vfs_msync_scan2,
			  (void *)(intptr_t)flags);
	} else {
		vmntvnodescan(mp, vmsc_flags,
			      vfs_msync_scan1, vfs_msync_scan2,
			      (void *)(intptr_t)flags);
	}
}
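/*
 * Illustrative sketch (not compiled in) of the two-phase callback
 * convention used by vmntvnodescan(): the scan1 callback runs as a cheap
 * pre-check without the vnode lock and returns -1 to skip or 0 to
 * proceed; the scan2 callback then receives the vnode held and locked
 * (VMSC_GETVP) and does the real work.  The example_* names below are
 * hypothetical.
 */
#if 0
static int
example_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	/* cheap, unlocked test; -1 means "do not call scan2" */
	return ((vp->v_flag & VOBJDIRTY) ? 0 : -1);
}

static int
example_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	/* vp is referenced and locked here; do the expensive work */
	return (0);
}

static void
example_scan_all(struct mount *mp)
{
	vmntvnodescan(mp, VMSC_GETVP, example_scan1, example_scan2, NULL);
}
#endif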
/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes, so we cannot afford to do anything heavy-weight until we have
 * a fairly good indication that there is work to do.
 */
static int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	int flags = (int)(intptr_t)data;

	if ((vp->v_flag & VRECLAIMED) == 0) {
		if (vp->v_auxrefs == 0 && VREFCNT(vp) <= 0 &&
		    vp->v_object) {
			return(0);	/* call scan2 */
		}
		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
		    (vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
			return(0);	/* call scan2 */
		}
	}

	/*
	 * do not call scan2, continue the loop
	 */
	return(-1);
}

/*
 * This callback is handed a locked vnode.
 */
static int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	vm_object_t obj;
	int flags = (int)(intptr_t)data;
	int opcflags;

	if (vp->v_flag & VRECLAIMED)
		return(0);

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
		if ((obj = vp->v_object) != NULL) {
			if (flags == MNT_WAIT) {
				/*
				 * VFS_MSYNC is called with MNT_WAIT when
				 * unmounting.
				 */
				opcflags = OBJPC_SYNC;
			} else if (vp->v_writecount || obj->ref_count) {
				/*
				 * VFS_MSYNC is otherwise called via the
				 * periodic filesystem sync or the 'sync'
				 * command.  Honor MADV_NOSYNC / MAP_NOSYNC
				 * if the file is open for writing or memory
				 * mapped.  Pages flagged PG_NOSYNC will not
				 * be automatically flushed at this time.
				 *
				 * The obj->ref_count test is not perfect
				 * since temporary refs may be present, but
				 * the periodic filesystem sync will
				 * ultimately catch it if the file is not
				 * open and not mapped.
				 */
				opcflags = OBJPC_NOSYNC;
			} else {
				/*
				 * If the file is no longer open for writing
				 * and also no longer mapped, do not honor
				 * MAP_NOSYNC.  That is, fully synchronize
				 * the file.
				 *
				 * This still occurs on the periodic fs sync,
				 * so frontend programs which turn the file
				 * over quickly enough can still avoid the
				 * sync, but ultimately we do want to flush
				 * even MADV_NOSYNC pages once the file is
				 * no longer mapped or open for writing.
				 */
				opcflags = 0;
			}
			vm_object_page_clean(obj, 0, 0, opcflags);
		}
	}
	return(0);
}

/*
 * Wake up anyone interested in vp because it is being revoked.
 */
void
vn_gone(struct vnode *vp)
{
	lwkt_gettoken(&vp->v_token);
	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE);
	lwkt_reltoken(&vp->v_token);
}

/*
 * Extract the cdev_t from a VBLK or VCHR.  The vnode must have been opened
 * (or v_rdev might be NULL).
 */
cdev_t
vn_todev(struct vnode *vp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (NULL);
	KKASSERT(vp->v_rdev != NULL);
	return (vp->v_rdev);
}

/*
 * Check if vnode represents a disk device.  The vnode does not need to be
 * opened.
 *
 * MPALMOSTSAFE
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	cdev_t dev;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}

	dev = vp->v_rdev;

	if (dev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (dev_is_good(dev) == 0) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

int
vn_get_namelen(struct vnode *vp, int *namelen)
{
	int error;
	register_t retval[2];

	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
	if (error)
		return (error);
	*namelen = (int)retval[0];
	return (0);
}

int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
		 uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}

void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
		VOP_MARKATIME(vp, cred);
	}
}

/*
 * Calculate the number of entries in an inode-related chained hash table.
 * With today's memory sizes, maxvnodes can wind up being a very large
 * number.  There is no reason to waste memory, so tolerate some stacking.
 */
int
vfs_inodehashsize(void)
{
	int hsize;

	hsize = 32;
	while (hsize < maxvnodes)
		hsize <<= 1;
	while (hsize > maxvnodes * 2)
		hsize >>= 1;		/* nominal 2x stacking */

	if (maxvnodes > 1024 * 1024)
		hsize >>= 1;		/* nominal 8x stacking */

	if (maxvnodes > 128 * 1024)
		hsize >>= 1;		/* nominal 4x stacking */

	if (hsize < 16)
		hsize = 16;

	return hsize;
}
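/*
 * Worked example of the sizing above, assuming maxvnodes = 500000: the
 * first loop doubles hsize from 32 up to 524288 (the first power of two
 * >= 500000); the second loop leaves it alone since 524288 is not
 * greater than 2 * 500000; maxvnodes exceeds 128 * 1024 but not
 * 1024 * 1024, so a single right shift yields 262144 entries, i.e.
 * roughly two vnodes per hash chain.
 */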