1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 35 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $ 36 */ 37 38 /* 39 * External virtual filesystem routines 40 */ 41 #include "opt_ddb.h" 42 #include "opt_inet.h" 43 #include "opt_inet6.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/buf.h> 48 #include <sys/conf.h> 49 #include <sys/dirent.h> 50 #include <sys/eventhandler.h> 51 #include <sys/fcntl.h> 52 #include <sys/file.h> 53 #include <sys/kernel.h> 54 #include <sys/kthread.h> 55 #include <sys/malloc.h> 56 #include <sys/mbuf.h> 57 #include <sys/mount.h> 58 #include <sys/priv.h> 59 #include <sys/proc.h> 60 #include <sys/reboot.h> 61 #include <sys/socket.h> 62 #include <sys/stat.h> 63 #include <sys/sysctl.h> 64 #include <sys/syslog.h> 65 #include <sys/unistd.h> 66 #include <sys/vmmeter.h> 67 #include <sys/vnode.h> 68 69 #include <machine/limits.h> 70 71 #include <vm/vm.h> 72 #include <vm/vm_object.h> 73 #include <vm/vm_extern.h> 74 #include <vm/vm_kern.h> 75 #include <vm/pmap.h> 76 #include <vm/vm_map.h> 77 #include <vm/vm_page.h> 78 #include <vm/vm_pager.h> 79 #include <vm/vnode_pager.h> 80 #include <vm/vm_zone.h> 81 82 #include <sys/buf2.h> 83 #include <sys/thread2.h> 84 #include <sys/sysref2.h> 85 #include <sys/mplock2.h> 86 87 #include <netinet/in.h> 88 89 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure"); 90 91 int numvnodes; 92 SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 93 "Number of vnodes allocated"); 94 int verbose_reclaims; 95 SYSCTL_INT(_debug, OID_AUTO, verbose_reclaims, CTLFLAG_RD, &verbose_reclaims, 0, 96 "Output filename of reclaimed vnode(s)"); 97 98 enum vtype iftovt_tab[16] = { 99 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 100 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 101 }; 102 int vttoif_tab[9] = { 103 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 104 S_IFSOCK, S_IFIFO, S_IFMT, 105 }; 106 107 static int reassignbufcalls; 108 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 109 0, "Number of times buffers have been reassigned to the proper list"); 110 111 static int check_buf_overlap = 2; /* invasive check */ 112 SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW, &check_buf_overlap, 113 0, "Enable overlapping buffer checks"); 114 115 int nfs_mount_type = -1; 116 static struct lwkt_token spechash_token; 117 struct nfs_public nfs_pub; /* publicly exported FS */ 118 119 int desiredvnodes; 120 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, 121 &desiredvnodes, 0, "Maximum number of vnodes"); 122 123 static struct radix_node_head *vfs_create_addrlist_af(int af, 124 struct netexport *nep); 125 static void vfs_free_addrlist (struct netexport *nep); 126 static int vfs_free_netcred (struct radix_node *rn, void *w); 127 static void vfs_free_addrlist_af (struct radix_node_head **prnh); 128 static int vfs_hang_addrlist (struct mount *mp, struct netexport *nep, 129 const struct export_args *argp); 130 131 int prtactive = 0; /* 1 => print out reclaim of active vnodes */ 132 133 /* 134 * Red black tree functions 135 */ 136 static int rb_buf_compare(struct buf *b1, struct buf *b2); 137 RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset); 138 RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset); 139 140 static int 141 rb_buf_compare(struct buf *b1, struct buf *b2) 142 { 143 if (b1->b_loffset < b2->b_loffset) 144 return(-1); 145 if (b1->b_loffset > b2->b_loffset) 146 return(1); 147 return(0); 148 } 149 150 /* 151 * Initialize the vnode management data structures. 152 * 153 * Called from vfsinit() 154 */ 155 void 156 vfs_subr_init(void) 157 { 158 int factor1; 159 int factor2; 160 161 /* 162 * Desiredvnodes is kern.maxvnodes. We want to scale it 163 * according to available system memory but we may also have 164 * to limit it based on available KVM, which is capped on 32 bit 165 * systems, to ~80K vnodes or so. 166 * 167 * WARNING! For machines with 64-256M of ram we have to be sure 168 * that the default limit scales down well due to HAMMER 169 * taking up significantly more memory per-vnode vs UFS. 170 * We want around ~5800 on a 128M machine. 171 */ 172 factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode)); 173 factor2 = 25 * (sizeof(struct vm_object) + sizeof(struct vnode)); 174 desiredvnodes = 175 imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1, 176 KvaSize / factor2); 177 desiredvnodes = imax(desiredvnodes, maxproc * 8); 178 179 lwkt_token_init(&spechash_token, "spechash"); 180 } 181 182 /* 183 * Knob to control the precision of file timestamps: 184 * 185 * 0 = seconds only; nanoseconds zeroed. 186 * 1 = seconds and nanoseconds, accurate within 1/HZ. 187 * 2 = seconds and nanoseconds, truncated to microseconds. 188 * >=3 = seconds and nanoseconds, maximum precision. 189 */ 190 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 191 192 static int timestamp_precision = TSP_SEC; 193 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 194 ×tamp_precision, 0, "Precision of file timestamps"); 195 196 /* 197 * Get a current timestamp. 198 * 199 * MPSAFE 200 */ 201 void 202 vfs_timestamp(struct timespec *tsp) 203 { 204 struct timeval tv; 205 206 switch (timestamp_precision) { 207 case TSP_SEC: 208 tsp->tv_sec = time_second; 209 tsp->tv_nsec = 0; 210 break; 211 case TSP_HZ: 212 getnanotime(tsp); 213 break; 214 case TSP_USEC: 215 microtime(&tv); 216 TIMEVAL_TO_TIMESPEC(&tv, tsp); 217 break; 218 case TSP_NSEC: 219 default: 220 nanotime(tsp); 221 break; 222 } 223 } 224 225 /* 226 * Set vnode attributes to VNOVAL 227 */ 228 void 229 vattr_null(struct vattr *vap) 230 { 231 vap->va_type = VNON; 232 vap->va_size = VNOVAL; 233 vap->va_bytes = VNOVAL; 234 vap->va_mode = VNOVAL; 235 vap->va_nlink = VNOVAL; 236 vap->va_uid = VNOVAL; 237 vap->va_gid = VNOVAL; 238 vap->va_fsid = VNOVAL; 239 vap->va_fileid = VNOVAL; 240 vap->va_blocksize = VNOVAL; 241 vap->va_rmajor = VNOVAL; 242 vap->va_rminor = VNOVAL; 243 vap->va_atime.tv_sec = VNOVAL; 244 vap->va_atime.tv_nsec = VNOVAL; 245 vap->va_mtime.tv_sec = VNOVAL; 246 vap->va_mtime.tv_nsec = VNOVAL; 247 vap->va_ctime.tv_sec = VNOVAL; 248 vap->va_ctime.tv_nsec = VNOVAL; 249 vap->va_flags = VNOVAL; 250 vap->va_gen = VNOVAL; 251 vap->va_vaflags = 0; 252 /* va_*_uuid fields are only valid if related flags are set */ 253 } 254 255 /* 256 * Flush out and invalidate all buffers associated with a vnode. 257 * 258 * vp must be locked. 259 */ 260 static int vinvalbuf_bp(struct buf *bp, void *data); 261 262 struct vinvalbuf_bp_info { 263 struct vnode *vp; 264 int slptimeo; 265 int lkflags; 266 int flags; 267 int clean; 268 }; 269 270 int 271 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 272 { 273 struct vinvalbuf_bp_info info; 274 vm_object_t object; 275 int error; 276 277 lwkt_gettoken(&vp->v_token); 278 279 /* 280 * If we are being asked to save, call fsync to ensure that the inode 281 * is updated. 282 */ 283 if (flags & V_SAVE) { 284 error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo); 285 if (error) 286 goto done; 287 if (!RB_EMPTY(&vp->v_rbdirty_tree)) { 288 if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0) 289 goto done; 290 #if 0 291 /* 292 * Dirty bufs may be left or generated via races 293 * in circumstances where vinvalbuf() is called on 294 * a vnode not undergoing reclamation. Only 295 * panic if we are trying to reclaim the vnode. 296 */ 297 if ((vp->v_flag & VRECLAIMED) && 298 (bio_track_active(&vp->v_track_write) || 299 !RB_EMPTY(&vp->v_rbdirty_tree))) { 300 panic("vinvalbuf: dirty bufs"); 301 } 302 #endif 303 } 304 } 305 info.slptimeo = slptimeo; 306 info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL; 307 if (slpflag & PCATCH) 308 info.lkflags |= LK_PCATCH; 309 info.flags = flags; 310 info.vp = vp; 311 312 /* 313 * Flush the buffer cache until nothing is left, wait for all I/O 314 * to complete. At least one pass is required. We might block 315 * in the pip code so we have to re-check. Order is important. 316 */ 317 do { 318 /* 319 * Flush buffer cache 320 */ 321 if (!RB_EMPTY(&vp->v_rbclean_tree)) { 322 info.clean = 1; 323 error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, 324 NULL, vinvalbuf_bp, &info); 325 } 326 if (!RB_EMPTY(&vp->v_rbdirty_tree)) { 327 info.clean = 0; 328 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, 329 NULL, vinvalbuf_bp, &info); 330 } 331 332 /* 333 * Wait for I/O completion. 334 */ 335 bio_track_wait(&vp->v_track_write, 0, 0); 336 if ((object = vp->v_object) != NULL) 337 refcount_wait(&object->paging_in_progress, "vnvlbx"); 338 } while (bio_track_active(&vp->v_track_write) || 339 !RB_EMPTY(&vp->v_rbclean_tree) || 340 !RB_EMPTY(&vp->v_rbdirty_tree)); 341 342 /* 343 * Destroy the copy in the VM cache, too. 344 */ 345 if ((object = vp->v_object) != NULL) { 346 vm_object_page_remove(object, 0, 0, 347 (flags & V_SAVE) ? TRUE : FALSE); 348 } 349 350 if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree)) 351 panic("vinvalbuf: flush failed"); 352 if (!RB_EMPTY(&vp->v_rbhash_tree)) 353 panic("vinvalbuf: flush failed, buffers still present"); 354 error = 0; 355 done: 356 lwkt_reltoken(&vp->v_token); 357 return (error); 358 } 359 360 static int 361 vinvalbuf_bp(struct buf *bp, void *data) 362 { 363 struct vinvalbuf_bp_info *info = data; 364 int error; 365 366 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 367 atomic_add_int(&bp->b_refs, 1); 368 error = BUF_TIMELOCK(bp, info->lkflags, 369 "vinvalbuf", info->slptimeo); 370 atomic_subtract_int(&bp->b_refs, 1); 371 if (error == 0) { 372 BUF_UNLOCK(bp); 373 error = ENOLCK; 374 } 375 if (error == ENOLCK) 376 return(0); 377 return (-error); 378 } 379 KKASSERT(bp->b_vp == info->vp); 380 381 /* 382 * Must check clean/dirty status after successfully locking as 383 * it may race. 384 */ 385 if ((info->clean && (bp->b_flags & B_DELWRI)) || 386 (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0)) { 387 BUF_UNLOCK(bp); 388 return(0); 389 } 390 391 /* 392 * NOTE: NO B_LOCKED CHECK. Also no buf_checkwrite() 393 * check. This code will write out the buffer, period. 394 */ 395 bremfree(bp); 396 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 397 (info->flags & V_SAVE)) { 398 cluster_awrite(bp); 399 } else if (info->flags & V_SAVE) { 400 /* 401 * Cannot set B_NOCACHE on a clean buffer as this will 402 * destroy the VM backing store which might actually 403 * be dirty (and unsynchronized). 404 */ 405 bp->b_flags |= (B_INVAL | B_RELBUF); 406 brelse(bp); 407 } else { 408 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF); 409 brelse(bp); 410 } 411 return(0); 412 } 413 414 /* 415 * Truncate a file's buffer and pages to a specified length. This 416 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 417 * sync activity. 418 * 419 * The vnode must be locked. 420 */ 421 static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data); 422 static int vtruncbuf_bp_trunc(struct buf *bp, void *data); 423 static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data); 424 static int vtruncbuf_bp_metasync(struct buf *bp, void *data); 425 426 struct vtruncbuf_info { 427 struct vnode *vp; 428 off_t truncloffset; 429 int clean; 430 }; 431 432 int 433 vtruncbuf(struct vnode *vp, off_t length, int blksize) 434 { 435 struct vtruncbuf_info info; 436 const char *filename; 437 int count; 438 439 /* 440 * Round up to the *next* block, then destroy the buffers in question. 441 * Since we are only removing some of the buffers we must rely on the 442 * scan count to determine whether a loop is necessary. 443 */ 444 if ((count = (int)(length % blksize)) != 0) 445 info.truncloffset = length + (blksize - count); 446 else 447 info.truncloffset = length; 448 info.vp = vp; 449 450 lwkt_gettoken(&vp->v_token); 451 do { 452 info.clean = 1; 453 count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, 454 vtruncbuf_bp_trunc_cmp, 455 vtruncbuf_bp_trunc, &info); 456 info.clean = 0; 457 count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, 458 vtruncbuf_bp_trunc_cmp, 459 vtruncbuf_bp_trunc, &info); 460 } while(count); 461 462 /* 463 * For safety, fsync any remaining metadata if the file is not being 464 * truncated to 0. Since the metadata does not represent the entire 465 * dirty list we have to rely on the hit count to ensure that we get 466 * all of it. 467 */ 468 if (length > 0) { 469 do { 470 count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, 471 vtruncbuf_bp_metasync_cmp, 472 vtruncbuf_bp_metasync, &info); 473 } while (count); 474 } 475 476 /* 477 * Clean out any left over VM backing store. 478 * 479 * It is possible to have in-progress I/O from buffers that were 480 * not part of the truncation. This should not happen if we 481 * are truncating to 0-length. 482 */ 483 vnode_pager_setsize(vp, length); 484 bio_track_wait(&vp->v_track_write, 0, 0); 485 486 /* 487 * Debugging only 488 */ 489 spin_lock(&vp->v_spin); 490 filename = TAILQ_FIRST(&vp->v_namecache) ? 491 TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"; 492 spin_unlock(&vp->v_spin); 493 494 /* 495 * Make sure no buffers were instantiated while we were trying 496 * to clean out the remaining VM pages. This could occur due 497 * to busy dirty VM pages being flushed out to disk. 498 */ 499 do { 500 info.clean = 1; 501 count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, 502 vtruncbuf_bp_trunc_cmp, 503 vtruncbuf_bp_trunc, &info); 504 info.clean = 0; 505 count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, 506 vtruncbuf_bp_trunc_cmp, 507 vtruncbuf_bp_trunc, &info); 508 if (count) { 509 kprintf("Warning: vtruncbuf(): Had to re-clean %d " 510 "left over buffers in %s\n", count, filename); 511 } 512 } while(count); 513 514 lwkt_reltoken(&vp->v_token); 515 516 return (0); 517 } 518 519 /* 520 * The callback buffer is beyond the new file EOF and must be destroyed. 521 * Note that the compare function must conform to the RB_SCAN's requirements. 522 */ 523 static 524 int 525 vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data) 526 { 527 struct vtruncbuf_info *info = data; 528 529 if (bp->b_loffset >= info->truncloffset) 530 return(0); 531 return(-1); 532 } 533 534 static 535 int 536 vtruncbuf_bp_trunc(struct buf *bp, void *data) 537 { 538 struct vtruncbuf_info *info = data; 539 540 /* 541 * Do not try to use a buffer we cannot immediately lock, but sleep 542 * anyway to prevent a livelock. The code will loop until all buffers 543 * can be acted upon. 544 * 545 * We must always revalidate the buffer after locking it to deal 546 * with MP races. 547 */ 548 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 549 atomic_add_int(&bp->b_refs, 1); 550 if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0) 551 BUF_UNLOCK(bp); 552 atomic_subtract_int(&bp->b_refs, 1); 553 } else if ((info->clean && (bp->b_flags & B_DELWRI)) || 554 (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) || 555 bp->b_vp != info->vp || 556 vtruncbuf_bp_trunc_cmp(bp, data)) { 557 BUF_UNLOCK(bp); 558 } else { 559 bremfree(bp); 560 bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE); 561 brelse(bp); 562 } 563 return(1); 564 } 565 566 /* 567 * Fsync all meta-data after truncating a file to be non-zero. Only metadata 568 * blocks (with a negative loffset) are scanned. 569 * Note that the compare function must conform to the RB_SCAN's requirements. 570 */ 571 static int 572 vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused) 573 { 574 if (bp->b_loffset < 0) 575 return(0); 576 return(1); 577 } 578 579 static int 580 vtruncbuf_bp_metasync(struct buf *bp, void *data) 581 { 582 struct vtruncbuf_info *info = data; 583 584 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 585 atomic_add_int(&bp->b_refs, 1); 586 if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0) 587 BUF_UNLOCK(bp); 588 atomic_subtract_int(&bp->b_refs, 1); 589 } else if ((bp->b_flags & B_DELWRI) == 0 || 590 bp->b_vp != info->vp || 591 vtruncbuf_bp_metasync_cmp(bp, data)) { 592 BUF_UNLOCK(bp); 593 } else { 594 bremfree(bp); 595 if (bp->b_vp == info->vp) 596 bawrite(bp); 597 else 598 bwrite(bp); 599 } 600 return(1); 601 } 602 603 /* 604 * vfsync - implements a multipass fsync on a file which understands 605 * dependancies and meta-data. The passed vnode must be locked. The 606 * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY. 607 * 608 * When fsyncing data asynchronously just do one consolidated pass starting 609 * with the most negative block number. This may not get all the data due 610 * to dependancies. 611 * 612 * When fsyncing data synchronously do a data pass, then a metadata pass, 613 * then do additional data+metadata passes to try to get all the data out. 614 * 615 * Caller must ref the vnode but does not have to lock it. 616 */ 617 static int vfsync_wait_output(struct vnode *vp, 618 int (*waitoutput)(struct vnode *, struct thread *)); 619 static int vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused); 620 static int vfsync_data_only_cmp(struct buf *bp, void *data); 621 static int vfsync_meta_only_cmp(struct buf *bp, void *data); 622 static int vfsync_lazy_range_cmp(struct buf *bp, void *data); 623 static int vfsync_bp(struct buf *bp, void *data); 624 625 struct vfsync_info { 626 struct vnode *vp; 627 int fastpass; 628 int synchronous; 629 int syncdeps; 630 int lazycount; 631 int lazylimit; 632 int skippedbufs; 633 int (*checkdef)(struct buf *); 634 int (*cmpfunc)(struct buf *, void *); 635 }; 636 637 int 638 vfsync(struct vnode *vp, int waitfor, int passes, 639 int (*checkdef)(struct buf *), 640 int (*waitoutput)(struct vnode *, struct thread *)) 641 { 642 struct vfsync_info info; 643 int error; 644 645 bzero(&info, sizeof(info)); 646 info.vp = vp; 647 if ((info.checkdef = checkdef) == NULL) 648 info.syncdeps = 1; 649 650 lwkt_gettoken(&vp->v_token); 651 652 switch(waitfor) { 653 case MNT_LAZY | MNT_NOWAIT: 654 case MNT_LAZY: 655 /* 656 * Lazy (filesystem syncer typ) Asynchronous plus limit the 657 * number of data (not meta) pages we try to flush to 1MB. 658 * A non-zero return means that lazy limit was reached. 659 */ 660 info.lazylimit = 1024 * 1024; 661 info.syncdeps = 1; 662 info.cmpfunc = vfsync_lazy_range_cmp; 663 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, 664 vfsync_lazy_range_cmp, vfsync_bp, &info); 665 info.cmpfunc = vfsync_meta_only_cmp; 666 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, 667 vfsync_meta_only_cmp, vfsync_bp, &info); 668 if (error == 0) 669 vp->v_lazyw = 0; 670 else if (!RB_EMPTY(&vp->v_rbdirty_tree)) 671 vn_syncer_add(vp, 1); 672 error = 0; 673 break; 674 case MNT_NOWAIT: 675 /* 676 * Asynchronous. Do a data-only pass and a meta-only pass. 677 */ 678 info.syncdeps = 1; 679 info.cmpfunc = vfsync_data_only_cmp; 680 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp, 681 vfsync_bp, &info); 682 info.cmpfunc = vfsync_meta_only_cmp; 683 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp, 684 vfsync_bp, &info); 685 error = 0; 686 break; 687 default: 688 /* 689 * Synchronous. Do a data-only pass, then a meta-data+data 690 * pass, then additional integrated passes to try to get 691 * all the dependancies flushed. 692 */ 693 info.cmpfunc = vfsync_data_only_cmp; 694 info.fastpass = 1; 695 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp, 696 vfsync_bp, &info); 697 info.fastpass = 0; 698 error = vfsync_wait_output(vp, waitoutput); 699 if (error == 0) { 700 info.skippedbufs = 0; 701 info.cmpfunc = vfsync_dummy_cmp; 702 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 703 vfsync_bp, &info); 704 error = vfsync_wait_output(vp, waitoutput); 705 if (info.skippedbufs) { 706 kprintf("Warning: vfsync skipped %d dirty " 707 "buf%s in pass2!\n", 708 info.skippedbufs, 709 ((info.skippedbufs > 1) ? "s" : "")); 710 } 711 } 712 while (error == 0 && passes > 0 && 713 !RB_EMPTY(&vp->v_rbdirty_tree) 714 ) { 715 info.skippedbufs = 0; 716 if (--passes == 0) { 717 info.synchronous = 1; 718 info.syncdeps = 1; 719 } 720 info.cmpfunc = vfsync_dummy_cmp; 721 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 722 vfsync_bp, &info); 723 if (error < 0) 724 error = -error; 725 info.syncdeps = 1; 726 if (error == 0) 727 error = vfsync_wait_output(vp, waitoutput); 728 if (info.skippedbufs && passes == 0) { 729 kprintf("Warning: vfsync skipped %d dirty " 730 "buf%s in final pass!\n", 731 info.skippedbufs, 732 ((info.skippedbufs > 1) ? "s" : "")); 733 } 734 } 735 if (!RB_EMPTY(&vp->v_rbdirty_tree)) 736 kprintf("dirty bufs left after final pass\n"); 737 break; 738 } 739 lwkt_reltoken(&vp->v_token); 740 741 return(error); 742 } 743 744 static int 745 vfsync_wait_output(struct vnode *vp, 746 int (*waitoutput)(struct vnode *, struct thread *)) 747 { 748 int error; 749 750 error = bio_track_wait(&vp->v_track_write, 0, 0); 751 if (waitoutput) 752 error = waitoutput(vp, curthread); 753 return(error); 754 } 755 756 static int 757 vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused) 758 { 759 return(0); 760 } 761 762 static int 763 vfsync_data_only_cmp(struct buf *bp, void *data) 764 { 765 if (bp->b_loffset < 0) 766 return(-1); 767 return(0); 768 } 769 770 static int 771 vfsync_meta_only_cmp(struct buf *bp, void *data) 772 { 773 if (bp->b_loffset < 0) 774 return(0); 775 return(1); 776 } 777 778 static int 779 vfsync_lazy_range_cmp(struct buf *bp, void *data) 780 { 781 struct vfsync_info *info = data; 782 783 if (bp->b_loffset < info->vp->v_lazyw) 784 return(-1); 785 return(0); 786 } 787 788 static int 789 vfsync_bp(struct buf *bp, void *data) 790 { 791 struct vfsync_info *info = data; 792 struct vnode *vp = info->vp; 793 int error; 794 795 if (info->fastpass) { 796 /* 797 * Ignore buffers that we cannot immediately lock. 798 */ 799 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 800 if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst1", 1)) { 801 ++info->skippedbufs; 802 return(0); 803 } 804 } 805 } else if (info->synchronous == 0) { 806 /* 807 * Normal pass, give the buffer a little time to become 808 * available to us. 809 */ 810 if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst2", hz / 10)) { 811 ++info->skippedbufs; 812 return(0); 813 } 814 } else { 815 /* 816 * Synchronous pass, give the buffer a lot of time before 817 * giving up. 818 */ 819 if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst3", hz * 10)) { 820 ++info->skippedbufs; 821 return(0); 822 } 823 } 824 825 /* 826 * We must revalidate the buffer after locking. 827 */ 828 if ((bp->b_flags & B_DELWRI) == 0 || 829 bp->b_vp != info->vp || 830 info->cmpfunc(bp, data)) { 831 BUF_UNLOCK(bp); 832 return(0); 833 } 834 835 /* 836 * If syncdeps is not set we do not try to write buffers which have 837 * dependancies. 838 */ 839 if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp)) { 840 BUF_UNLOCK(bp); 841 return(0); 842 } 843 844 /* 845 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer 846 * has been written but an additional handshake with the device 847 * is required before we can dispose of the buffer. We have no idea 848 * how to do this so we have to skip these buffers. 849 */ 850 if (bp->b_flags & B_NEEDCOMMIT) { 851 BUF_UNLOCK(bp); 852 return(0); 853 } 854 855 /* 856 * Ask bioops if it is ok to sync. If not the VFS may have 857 * set B_LOCKED so we have to cycle the buffer. 858 */ 859 if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) { 860 bremfree(bp); 861 brelse(bp); 862 return(0); 863 } 864 865 if (info->synchronous) { 866 /* 867 * Synchronous flushing. An error may be returned. 868 */ 869 bremfree(bp); 870 error = bwrite(bp); 871 } else { 872 /* 873 * Asynchronous flushing. A negative return value simply 874 * stops the scan and is not considered an error. We use 875 * this to support limited MNT_LAZY flushes. 876 */ 877 vp->v_lazyw = bp->b_loffset; 878 bremfree(bp); 879 info->lazycount += cluster_awrite(bp); 880 waitrunningbufspace(); 881 vm_wait_nominal(); 882 if (info->lazylimit && info->lazycount >= info->lazylimit) 883 error = 1; 884 else 885 error = 0; 886 } 887 return(-error); 888 } 889 890 /* 891 * Associate a buffer with a vnode. 892 * 893 * MPSAFE 894 */ 895 int 896 bgetvp(struct vnode *vp, struct buf *bp, int testsize) 897 { 898 KASSERT(bp->b_vp == NULL, ("bgetvp: not free")); 899 KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0); 900 901 /* 902 * Insert onto list for new vnode. 903 */ 904 lwkt_gettoken(&vp->v_token); 905 906 if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) { 907 lwkt_reltoken(&vp->v_token); 908 return (EEXIST); 909 } 910 911 /* 912 * Diagnostics (mainly for HAMMER debugging). Check for 913 * overlapping buffers. 914 */ 915 if (check_buf_overlap) { 916 struct buf *bx; 917 bx = buf_rb_hash_RB_PREV(bp); 918 if (bx) { 919 if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) { 920 kprintf("bgetvp: overlapl %016jx/%d %016jx " 921 "bx %p bp %p\n", 922 (intmax_t)bx->b_loffset, 923 bx->b_bufsize, 924 (intmax_t)bp->b_loffset, 925 bx, bp); 926 if (check_buf_overlap > 1) 927 panic("bgetvp - overlapping buffer"); 928 } 929 } 930 bx = buf_rb_hash_RB_NEXT(bp); 931 if (bx) { 932 if (bp->b_loffset + testsize > bx->b_loffset) { 933 kprintf("bgetvp: overlapr %016jx/%d %016jx " 934 "bp %p bx %p\n", 935 (intmax_t)bp->b_loffset, 936 testsize, 937 (intmax_t)bx->b_loffset, 938 bp, bx); 939 if (check_buf_overlap > 1) 940 panic("bgetvp - overlapping buffer"); 941 } 942 } 943 } 944 bp->b_vp = vp; 945 bp->b_flags |= B_HASHED; 946 bp->b_flags |= B_VNCLEAN; 947 if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) 948 panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp); 949 /*vhold(vp);*/ 950 lwkt_reltoken(&vp->v_token); 951 return(0); 952 } 953 954 /* 955 * Disassociate a buffer from a vnode. 956 * 957 * MPSAFE 958 */ 959 void 960 brelvp(struct buf *bp) 961 { 962 struct vnode *vp; 963 964 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 965 966 /* 967 * Delete from old vnode list, if on one. 968 */ 969 vp = bp->b_vp; 970 lwkt_gettoken(&vp->v_token); 971 if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) { 972 if (bp->b_flags & B_VNDIRTY) 973 buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp); 974 else 975 buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp); 976 bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN); 977 } 978 if (bp->b_flags & B_HASHED) { 979 buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp); 980 bp->b_flags &= ~B_HASHED; 981 } 982 983 /* 984 * Only remove from synclist when no dirty buffers are left AND 985 * the VFS has not flagged the vnode's inode as being dirty. 986 */ 987 if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) == VONWORKLST && 988 RB_EMPTY(&vp->v_rbdirty_tree)) { 989 vn_syncer_remove(vp); 990 } 991 bp->b_vp = NULL; 992 993 lwkt_reltoken(&vp->v_token); 994 995 /*vdrop(vp);*/ 996 } 997 998 /* 999 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI. 1000 * This routine is called when the state of the B_DELWRI bit is changed. 1001 * 1002 * Must be called with vp->v_token held. 1003 * MPSAFE 1004 */ 1005 void 1006 reassignbuf(struct buf *bp) 1007 { 1008 struct vnode *vp = bp->b_vp; 1009 int delay; 1010 1011 ASSERT_LWKT_TOKEN_HELD(&vp->v_token); 1012 ++reassignbufcalls; 1013 1014 /* 1015 * B_PAGING flagged buffers cannot be reassigned because their vp 1016 * is not fully linked in. 1017 */ 1018 if (bp->b_flags & B_PAGING) 1019 panic("cannot reassign paging buffer"); 1020 1021 if (bp->b_flags & B_DELWRI) { 1022 /* 1023 * Move to the dirty list, add the vnode to the worklist 1024 */ 1025 if (bp->b_flags & B_VNCLEAN) { 1026 buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp); 1027 bp->b_flags &= ~B_VNCLEAN; 1028 } 1029 if ((bp->b_flags & B_VNDIRTY) == 0) { 1030 if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) { 1031 panic("reassignbuf: dup lblk vp %p bp %p", 1032 vp, bp); 1033 } 1034 bp->b_flags |= B_VNDIRTY; 1035 } 1036 if ((vp->v_flag & VONWORKLST) == 0) { 1037 switch (vp->v_type) { 1038 case VDIR: 1039 delay = dirdelay; 1040 break; 1041 case VCHR: 1042 case VBLK: 1043 if (vp->v_rdev && 1044 vp->v_rdev->si_mountpoint != NULL) { 1045 delay = metadelay; 1046 break; 1047 } 1048 /* fall through */ 1049 default: 1050 delay = filedelay; 1051 } 1052 vn_syncer_add(vp, delay); 1053 } 1054 } else { 1055 /* 1056 * Move to the clean list, remove the vnode from the worklist 1057 * if no dirty blocks remain. 1058 */ 1059 if (bp->b_flags & B_VNDIRTY) { 1060 buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp); 1061 bp->b_flags &= ~B_VNDIRTY; 1062 } 1063 if ((bp->b_flags & B_VNCLEAN) == 0) { 1064 if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) { 1065 panic("reassignbuf: dup lblk vp %p bp %p", 1066 vp, bp); 1067 } 1068 bp->b_flags |= B_VNCLEAN; 1069 } 1070 1071 /* 1072 * Only remove from synclist when no dirty buffers are left 1073 * AND the VFS has not flagged the vnode's inode as being 1074 * dirty. 1075 */ 1076 if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) == 1077 VONWORKLST && 1078 RB_EMPTY(&vp->v_rbdirty_tree)) { 1079 vn_syncer_remove(vp); 1080 } 1081 } 1082 } 1083 1084 /* 1085 * Create a vnode for a block device. Used for mounting the root file 1086 * system. 1087 * 1088 * A vref()'d vnode is returned. 1089 */ 1090 extern struct vop_ops *devfs_vnode_dev_vops_p; 1091 int 1092 bdevvp(cdev_t dev, struct vnode **vpp) 1093 { 1094 struct vnode *vp; 1095 struct vnode *nvp; 1096 int error; 1097 1098 if (dev == NULL) { 1099 *vpp = NULLVP; 1100 return (ENXIO); 1101 } 1102 error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p, 1103 &nvp, 0, 0); 1104 if (error) { 1105 *vpp = NULLVP; 1106 return (error); 1107 } 1108 vp = nvp; 1109 vp->v_type = VCHR; 1110 #if 0 1111 vp->v_rdev = dev; 1112 #endif 1113 v_associate_rdev(vp, dev); 1114 vp->v_umajor = dev->si_umajor; 1115 vp->v_uminor = dev->si_uminor; 1116 vx_unlock(vp); 1117 *vpp = vp; 1118 return (0); 1119 } 1120 1121 int 1122 v_associate_rdev(struct vnode *vp, cdev_t dev) 1123 { 1124 if (dev == NULL) 1125 return(ENXIO); 1126 if (dev_is_good(dev) == 0) 1127 return(ENXIO); 1128 KKASSERT(vp->v_rdev == NULL); 1129 vp->v_rdev = reference_dev(dev); 1130 lwkt_gettoken(&spechash_token); 1131 SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext); 1132 lwkt_reltoken(&spechash_token); 1133 return(0); 1134 } 1135 1136 void 1137 v_release_rdev(struct vnode *vp) 1138 { 1139 cdev_t dev; 1140 1141 if ((dev = vp->v_rdev) != NULL) { 1142 lwkt_gettoken(&spechash_token); 1143 SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext); 1144 vp->v_rdev = NULL; 1145 release_dev(dev); 1146 lwkt_reltoken(&spechash_token); 1147 } 1148 } 1149 1150 /* 1151 * Add a vnode to the alias list hung off the cdev_t. We only associate 1152 * the device number with the vnode. The actual device is not associated 1153 * until the vnode is opened (usually in spec_open()), and will be 1154 * disassociated on last close. 1155 */ 1156 void 1157 addaliasu(struct vnode *nvp, int x, int y) 1158 { 1159 if (nvp->v_type != VBLK && nvp->v_type != VCHR) 1160 panic("addaliasu on non-special vnode"); 1161 nvp->v_umajor = x; 1162 nvp->v_uminor = y; 1163 } 1164 1165 /* 1166 * Simple call that a filesystem can make to try to get rid of a 1167 * vnode. It will fail if anyone is referencing the vnode (including 1168 * the caller). 1169 * 1170 * The filesystem can check whether its in-memory inode structure still 1171 * references the vp on return. 1172 * 1173 * May only be called if the vnode is in a known state (i.e. being prevented 1174 * from being deallocated by some other condition such as a vfs inode hold). 1175 */ 1176 void 1177 vclean_unlocked(struct vnode *vp) 1178 { 1179 vx_get(vp); 1180 if (VREFCNT(vp) <= 1) 1181 vgone_vxlocked(vp); 1182 vx_put(vp); 1183 } 1184 1185 /* 1186 * Disassociate a vnode from its underlying filesystem. 1187 * 1188 * The vnode must be VX locked and referenced. In all normal situations 1189 * there are no active references. If vclean_vxlocked() is called while 1190 * there are active references, the vnode is being ripped out and we have 1191 * to call VOP_CLOSE() as appropriate before we can reclaim it. 1192 */ 1193 void 1194 vclean_vxlocked(struct vnode *vp, int flags) 1195 { 1196 int active; 1197 int n; 1198 vm_object_t object; 1199 struct namecache *ncp; 1200 1201 /* 1202 * If the vnode has already been reclaimed we have nothing to do. 1203 */ 1204 if (vp->v_flag & VRECLAIMED) 1205 return; 1206 1207 /* 1208 * Set flag to interlock operation, flag finalization to ensure 1209 * that the vnode winds up on the inactive list, and set v_act to 0. 1210 */ 1211 vsetflags(vp, VRECLAIMED); 1212 atomic_set_int(&vp->v_refcnt, VREF_FINALIZE); 1213 vp->v_act = 0; 1214 1215 if (verbose_reclaims) { 1216 if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) 1217 kprintf("Debug: reclaim %p %s\n", vp, ncp->nc_name); 1218 } 1219 1220 /* 1221 * Scrap the vfs cache 1222 */ 1223 while (cache_inval_vp(vp, 0) != 0) { 1224 kprintf("Warning: vnode %p clean/cache_resolution " 1225 "race detected\n", vp); 1226 tsleep(vp, 0, "vclninv", 2); 1227 } 1228 1229 /* 1230 * Check to see if the vnode is in use. If so we have to reference it 1231 * before we clean it out so that its count cannot fall to zero and 1232 * generate a race against ourselves to recycle it. 1233 */ 1234 active = (VREFCNT(vp) > 0); 1235 1236 /* 1237 * Clean out any buffers associated with the vnode and destroy its 1238 * object, if it has one. 1239 */ 1240 vinvalbuf(vp, V_SAVE, 0, 0); 1241 KKASSERT(lockcountnb(&vp->v_lock) == 1); 1242 1243 /* 1244 * If purging an active vnode (typically during a forced unmount 1245 * or reboot), it must be closed and deactivated before being 1246 * reclaimed. This isn't really all that safe, but what can 1247 * we do? XXX. 1248 * 1249 * Note that neither of these routines unlocks the vnode. 1250 */ 1251 if (active && (flags & DOCLOSE)) { 1252 while ((n = vp->v_opencount) != 0) { 1253 if (vp->v_writecount) 1254 VOP_CLOSE(vp, FWRITE|FNONBLOCK, NULL); 1255 else 1256 VOP_CLOSE(vp, FNONBLOCK, NULL); 1257 if (vp->v_opencount == n) { 1258 kprintf("Warning: unable to force-close" 1259 " vnode %p\n", vp); 1260 break; 1261 } 1262 } 1263 } 1264 1265 /* 1266 * If the vnode has not been deactivated, deactivated it. Deactivation 1267 * can create new buffers and VM pages so we have to call vinvalbuf() 1268 * again to make sure they all get flushed. 1269 * 1270 * This can occur if a file with a link count of 0 needs to be 1271 * truncated. 1272 * 1273 * If the vnode is already dead don't try to deactivate it. 1274 */ 1275 if ((vp->v_flag & VINACTIVE) == 0) { 1276 vsetflags(vp, VINACTIVE); 1277 if (vp->v_mount) 1278 VOP_INACTIVE(vp); 1279 vinvalbuf(vp, V_SAVE, 0, 0); 1280 } 1281 KKASSERT(lockcountnb(&vp->v_lock) == 1); 1282 1283 /* 1284 * If the vnode has an object, destroy it. 1285 */ 1286 while ((object = vp->v_object) != NULL) { 1287 vm_object_hold(object); 1288 if (object == vp->v_object) 1289 break; 1290 vm_object_drop(object); 1291 } 1292 1293 if (object != NULL) { 1294 if (object->ref_count == 0) { 1295 if ((object->flags & OBJ_DEAD) == 0) 1296 vm_object_terminate(object); 1297 vm_object_drop(object); 1298 vclrflags(vp, VOBJBUF); 1299 } else { 1300 vm_pager_deallocate(object); 1301 vclrflags(vp, VOBJBUF); 1302 vm_object_drop(object); 1303 } 1304 } 1305 KKASSERT((vp->v_flag & VOBJBUF) == 0); 1306 1307 /* 1308 * Reclaim the vnode if not already dead. 1309 */ 1310 if (vp->v_mount && VOP_RECLAIM(vp)) 1311 panic("vclean: cannot reclaim"); 1312 1313 /* 1314 * Done with purge, notify sleepers of the grim news. 1315 */ 1316 vp->v_ops = &dead_vnode_vops_p; 1317 vn_gone(vp); 1318 vp->v_tag = VT_NON; 1319 1320 /* 1321 * If we are destroying an active vnode, reactivate it now that 1322 * we have reassociated it with deadfs. This prevents the system 1323 * from crashing on the vnode due to it being unexpectedly marked 1324 * as inactive or reclaimed. 1325 */ 1326 if (active && (flags & DOCLOSE)) { 1327 vclrflags(vp, VINACTIVE | VRECLAIMED); 1328 } 1329 } 1330 1331 /* 1332 * Eliminate all activity associated with the requested vnode 1333 * and with all vnodes aliased to the requested vnode. 1334 * 1335 * The vnode must be referenced but should not be locked. 1336 */ 1337 int 1338 vrevoke(struct vnode *vp, struct ucred *cred) 1339 { 1340 struct vnode *vq; 1341 struct vnode *vqn; 1342 cdev_t dev; 1343 int error; 1344 1345 /* 1346 * If the vnode has a device association, scrap all vnodes associated 1347 * with the device. Don't let the device disappear on us while we 1348 * are scrapping the vnodes. 1349 * 1350 * The passed vp will probably show up in the list, do not VX lock 1351 * it twice! 1352 * 1353 * Releasing the vnode's rdev here can mess up specfs's call to 1354 * device close, so don't do it. The vnode has been disassociated 1355 * and the device will be closed after the last ref on the related 1356 * fp goes away (if not still open by e.g. the kernel). 1357 */ 1358 if (vp->v_type != VCHR) { 1359 error = fdrevoke(vp, DTYPE_VNODE, cred); 1360 return (error); 1361 } 1362 if ((dev = vp->v_rdev) == NULL) { 1363 return(0); 1364 } 1365 reference_dev(dev); 1366 lwkt_gettoken(&spechash_token); 1367 1368 restart: 1369 vqn = SLIST_FIRST(&dev->si_hlist); 1370 if (vqn) 1371 vhold(vqn); 1372 while ((vq = vqn) != NULL) { 1373 if (VREFCNT(vq) > 0) { 1374 vref(vq); 1375 fdrevoke(vq, DTYPE_VNODE, cred); 1376 /*v_release_rdev(vq);*/ 1377 vrele(vq); 1378 if (vq->v_rdev != dev) { 1379 vdrop(vq); 1380 goto restart; 1381 } 1382 } 1383 vqn = SLIST_NEXT(vq, v_cdevnext); 1384 if (vqn) 1385 vhold(vqn); 1386 vdrop(vq); 1387 } 1388 lwkt_reltoken(&spechash_token); 1389 dev_drevoke(dev); 1390 release_dev(dev); 1391 return (0); 1392 } 1393 1394 /* 1395 * This is called when the object underlying a vnode is being destroyed, 1396 * such as in a remove(). Try to recycle the vnode immediately if the 1397 * only active reference is our reference. 1398 * 1399 * Directory vnodes in the namecache with children cannot be immediately 1400 * recycled because numerous VOP_N*() ops require them to be stable. 1401 * 1402 * To avoid recursive recycling from VOP_INACTIVE implemenetations this 1403 * function is a NOP if VRECLAIMED is already set. 1404 */ 1405 int 1406 vrecycle(struct vnode *vp) 1407 { 1408 if (VREFCNT(vp) <= 1 && (vp->v_flag & VRECLAIMED) == 0) { 1409 if (cache_inval_vp_nonblock(vp)) 1410 return(0); 1411 vgone_vxlocked(vp); 1412 return (1); 1413 } 1414 return (0); 1415 } 1416 1417 /* 1418 * Return the maximum I/O size allowed for strategy calls on VP. 1419 * 1420 * If vp is VCHR or VBLK we dive the device, otherwise we use 1421 * the vp's mount info. 1422 * 1423 * The returned value is clamped at MAXPHYS as most callers cannot use 1424 * buffers larger than that size. 1425 */ 1426 int 1427 vmaxiosize(struct vnode *vp) 1428 { 1429 int maxiosize; 1430 1431 if (vp->v_type == VBLK || vp->v_type == VCHR) 1432 maxiosize = vp->v_rdev->si_iosize_max; 1433 else 1434 maxiosize = vp->v_mount->mnt_iosize_max; 1435 1436 if (maxiosize > MAXPHYS) 1437 maxiosize = MAXPHYS; 1438 return (maxiosize); 1439 } 1440 1441 /* 1442 * Eliminate all activity associated with a vnode in preparation for 1443 * destruction. 1444 * 1445 * The vnode must be VX locked and refd and will remain VX locked and refd 1446 * on return. This routine may be called with the vnode in any state, as 1447 * long as it is VX locked. The vnode will be cleaned out and marked 1448 * VRECLAIMED but will not actually be reused until all existing refs and 1449 * holds go away. 1450 * 1451 * NOTE: This routine may be called on a vnode which has not yet been 1452 * already been deactivated (VOP_INACTIVE), or on a vnode which has 1453 * already been reclaimed. 1454 * 1455 * This routine is not responsible for placing us back on the freelist. 1456 * Instead, it happens automatically when the caller releases the VX lock 1457 * (assuming there aren't any other references). 1458 */ 1459 void 1460 vgone_vxlocked(struct vnode *vp) 1461 { 1462 /* 1463 * assert that the VX lock is held. This is an absolute requirement 1464 * now for vgone_vxlocked() to be called. 1465 */ 1466 KKASSERT(lockcountnb(&vp->v_lock) == 1); 1467 1468 /* 1469 * Clean out the filesystem specific data and set the VRECLAIMED 1470 * bit. Also deactivate the vnode if necessary. 1471 * 1472 * The vnode should have automatically been removed from the syncer 1473 * list as syncer/dirty flags cleared during the cleaning. 1474 */ 1475 vclean_vxlocked(vp, DOCLOSE); 1476 KKASSERT((vp->v_flag & VONWORKLST) == 0); 1477 1478 /* 1479 * Delete from old mount point vnode list, if on one. 1480 */ 1481 if (vp->v_mount != NULL) { 1482 KKASSERT(vp->v_data == NULL); 1483 insmntque(vp, NULL); 1484 } 1485 1486 /* 1487 * If special device, remove it from special device alias list 1488 * if it is on one. This should normally only occur if a vnode is 1489 * being revoked as the device should otherwise have been released 1490 * naturally. 1491 */ 1492 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) { 1493 v_release_rdev(vp); 1494 } 1495 1496 /* 1497 * Set us to VBAD 1498 */ 1499 vp->v_type = VBAD; 1500 } 1501 1502 /* 1503 * Lookup a vnode by device number. 1504 * 1505 * Returns non-zero and *vpp set to a vref'd vnode on success. 1506 * Returns zero on failure. 1507 */ 1508 int 1509 vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp) 1510 { 1511 struct vnode *vp; 1512 1513 lwkt_gettoken(&spechash_token); 1514 SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) { 1515 if (type == vp->v_type) { 1516 *vpp = vp; 1517 vref(vp); 1518 lwkt_reltoken(&spechash_token); 1519 return (1); 1520 } 1521 } 1522 lwkt_reltoken(&spechash_token); 1523 return (0); 1524 } 1525 1526 /* 1527 * Calculate the total number of references to a special device. This 1528 * routine may only be called for VBLK and VCHR vnodes since v_rdev is 1529 * an overloaded field. Since udev2dev can now return NULL, we have 1530 * to check for a NULL v_rdev. 1531 */ 1532 int 1533 count_dev(cdev_t dev) 1534 { 1535 struct vnode *vp; 1536 int count = 0; 1537 1538 if (SLIST_FIRST(&dev->si_hlist)) { 1539 lwkt_gettoken(&spechash_token); 1540 SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) { 1541 count += vp->v_opencount; 1542 } 1543 lwkt_reltoken(&spechash_token); 1544 } 1545 return(count); 1546 } 1547 1548 int 1549 vcount(struct vnode *vp) 1550 { 1551 if (vp->v_rdev == NULL) 1552 return(0); 1553 return(count_dev(vp->v_rdev)); 1554 } 1555 1556 /* 1557 * Initialize VMIO for a vnode. This routine MUST be called before a 1558 * VFS can issue buffer cache ops on a vnode. It is typically called 1559 * when a vnode is initialized from its inode. 1560 */ 1561 int 1562 vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff) 1563 { 1564 vm_object_t object; 1565 int error = 0; 1566 1567 object = vp->v_object; 1568 if (object) { 1569 vm_object_hold(object); 1570 KKASSERT(vp->v_object == object); 1571 } 1572 1573 if (object == NULL) { 1574 object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff); 1575 1576 /* 1577 * Dereference the reference we just created. This assumes 1578 * that the object is associated with the vp. Allow it to 1579 * have zero refs. It cannot be destroyed as long as it 1580 * is associated with the vnode. 1581 */ 1582 vm_object_hold(object); 1583 atomic_add_int(&object->ref_count, -1); 1584 vrele(vp); 1585 } else { 1586 KKASSERT((object->flags & OBJ_DEAD) == 0); 1587 } 1588 KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object")); 1589 vsetflags(vp, VOBJBUF); 1590 vm_object_drop(object); 1591 1592 return (error); 1593 } 1594 1595 1596 /* 1597 * Print out a description of a vnode. 1598 */ 1599 static char *typename[] = 1600 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 1601 1602 void 1603 vprint(char *label, struct vnode *vp) 1604 { 1605 char buf[96]; 1606 1607 if (label != NULL) 1608 kprintf("%s: %p: ", label, (void *)vp); 1609 else 1610 kprintf("%p: ", (void *)vp); 1611 kprintf("type %s, refcnt %08x, writecount %d, holdcnt %d,", 1612 typename[vp->v_type], 1613 vp->v_refcnt, vp->v_writecount, vp->v_auxrefs); 1614 buf[0] = '\0'; 1615 if (vp->v_flag & VROOT) 1616 strcat(buf, "|VROOT"); 1617 if (vp->v_flag & VPFSROOT) 1618 strcat(buf, "|VPFSROOT"); 1619 if (vp->v_flag & VTEXT) 1620 strcat(buf, "|VTEXT"); 1621 if (vp->v_flag & VSYSTEM) 1622 strcat(buf, "|VSYSTEM"); 1623 if (vp->v_flag & VOBJBUF) 1624 strcat(buf, "|VOBJBUF"); 1625 if (buf[0] != '\0') 1626 kprintf(" flags (%s)", &buf[1]); 1627 if (vp->v_data == NULL) { 1628 kprintf("\n"); 1629 } else { 1630 kprintf("\n\t"); 1631 VOP_PRINT(vp); 1632 } 1633 } 1634 1635 /* 1636 * Do the usual access checking. 1637 * file_mode, uid and gid are from the vnode in question, 1638 * while acc_mode and cred are from the VOP_ACCESS parameter list 1639 */ 1640 int 1641 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid, 1642 mode_t acc_mode, struct ucred *cred) 1643 { 1644 mode_t mask; 1645 int ismember; 1646 1647 /* 1648 * Super-user always gets read/write access, but execute access depends 1649 * on at least one execute bit being set. 1650 */ 1651 if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) { 1652 if ((acc_mode & VEXEC) && type != VDIR && 1653 (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0) 1654 return (EACCES); 1655 return (0); 1656 } 1657 1658 mask = 0; 1659 1660 /* Otherwise, check the owner. */ 1661 if (cred->cr_uid == uid) { 1662 if (acc_mode & VEXEC) 1663 mask |= S_IXUSR; 1664 if (acc_mode & VREAD) 1665 mask |= S_IRUSR; 1666 if (acc_mode & VWRITE) 1667 mask |= S_IWUSR; 1668 return ((file_mode & mask) == mask ? 0 : EACCES); 1669 } 1670 1671 /* Otherwise, check the groups. */ 1672 ismember = groupmember(gid, cred); 1673 if (cred->cr_svgid == gid || ismember) { 1674 if (acc_mode & VEXEC) 1675 mask |= S_IXGRP; 1676 if (acc_mode & VREAD) 1677 mask |= S_IRGRP; 1678 if (acc_mode & VWRITE) 1679 mask |= S_IWGRP; 1680 return ((file_mode & mask) == mask ? 0 : EACCES); 1681 } 1682 1683 /* Otherwise, check everyone else. */ 1684 if (acc_mode & VEXEC) 1685 mask |= S_IXOTH; 1686 if (acc_mode & VREAD) 1687 mask |= S_IROTH; 1688 if (acc_mode & VWRITE) 1689 mask |= S_IWOTH; 1690 return ((file_mode & mask) == mask ? 0 : EACCES); 1691 } 1692 1693 #ifdef DDB 1694 #include <ddb/ddb.h> 1695 1696 static int db_show_locked_vnodes(struct mount *mp, void *data); 1697 1698 /* 1699 * List all of the locked vnodes in the system. 1700 * Called when debugging the kernel. 1701 */ 1702 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes) 1703 { 1704 kprintf("Locked vnodes\n"); 1705 mountlist_scan(db_show_locked_vnodes, NULL, 1706 MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 1707 } 1708 1709 static int 1710 db_show_locked_vnodes(struct mount *mp, void *data __unused) 1711 { 1712 struct vnode *vp; 1713 1714 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 1715 if (vn_islocked(vp)) 1716 vprint(NULL, vp); 1717 } 1718 return(0); 1719 } 1720 #endif 1721 1722 /* 1723 * Top level filesystem related information gathering. 1724 */ 1725 static int sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS); 1726 1727 static int 1728 vfs_sysctl(SYSCTL_HANDLER_ARGS) 1729 { 1730 int *name = (int *)arg1 - 1; /* XXX */ 1731 u_int namelen = arg2 + 1; /* XXX */ 1732 struct vfsconf *vfsp; 1733 int maxtypenum; 1734 1735 #if 1 || defined(COMPAT_PRELITE2) 1736 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 1737 if (namelen == 1) 1738 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 1739 #endif 1740 1741 #ifdef notyet 1742 /* all sysctl names at this level are at least name and field */ 1743 if (namelen < 2) 1744 return (ENOTDIR); /* overloaded */ 1745 if (name[0] != VFS_GENERIC) { 1746 vfsp = vfsconf_find_by_typenum(name[0]); 1747 if (vfsp == NULL) 1748 return (EOPNOTSUPP); 1749 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 1750 oldp, oldlenp, newp, newlen, p)); 1751 } 1752 #endif 1753 switch (name[1]) { 1754 case VFS_MAXTYPENUM: 1755 if (namelen != 2) 1756 return (ENOTDIR); 1757 maxtypenum = vfsconf_get_maxtypenum(); 1758 return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum))); 1759 case VFS_CONF: 1760 if (namelen != 3) 1761 return (ENOTDIR); /* overloaded */ 1762 vfsp = vfsconf_find_by_typenum(name[2]); 1763 if (vfsp == NULL) 1764 return (EOPNOTSUPP); 1765 return (SYSCTL_OUT(req, vfsp, sizeof *vfsp)); 1766 } 1767 return (EOPNOTSUPP); 1768 } 1769 1770 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl, 1771 "Generic filesystem"); 1772 1773 #if 1 || defined(COMPAT_PRELITE2) 1774 1775 static int 1776 sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data) 1777 { 1778 int error; 1779 struct ovfsconf ovfs; 1780 struct sysctl_req *req = (struct sysctl_req*) data; 1781 1782 bzero(&ovfs, sizeof(ovfs)); 1783 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 1784 strcpy(ovfs.vfc_name, vfsp->vfc_name); 1785 ovfs.vfc_index = vfsp->vfc_typenum; 1786 ovfs.vfc_refcount = vfsp->vfc_refcount; 1787 ovfs.vfc_flags = vfsp->vfc_flags; 1788 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 1789 if (error) 1790 return error; /* abort iteration with error code */ 1791 else 1792 return 0; /* continue iterating with next element */ 1793 } 1794 1795 static int 1796 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 1797 { 1798 return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req); 1799 } 1800 1801 #endif /* 1 || COMPAT_PRELITE2 */ 1802 1803 /* 1804 * Check to see if a filesystem is mounted on a block device. 1805 */ 1806 int 1807 vfs_mountedon(struct vnode *vp) 1808 { 1809 cdev_t dev; 1810 1811 if ((dev = vp->v_rdev) == NULL) { 1812 /* if (vp->v_type != VBLK) 1813 dev = get_dev(vp->v_uminor, vp->v_umajor); */ 1814 } 1815 if (dev != NULL && dev->si_mountpoint) 1816 return (EBUSY); 1817 return (0); 1818 } 1819 1820 /* 1821 * Unmount all filesystems. The list is traversed in reverse order 1822 * of mounting to avoid dependencies. 1823 */ 1824 1825 static int vfs_umountall_callback(struct mount *mp, void *data); 1826 1827 void 1828 vfs_unmountall(void) 1829 { 1830 int count; 1831 1832 do { 1833 count = mountlist_scan(vfs_umountall_callback, 1834 NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY); 1835 } while (count); 1836 } 1837 1838 static 1839 int 1840 vfs_umountall_callback(struct mount *mp, void *data) 1841 { 1842 int error; 1843 1844 error = dounmount(mp, MNT_FORCE); 1845 if (error) { 1846 mountlist_remove(mp); 1847 kprintf("unmount of filesystem mounted from %s failed (", 1848 mp->mnt_stat.f_mntfromname); 1849 if (error == EBUSY) 1850 kprintf("BUSY)\n"); 1851 else 1852 kprintf("%d)\n", error); 1853 } 1854 return(1); 1855 } 1856 1857 /* 1858 * Checks the mount flags for parameter mp and put the names comma-separated 1859 * into a string buffer buf with a size limit specified by len. 1860 * 1861 * It returns the number of bytes written into buf, and (*errorp) will be 1862 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was 1863 * not large enough). The buffer will be 0-terminated if len was not 0. 1864 */ 1865 size_t 1866 vfs_flagstostr(int flags, const struct mountctl_opt *optp, 1867 char *buf, size_t len, int *errorp) 1868 { 1869 static const struct mountctl_opt optnames[] = { 1870 { MNT_RDONLY, "read-only" }, 1871 { MNT_SYNCHRONOUS, "synchronous" }, 1872 { MNT_NOEXEC, "noexec" }, 1873 { MNT_NOSUID, "nosuid" }, 1874 { MNT_NODEV, "nodev" }, 1875 { MNT_AUTOMOUNTED, "automounted" }, 1876 { MNT_ASYNC, "asynchronous" }, 1877 { MNT_SUIDDIR, "suiddir" }, 1878 { MNT_SOFTDEP, "soft-updates" }, 1879 { MNT_NOSYMFOLLOW, "nosymfollow" }, 1880 { MNT_TRIM, "trim" }, 1881 { MNT_NOATIME, "noatime" }, 1882 { MNT_NOCLUSTERR, "noclusterr" }, 1883 { MNT_NOCLUSTERW, "noclusterw" }, 1884 { MNT_EXRDONLY, "NFS read-only" }, 1885 { MNT_EXPORTED, "NFS exported" }, 1886 /* Remaining NFS flags could come here */ 1887 { MNT_LOCAL, "local" }, 1888 { MNT_QUOTA, "with-quotas" }, 1889 /* { MNT_ROOTFS, "rootfs" }, */ 1890 /* { MNT_IGNORE, "ignore" }, */ 1891 { 0, NULL} 1892 }; 1893 int bwritten; 1894 int bleft; 1895 int optlen; 1896 int actsize; 1897 1898 *errorp = 0; 1899 bwritten = 0; 1900 bleft = len - 1; /* leave room for trailing \0 */ 1901 1902 /* 1903 * Checks the size of the string. If it contains 1904 * any data, then we will append the new flags to 1905 * it. 1906 */ 1907 actsize = strlen(buf); 1908 if (actsize > 0) 1909 buf += actsize; 1910 1911 /* Default flags if no flags passed */ 1912 if (optp == NULL) 1913 optp = optnames; 1914 1915 if (bleft < 0) { /* degenerate case, 0-length buffer */ 1916 *errorp = EINVAL; 1917 return(0); 1918 } 1919 1920 for (; flags && optp->o_opt; ++optp) { 1921 if ((flags & optp->o_opt) == 0) 1922 continue; 1923 optlen = strlen(optp->o_name); 1924 if (bwritten || actsize > 0) { 1925 if (bleft < 2) { 1926 *errorp = ENOSPC; 1927 break; 1928 } 1929 buf[bwritten++] = ','; 1930 buf[bwritten++] = ' '; 1931 bleft -= 2; 1932 } 1933 if (bleft < optlen) { 1934 *errorp = ENOSPC; 1935 break; 1936 } 1937 bcopy(optp->o_name, buf + bwritten, optlen); 1938 bwritten += optlen; 1939 bleft -= optlen; 1940 flags &= ~optp->o_opt; 1941 } 1942 1943 /* 1944 * Space already reserved for trailing \0 1945 */ 1946 buf[bwritten] = 0; 1947 return (bwritten); 1948 } 1949 1950 /* 1951 * Build hash lists of net addresses and hang them off the mount point. 1952 * Called by ufs_mount() to set up the lists of export addresses. 1953 */ 1954 static int 1955 vfs_hang_addrlist(struct mount *mp, struct netexport *nep, 1956 const struct export_args *argp) 1957 { 1958 struct netcred *np; 1959 struct radix_node_head *rnh; 1960 int i; 1961 struct radix_node *rn; 1962 struct sockaddr *saddr, *smask = NULL; 1963 int error; 1964 1965 if (argp->ex_addrlen == 0) { 1966 if (mp->mnt_flag & MNT_DEFEXPORTED) 1967 return (EPERM); 1968 np = &nep->ne_defexported; 1969 np->netc_exflags = argp->ex_flags; 1970 np->netc_anon = argp->ex_anon; 1971 np->netc_anon.cr_ref = 1; 1972 mp->mnt_flag |= MNT_DEFEXPORTED; 1973 return (0); 1974 } 1975 1976 if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN) 1977 return (EINVAL); 1978 if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN) 1979 return (EINVAL); 1980 1981 i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen; 1982 np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO); 1983 saddr = (struct sockaddr *) (np + 1); 1984 if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen))) 1985 goto out; 1986 if (saddr->sa_len > argp->ex_addrlen) 1987 saddr->sa_len = argp->ex_addrlen; 1988 if (argp->ex_masklen) { 1989 smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen); 1990 error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen); 1991 if (error) 1992 goto out; 1993 if (smask->sa_len > argp->ex_masklen) 1994 smask->sa_len = argp->ex_masklen; 1995 } 1996 NE_LOCK(nep); 1997 if (nep->ne_maskhead == NULL) { 1998 if (!rn_inithead((void **)&nep->ne_maskhead, NULL, 0)) { 1999 error = ENOBUFS; 2000 goto out; 2001 } 2002 } 2003 if((rnh = vfs_create_addrlist_af(saddr->sa_family, nep)) == NULL) { 2004 error = ENOBUFS; 2005 goto out; 2006 } 2007 rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh, 2008 np->netc_rnodes); 2009 NE_UNLOCK(nep); 2010 if (rn == NULL || np != (struct netcred *) rn) { /* already exists */ 2011 error = EPERM; 2012 goto out; 2013 } 2014 np->netc_exflags = argp->ex_flags; 2015 np->netc_anon = argp->ex_anon; 2016 np->netc_anon.cr_ref = 1; 2017 return (0); 2018 out: 2019 kfree(np, M_NETADDR); 2020 return (error); 2021 } 2022 2023 /* ARGSUSED */ 2024 static int 2025 vfs_free_netcred(struct radix_node *rn, void *w) 2026 { 2027 struct radix_node_head *rnh = (struct radix_node_head *) w; 2028 2029 (*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh); 2030 kfree((caddr_t) rn, M_NETADDR); 2031 return (0); 2032 } 2033 2034 static struct radix_node_head * 2035 vfs_create_addrlist_af(int af, struct netexport *nep) 2036 { 2037 struct radix_node_head *rnh = NULL; 2038 #if defined(INET) || defined(INET6) 2039 struct radix_node_head *maskhead = nep->ne_maskhead; 2040 int off; 2041 #endif 2042 2043 NE_ASSERT_LOCKED(nep); 2044 KKASSERT(maskhead != NULL); 2045 switch (af) { 2046 #ifdef INET 2047 case AF_INET: 2048 if ((rnh = nep->ne_inethead) == NULL) { 2049 off = offsetof(struct sockaddr_in, sin_addr) << 3; 2050 if (!rn_inithead((void **)&rnh, maskhead, off)) 2051 return (NULL); 2052 nep->ne_inethead = rnh; 2053 } 2054 break; 2055 #endif 2056 #ifdef INET6 2057 case AF_INET6: 2058 if ((rnh = nep->ne_inet6head) == NULL) { 2059 off = offsetof(struct sockaddr_in6, sin6_addr) << 3; 2060 if (!rn_inithead((void **)&rnh, maskhead, off)) 2061 return (NULL); 2062 nep->ne_inet6head = rnh; 2063 } 2064 break; 2065 #endif 2066 } 2067 return (rnh); 2068 } 2069 2070 static void 2071 vfs_free_addrlist_af(struct radix_node_head **prnh) 2072 { 2073 struct radix_node_head *rnh = *prnh; 2074 2075 (*rnh->rnh_walktree) (rnh, vfs_free_netcred, rnh); 2076 kfree(rnh, M_RTABLE); 2077 *prnh = NULL; 2078 } 2079 2080 /* 2081 * Free the net address hash lists that are hanging off the mount points. 2082 */ 2083 static void 2084 vfs_free_addrlist(struct netexport *nep) 2085 { 2086 NE_LOCK(nep); 2087 if (nep->ne_inethead != NULL) 2088 vfs_free_addrlist_af(&nep->ne_inethead); 2089 if (nep->ne_inet6head != NULL) 2090 vfs_free_addrlist_af(&nep->ne_inet6head); 2091 if (nep->ne_maskhead) 2092 vfs_free_addrlist_af(&nep->ne_maskhead); 2093 NE_UNLOCK(nep); 2094 } 2095 2096 int 2097 vfs_export(struct mount *mp, struct netexport *nep, 2098 const struct export_args *argp) 2099 { 2100 int error; 2101 2102 if (argp->ex_flags & MNT_DELEXPORT) { 2103 if (mp->mnt_flag & MNT_EXPUBLIC) { 2104 vfs_setpublicfs(NULL, NULL, NULL); 2105 mp->mnt_flag &= ~MNT_EXPUBLIC; 2106 } 2107 vfs_free_addrlist(nep); 2108 mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED); 2109 } 2110 if (argp->ex_flags & MNT_EXPORTED) { 2111 if (argp->ex_flags & MNT_EXPUBLIC) { 2112 if ((error = vfs_setpublicfs(mp, nep, argp)) != 0) 2113 return (error); 2114 mp->mnt_flag |= MNT_EXPUBLIC; 2115 } 2116 if ((error = vfs_hang_addrlist(mp, nep, argp))) 2117 return (error); 2118 mp->mnt_flag |= MNT_EXPORTED; 2119 } 2120 return (0); 2121 } 2122 2123 2124 /* 2125 * Set the publicly exported filesystem (WebNFS). Currently, only 2126 * one public filesystem is possible in the spec (RFC 2054 and 2055) 2127 */ 2128 int 2129 vfs_setpublicfs(struct mount *mp, struct netexport *nep, 2130 const struct export_args *argp) 2131 { 2132 int error; 2133 struct vnode *rvp; 2134 char *cp; 2135 2136 /* 2137 * mp == NULL -> invalidate the current info, the FS is 2138 * no longer exported. May be called from either vfs_export 2139 * or unmount, so check if it hasn't already been done. 2140 */ 2141 if (mp == NULL) { 2142 if (nfs_pub.np_valid) { 2143 nfs_pub.np_valid = 0; 2144 if (nfs_pub.np_index != NULL) { 2145 kfree(nfs_pub.np_index, M_TEMP); 2146 nfs_pub.np_index = NULL; 2147 } 2148 } 2149 return (0); 2150 } 2151 2152 /* 2153 * Only one allowed at a time. 2154 */ 2155 if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount) 2156 return (EBUSY); 2157 2158 /* 2159 * Get real filehandle for root of exported FS. 2160 */ 2161 bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle)); 2162 nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid; 2163 2164 if ((error = VFS_ROOT(mp, &rvp))) 2165 return (error); 2166 2167 if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) 2168 return (error); 2169 2170 vput(rvp); 2171 2172 /* 2173 * If an indexfile was specified, pull it in. 2174 */ 2175 if (argp->ex_indexfile != NULL) { 2176 int namelen; 2177 2178 error = vn_get_namelen(rvp, &namelen); 2179 if (error) 2180 return (error); 2181 nfs_pub.np_index = kmalloc(namelen, M_TEMP, M_WAITOK); 2182 error = copyinstr(argp->ex_indexfile, nfs_pub.np_index, 2183 namelen, NULL); 2184 if (!error) { 2185 /* 2186 * Check for illegal filenames. 2187 */ 2188 for (cp = nfs_pub.np_index; *cp; cp++) { 2189 if (*cp == '/') { 2190 error = EINVAL; 2191 break; 2192 } 2193 } 2194 } 2195 if (error) { 2196 kfree(nfs_pub.np_index, M_TEMP); 2197 return (error); 2198 } 2199 } 2200 2201 nfs_pub.np_mount = mp; 2202 nfs_pub.np_valid = 1; 2203 return (0); 2204 } 2205 2206 struct netcred * 2207 vfs_export_lookup(struct mount *mp, struct netexport *nep, 2208 struct sockaddr *nam) 2209 { 2210 struct netcred *np; 2211 struct radix_node_head *rnh; 2212 struct sockaddr *saddr; 2213 2214 np = NULL; 2215 if (mp->mnt_flag & MNT_EXPORTED) { 2216 /* 2217 * Lookup in the export list first. 2218 */ 2219 NE_LOCK(nep); 2220 if (nam != NULL) { 2221 saddr = nam; 2222 switch (saddr->sa_family) { 2223 #ifdef INET 2224 case AF_INET: 2225 rnh = nep->ne_inethead; 2226 break; 2227 #endif 2228 #ifdef INET6 2229 case AF_INET6: 2230 rnh = nep->ne_inet6head; 2231 break; 2232 #endif 2233 default: 2234 rnh = NULL; 2235 } 2236 if (rnh != NULL) { 2237 np = (struct netcred *) 2238 (*rnh->rnh_matchaddr)((char *)saddr, 2239 rnh); 2240 if (np && np->netc_rnodes->rn_flags & RNF_ROOT) 2241 np = NULL; 2242 } 2243 } 2244 NE_UNLOCK(nep); 2245 /* 2246 * If no address match, use the default if it exists. 2247 */ 2248 if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED) 2249 np = &nep->ne_defexported; 2250 } 2251 return (np); 2252 } 2253 2254 /* 2255 * perform msync on all vnodes under a mount point. The mount point must 2256 * be locked. This code is also responsible for lazy-freeing unreferenced 2257 * vnodes whos VM objects no longer contain pages. 2258 * 2259 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state. 2260 * 2261 * NOTE: XXX VOP_PUTPAGES and friends requires that the vnode be locked, 2262 * but vnode_pager_putpages() doesn't lock the vnode. We have to do it 2263 * way up in this high level function. 2264 */ 2265 static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data); 2266 static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data); 2267 2268 void 2269 vfs_msync(struct mount *mp, int flags) 2270 { 2271 int vmsc_flags; 2272 2273 /* 2274 * tmpfs sets this flag to prevent msync(), sync, and the 2275 * filesystem periodic syncer from trying to flush VM pages 2276 * to swap. Only pure memory pressure flushes tmpfs VM pages 2277 * to swap. 2278 */ 2279 if (mp->mnt_kern_flag & MNTK_NOMSYNC) 2280 return; 2281 2282 /* 2283 * Ok, scan the vnodes for work. If the filesystem is using the 2284 * syncer thread feature we can use vsyncscan() instead of 2285 * vmntvnodescan(), which is much faster. 2286 */ 2287 vmsc_flags = VMSC_GETVP; 2288 if (flags != MNT_WAIT) 2289 vmsc_flags |= VMSC_NOWAIT; 2290 2291 if (mp->mnt_kern_flag & MNTK_THR_SYNC) { 2292 vsyncscan(mp, vmsc_flags, vfs_msync_scan2, 2293 (void *)(intptr_t)flags); 2294 } else { 2295 vmntvnodescan(mp, vmsc_flags, 2296 vfs_msync_scan1, vfs_msync_scan2, 2297 (void *)(intptr_t)flags); 2298 } 2299 } 2300 2301 /* 2302 * scan1 is a fast pre-check. There could be hundreds of thousands of 2303 * vnodes, we cannot afford to do anything heavy weight until we have a 2304 * fairly good indication that there is work to do. 2305 */ 2306 static 2307 int 2308 vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data) 2309 { 2310 int flags = (int)(intptr_t)data; 2311 2312 if ((vp->v_flag & VRECLAIMED) == 0) { 2313 if (vp->v_auxrefs == 0 && VREFCNT(vp) <= 0 && 2314 vp->v_object) { 2315 return(0); /* call scan2 */ 2316 } 2317 if ((mp->mnt_flag & MNT_RDONLY) == 0 && 2318 (vp->v_flag & VOBJDIRTY) && 2319 (flags == MNT_WAIT || vn_islocked(vp) == 0)) { 2320 return(0); /* call scan2 */ 2321 } 2322 } 2323 2324 /* 2325 * do not call scan2, continue the loop 2326 */ 2327 return(-1); 2328 } 2329 2330 /* 2331 * This callback is handed a locked vnode. 2332 */ 2333 static 2334 int 2335 vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data) 2336 { 2337 vm_object_t obj; 2338 int flags = (int)(intptr_t)data; 2339 2340 if (vp->v_flag & VRECLAIMED) 2341 return(0); 2342 2343 if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) { 2344 if ((obj = vp->v_object) != NULL) { 2345 vm_object_page_clean(obj, 0, 0, 2346 flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC); 2347 } 2348 } 2349 return(0); 2350 } 2351 2352 /* 2353 * Wake up anyone interested in vp because it is being revoked. 2354 */ 2355 void 2356 vn_gone(struct vnode *vp) 2357 { 2358 lwkt_gettoken(&vp->v_token); 2359 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE); 2360 lwkt_reltoken(&vp->v_token); 2361 } 2362 2363 /* 2364 * extract the cdev_t from a VBLK or VCHR. The vnode must have been opened 2365 * (or v_rdev might be NULL). 2366 */ 2367 cdev_t 2368 vn_todev(struct vnode *vp) 2369 { 2370 if (vp->v_type != VBLK && vp->v_type != VCHR) 2371 return (NULL); 2372 KKASSERT(vp->v_rdev != NULL); 2373 return (vp->v_rdev); 2374 } 2375 2376 /* 2377 * Check if vnode represents a disk device. The vnode does not need to be 2378 * opened. 2379 * 2380 * MPALMOSTSAFE 2381 */ 2382 int 2383 vn_isdisk(struct vnode *vp, int *errp) 2384 { 2385 cdev_t dev; 2386 2387 if (vp->v_type != VCHR) { 2388 if (errp != NULL) 2389 *errp = ENOTBLK; 2390 return (0); 2391 } 2392 2393 dev = vp->v_rdev; 2394 2395 if (dev == NULL) { 2396 if (errp != NULL) 2397 *errp = ENXIO; 2398 return (0); 2399 } 2400 if (dev_is_good(dev) == 0) { 2401 if (errp != NULL) 2402 *errp = ENXIO; 2403 return (0); 2404 } 2405 if ((dev_dflags(dev) & D_DISK) == 0) { 2406 if (errp != NULL) 2407 *errp = ENOTBLK; 2408 return (0); 2409 } 2410 if (errp != NULL) 2411 *errp = 0; 2412 return (1); 2413 } 2414 2415 int 2416 vn_get_namelen(struct vnode *vp, int *namelen) 2417 { 2418 int error; 2419 register_t retval[2]; 2420 2421 error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval); 2422 if (error) 2423 return (error); 2424 *namelen = (int)retval[0]; 2425 return (0); 2426 } 2427 2428 int 2429 vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type, 2430 uint16_t d_namlen, const char *d_name) 2431 { 2432 struct dirent *dp; 2433 size_t len; 2434 2435 len = _DIRENT_RECLEN(d_namlen); 2436 if (len > uio->uio_resid) 2437 return(1); 2438 2439 dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO); 2440 2441 dp->d_ino = d_ino; 2442 dp->d_namlen = d_namlen; 2443 dp->d_type = d_type; 2444 bcopy(d_name, dp->d_name, d_namlen); 2445 2446 *error = uiomove((caddr_t)dp, len, uio); 2447 2448 kfree(dp, M_TEMP); 2449 2450 return(0); 2451 } 2452 2453 void 2454 vn_mark_atime(struct vnode *vp, struct thread *td) 2455 { 2456 struct proc *p = td->td_proc; 2457 struct ucred *cred = p ? p->p_ucred : proc0.p_ucred; 2458 2459 if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) { 2460 VOP_MARKATIME(vp, cred); 2461 } 2462 } 2463