/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 * $DragonFly: src/sys/kern/vfs_subr.c,v 1.118 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

int numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
int vfs_fastdev = 1;
SYSCTL_INT(_vfs, OID_AUTO, fastdev, CTLFLAG_RW, &vfs_fastdev, 0, "");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW,
	&reassignbufcalls, 0, "");
static int reassignbufloops;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW,
	&reassignbufloops, 0, "");
static int reassignbufsortgood;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW,
	&reassignbufsortgood, 0, "");
static int reassignbufsortbad;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW,
	&reassignbufsortbad, 0, "");
static int reassignbufmethod = 1;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW,
	&reassignbufmethod, 0, "");

int nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
	&desiredvnodes, 0, "Maximum number of vnodes");

static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				   const struct export_args *argp);

extern int dev_ref_debug;

/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}

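/*
 * Example (illustrative sketch, helper name hypothetical): the RB_SCAN()
 * calls in this file follow a common convention.  The optional compare
 * callback brackets the range to visit (return -1 for nodes below the
 * range, 0 to visit, 1 for nodes above it; NULL visits everything), and
 * the scan callback returns 0 to continue, a positive value which is
 * accumulated into RB_SCAN()'s return value, or a negative value to
 * abort the scan with that value.  A minimal dirty-buffer counter under
 * those assumptions:
 *
 *	static int
 *	count_dirty_bp(struct buf *bp, void *data)
 *	{
 *		++*(int *)data;
 *		return(1);
 *	}
 *
 *	int count = 0;
 *	RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
 *		count_dirty_bp, &count);
 */
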
/*
 * Returns non-zero if the vnode is a candidate for lazy msyncing.
 */
static __inline int
vshouldmsync(struct vnode *vp)
{
	if (vp->v_auxrefs != 0 || vp->v_sysref.refcnt > 0)
		return (0);		/* other holders */
	if (vp->v_object &&
	    (vp->v_object->ref_count || vp->v_object->resident_page_count)) {
		return (0);
	}
	return (1);
}

/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM, which is capped on 32 bit
	 * systems.
	 */
	desiredvnodes = min(maxproc + vmstats.v_page_count / 4,
			    KvaSize / (20 *
			    (sizeof(struct vm_object) + sizeof(struct vnode))));

	lwkt_token_init(&spechash_token);
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
	&timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	vap->va_fsmid = VNOVAL;
	/* va_*_uuid fields are only valid if related flags are set */
}

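/*
 * Example (illustrative sketch): vattr_null() is the standard way to
 * prepare a struct vattr before setting only the attributes of interest;
 * fields left at VNOVAL are ignored by VOP_SETATTR() implementations.
 * A truncate-to-zero might look like:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = 0;
 *	error = VOP_SETATTR(vp, &va, cred);
 */
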
/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
};

void
vupdatefsmid(struct vnode *vp)
{
	atomic_set_int(&vp->v_flag, VFSMID);
}

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	int error;
	vm_object_t object;

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
	 */
	if (flags & V_SAVE) {
		crit_enter();
		while (vp->v_track_write.bk_active) {
			vp->v_track_write.bk_waitflag = 1;
			error = tsleep(&vp->v_track_write, slpflag,
					"vinvlbuf", slptimeo);
			if (error) {
				crit_exit();
				return (error);
			}
		}
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			crit_exit();
			if ((error = VOP_FSYNC(vp, MNT_WAIT)) != 0)
				return (error);
			crit_enter();

			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.  Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (vp->v_track_write.bk_active > 0 ||
			     !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
		}
		crit_exit();
	}
	crit_enter();
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left.
	 */
	while (!RB_EMPTY(&vp->v_rbclean_tree) ||
	       !RB_EMPTY(&vp->v_rbdirty_tree)) {
		error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, NULL,
				vinvalbuf_bp, &info);
		if (error == 0) {
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vinvalbuf_bp, &info);
		}
	}

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		while (vp->v_track_write.bk_active > 0) {
			vp->v_track_write.bk_waitflag = 1;
			tsleep(&vp->v_track_write, 0, "vnvlbv", 0);
		}
		if ((object = vp->v_object) != NULL) {
			while (object->paging_in_progress)
				vm_object_pip_sleep(object, "vnvlbx");
		}
	} while (vp->v_track_write.bk_active > 0);

	crit_exit();

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
				      (flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	return (0);
}

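/*
 * Example (illustrative sketch): a reclaim or revoke path typically
 * flushes dirty data through the inode first and then discards the
 * buffers.  With the vnode locked:
 *
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *
 * while a pure discard (no save) drops everything without syncing:
 *
 *	vinvalbuf(vp, 0, 0, 0);
 *
 * vclean_vxlocked() below uses exactly the V_SAVE form.
 */
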
static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}

	KKASSERT(bp->b_vp == info->vp);

	/*
	 * XXX Since there are no node locks for NFS, I
	 * believe there is a slight chance that a delayed
	 * write will occur while sleeping just above, so
	 * check for it.  Note that vfs_bio_awrite expects
	 * buffers to reside on a queue, while bwrite() and
	 * brelse() do not.
	 *
	 * NOTE: NO B_LOCKED CHECK.  Also no buf_checkwrite()
	 * check.  This code will write out the buffer, period.
	 */
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		if (bp->b_vp == info->vp) {
			if (bp->b_flags & B_CLUSTEROK) {
				vfs_bio_awrite(bp);
			} else {
				bremfree(bp);
				bp->b_flags |= B_ASYNC;
				bwrite(bp);
			}
		} else {
			bremfree(bp);
			bwrite(bp);
		}
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
		 */
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
	}
	return(0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);

int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	off_t truncloffset;
	int count;
	const char *filename;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		truncloffset = length + (blksize - count);
	else
		truncloffset = length;

	crit_enter();
	do {
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, vp);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 */
	crit_exit();

	vnode_pager_setsize(vp, length);

	crit_enter();

	/*
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";

	while ((count = vp->v_track_write.bk_active) > 0) {
		vp->v_track_write.bk_waitflag = 1;
		tsleep(&vp->v_track_write, 0, "vbtrunc", 0);
		if (length == 0) {
			kprintf("Warning: vtruncbuf(): Had to wait for "
				"%d buffer I/Os to finish in %s\n",
				count, filename);
		}
	}

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		if (count) {
			kprintf("Warning: vtruncbuf(): Had to re-clean %d "
				"left over buffers in %s\n", count, filename);
		}
	} while(count);

	crit_exit();

	return (0);
}

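/*
 * Example (illustrative sketch, callee name hypothetical): the rounding
 * above keeps any partially valid block.  With blksize = 16384 and
 * length = 20000, length % blksize is 3616, so truncloffset becomes
 * 20000 + (16384 - 3616) = 32768 and only buffers at loffset >= 32768
 * are destroyed.  A filesystem truncate path would typically pair the
 * call with its own block-freeing code:
 *
 *	vtruncbuf(vp, length, fs->fs_bsize);
 *	error = myfs_release_blocks(ip, length);
 */
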
/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset >= *(off_t *)data)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
 * blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vnode *vp = data;

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Do not try to use a buffer we cannot immediately lock,
		 * but sleep anyway to prevent a livelock.  The code will
		 * loop until all buffers can be acted upon.
		 */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
				BUF_UNLOCK(bp);
		} else {
			bremfree(bp);
			if (bp->b_vp == vp) {
				bp->b_flags |= B_ASYNC;
			} else {
				bp->b_flags &= ~B_ASYNC;
			}
			bwrite(bp);
		}
		return(1);
	} else {
		return(0);
	}
}

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
 */
static int vfsync_wait_output(struct vnode *vp,
			int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
	int (*checkdef)(struct buf *),
	int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	crit_enter_id("vfsync");

	switch(waitfor) {
	case MNT_LAZY:
		/*
		 * Lazy (filesystem syncer typical case).  Asynchronous, plus
		 * limit the number of data (not meta) pages we try to flush
		 * to 1MB.  A non-zero return means that the lazy limit was
		 * reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add_to_worklist(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
		 */
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs) {
				kprintf("Warning: vfsync skipped %d dirty "
					"bufs in pass2!\n", info.skippedbufs);
			}
		}
		while (error == 0 && passes > 0 &&
		       !RB_EMPTY(&vp->v_rbdirty_tree)) {
			if (--passes == 0) {
				info.synchronous = 1;
				info.syncdeps = 1;
			}
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vfsync_bp, &info);
			if (error < 0)
				error = -error;
			info.syncdeps = 1;
			if (error == 0)
				error = vfsync_wait_output(vp, waitoutput);
		}
		break;
	}
	crit_exit_id("vfsync");
	return(error);
}

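/*
 * Example (illustrative sketch, callback names hypothetical): a
 * filesystem's VOP_FSYNC implementation typically wraps vfsync(),
 * supplying a checkdef callback that reports whether a buffer still has
 * unresolved dependencies and, optionally, a waitoutput callback that
 * waits for its own background I/O:
 *
 *	static int
 *	myfs_checkdef(struct buf *bp)
 *	{
 *		return (LIST_FIRST(&bp->b_dep) != NULL);
 *	}
 *
 *	static int
 *	myfs_fsync(struct vop_fsync_args *ap)
 *	{
 *		return (vfsync(ap->a_vp, ap->a_waitfor, 4,
 *			       myfs_checkdef, NULL));
 *	}
 */
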
static int
vfsync_wait_output(struct vnode *vp,
		   int (*waitoutput)(struct vnode *, struct thread *))
{
	int error = 0;

	while (vp->v_track_write.bk_active) {
		vp->v_track_write.bk_waitflag = 1;
		tsleep(&vp->v_track_write, 0, "fsfsn", 0);
	}
	if (waitoutput)
		error = waitoutput(vp, curthread);
	return(error);
}

static int
vfsync_data_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(-1);
	return(0);
}

static int
vfsync_meta_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vfsync_lazy_range_cmp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;

	if (bp->b_loffset < info->vp->v_lazyw)
		return(-1);
	return(0);
}

static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	/*
	 * If syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp))
		return(0);

	/*
	 * Ignore buffers that we cannot immediately lock.  XXX
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		kprintf("Warning: vfsync_bp skipping dirty buffer %p\n", bp);
		++info->skippedbufs;
		return(0);
	}
	if ((bp->b_flags & B_DELWRI) == 0)
		panic("vfsync_bp: buffer not dirty");
	if (vp != bp->b_vp)
		panic("vfsync_bp: buffer vp mismatch");

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flushing.  An error may be returned.
		 */
		bremfree(bp);
		crit_exit_id("vfsync");
		error = bwrite(bp);
		crit_enter_id("vfsync");
	} else {
		/*
		 * Asynchronous flushing.  A negative return value simply
		 * stops the scan and is not considered an error.  We use
		 * this to support limited MNT_LAZY flushes.
		 */
		vp->v_lazyw = bp->b_loffset;
		if ((vp->v_flag & VOBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			info->lazycount += vfs_bio_awrite(bp);
		} else {
			info->lazycount += bp->b_bufsize;
			bremfree(bp);
			crit_exit_id("vfsync");
			bawrite(bp);
			crit_enter_id("vfsync");
		}
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	vhold(vp);
	/*
	 * Insert onto list for new vnode.
	 */
	crit_enter();
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp))
		panic("reassignbuf: dup lblk vp %p bp %p", vp, bp);

	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	crit_exit();
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	crit_enter();
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}
	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	crit_exit();
	bp->b_vp = NULL;
	vdrop(vp);
}

/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	int delay;

	KKASSERT(vp != NULL);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	crit_enter();
	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}
		if ((vp->v_flag & VONWORKLST) &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(vp, v_synclist);
		}
	}
	crit_exit();
}

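/*
 * Example (illustrative sketch): callers never move buffers between the
 * clean and dirty trees directly; they change B_DELWRI and then call
 * reassignbuf().  A delayed-write path marks the buffer dirty, which
 * moves it to the dirty tree and arms the syncer:
 *
 *	bp->b_flags |= B_DELWRI;
 *	reassignbuf(bp);
 *
 * and the write-completion path clears B_DELWRI and calls reassignbuf()
 * again to return the buffer to the clean tree.
 */
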
/*
 * Create a vnode for a block device.
 * Used for mounting the root file system.
 */
int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &spec_vnode_vops_p, &nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}

int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	lwkt_tokref ilock;

	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	if (dev_ref_debug)
		kprintf("Z1");
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&ilock, &spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&ilock);
	return(0);
}

void
v_release_rdev(struct vnode *vp)
{
	lwkt_tokref ilock;
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&ilock, &spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&ilock);
	}
}

/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}

/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (sysref_isactive(&vp->v_sysref) == 0)
		vgone_vxlocked(vp);
	vx_put(vp);
}

/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;
	vp->v_flag |= VRECLAIMED;

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution "
			"race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = sysref_isactive(&vp->v_sysref);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK);
			else
				VOP_CLOSE(vp, FNONBLOCK);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
					" vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vp->v_flag |= VINACTIVE;
		VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * If the vnode has an object, destroy it.
	 */
	if ((object = vp->v_object) != NULL) {
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
		} else {
			vm_pager_deallocate(object);
		}
		vp->v_flag &= ~VOBJBUF;
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_pollgone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vp->v_flag &= ~(VINACTIVE|VRECLAIMED);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	lwkt_tokref ilock;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		if ((dev = get_dev(vp->v_umajor, vp->v_uminor)) == NULL)
			return(0);
	}
	reference_dev(dev);
	lwkt_gettoken(&ilock, &spechash_token);
	while ((vq = SLIST_FIRST(&dev->si_hlist)) != NULL) {
		vref(vq);
		fdrevoke(vq, DTYPE_VNODE, cred);
		v_release_rdev(vq);
		vrele(vq);
	}
	lwkt_reltoken(&ilock);
	release_dev(dev);
	return (0);
}

/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 */
int
vrecycle(struct vnode *vp)
{
	if (vp->v_sysref.refcnt <= 1) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}

/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 */
int
vmaxiosize(struct vnode *vp)
{
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		return(vp->v_rdev->si_iosize_max);
	} else {
		return(vp->v_mount->mnt_iosize_max);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for reuse.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 * deactivated (VOP_INACTIVE), or on a vnode which has already been
 * reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * Assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(vp->v_lock.lk_exclusivecount == 1);

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, NULL);

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
}

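/*
 * Example (illustrative sketch, field name hypothetical): a filesystem
 * whose last link to a file disappears can try to recycle the vnode
 * right away instead of waiting for normal reclamation, e.g. at the end
 * of its remove path:
 *
 *	if (ip->i_nlink == 0)
 *		vrecycle(vp);
 *
 * The i_nlink field follows UFS-style inode naming and is only meant as
 * an illustration here.
 */
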
/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	lwkt_tokref ilock;
	struct vnode *vp;

	lwkt_gettoken(&ilock, &spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&ilock);
			return (1);
		}
	}
	lwkt_reltoken(&ilock);
	return (0);
}

/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
 */
int
count_dev(cdev_t dev)
{
	lwkt_tokref ilock;
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&ilock, &spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			if (vp->v_sysref.refcnt > 0)
				count += vp->v_sysref.refcnt;
		}
		lwkt_reltoken(&ilock);
	}
	return(count);
}

int
count_udev(int x, int y)
{
	cdev_t dev;

	if ((dev = get_dev(x, y)) == NULL)
		return(0);
	return(count_dev(dev));
}

int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}

/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize)
{
	vm_object_t object;
	int error = 0;

retry:
	if ((object = vp->v_object) == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0);
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			vn_unlock(vp);
			vm_object_dead_sleep(object, "vodead");
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			goto retry;
		}
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vp->v_flag |= VOBJBUF;
	return (error);
}

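/*
 * Example (illustrative sketch, field names hypothetical): a
 * filesystem's inode-to-vnode setup usually enables VMIO as soon as the
 * file size is known, before any bread()/bwrite() traffic on the vnode:
 *
 *	vp->v_type = VREG;
 *	error = vinitvmio(vp, ip->i_size);
 *	if (error)
 *		return (error);
 */
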
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, sysrefs %d, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_sysref.refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DDB
#include <ddb/ddb.h>

static int db_show_locked_vnodes(struct mount *mp, void *data);

/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	kprintf("Locked vnodes\n");
	mountlist_scan(db_show_locked_vnodes, NULL,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
}

static int
db_show_locked_vnodes(struct mount *mp, void *data __unused)
{
	struct vnode *vp;

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		if (vn_islocked(vp))
			vprint(NULL, vp);
	}
	return(0);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	int maxtypenum;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		vfsp = vfsconf_find_by_typenum(name[0]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		maxtypenum = vfsconf_get_maxtypenum();
		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		vfsp = vfsconf_find_by_typenum(name[2]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
{
	int error;
	struct ovfsconf ovfs;
	struct sysctl_req *req = (struct sysctl_req*) data;

	bzero(&ovfs, sizeof(ovfs));
	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
	strcpy(ovfs.vfc_name, vfsp->vfc_name);
	ovfs.vfc_index = vfsp->vfc_typenum;
	ovfs.vfc_refcount = vfsp->vfc_refcount;
	ovfs.vfc_flags = vfsp->vfc_flags;
	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
	if (error)
		return error;	/* abort iteration with error code */
	else
		return 0;	/* continue iterating with next element */
}

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
}

#endif /* 1 || COMPAT_PRELITE2 */

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL) {
		if (vp->v_type != VBLK)
			dev = get_dev(vp->v_uminor, vp->v_umajor);
	}
	if (dev != NULL && dev->si_mountpoint)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems.  The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */

static int vfs_umountall_callback(struct mount *mp, void *data);

void
vfs_unmountall(void)
{
	int count;

	do {
		count = mountlist_scan(vfs_umountall_callback,
				       NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
	} while (count);
}

static
int
vfs_umountall_callback(struct mount *mp, void *data)
{
	int error;

	error = dounmount(mp, MNT_FORCE);
	if (error) {
		mountlist_remove(mp);
		kprintf("unmount of filesystem mounted from %s failed (",
			mp->mnt_stat.f_mntfromname);
		if (error == EBUSY)
			kprintf("BUSY)\n");
		else
			kprintf("%d)\n", error);
	}
	return(1);
}

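/*
 * Example (illustrative sketch): vfs_mountedon() above is the guard a
 * mount path would use to refuse a device vnode that already carries a
 * filesystem (dev->si_mountpoint is set by the first mount):
 *
 *	if ((error = vfs_mountedon(devvp)) != 0)
 *		return (error);
 *
 * The returned error is EBUSY when a mount is already present.
 */
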
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
		  const struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
		return (EINVAL);
	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		SLIST_FOREACH(dom, &domains, dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	kfree(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			kfree((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep,
	   const struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}


/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055)
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
		const struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info, the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		int namelen;

		error = vn_get_namelen(rvp, &namelen);
		if (error)
			return (error);
		MALLOC(nfs_pub.np_index, char *, namelen, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    namelen, NULL);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

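/*
 * Example (illustrative sketch, field names hypothetical): a
 * filesystem's mount-update path hands export requests to vfs_export(),
 * keeping its own struct netexport in its per-mount data:
 *
 *	if (mp->mnt_flag & MNT_UPDATE) {
 *		if (args.export.ex_flags != 0) {
 *			return (vfs_export(mp, &ump->um_export,
 *					   &args.export));
 *		}
 *	}
 */
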
struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		  struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = nam;
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((char *)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 * but vnode_pager_putpages() doesn't lock the vnode.  We have to do it
 * way up in this high level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
	int vmsc_flags;

	vmsc_flags = VMSC_GETVP;
	if (flags != MNT_WAIT)
		vmsc_flags |= VMSC_NOWAIT;
	vmntvnodescan(mp, vmsc_flags, vfs_msync_scan1, vfs_msync_scan2,
		      (void *)flags);
}

/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes, we cannot afford to do anything heavy weight until we have a
 * fairly good indication that there is work to do.
 */
static
int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	int flags = (int)data;

	if ((vp->v_flag & VRECLAIMED) == 0) {
		if (vshouldmsync(vp))
			return(0);	/* call scan2 */
		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
		    (vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
			return(0);	/* call scan2 */
		}
	}

	/*
	 * do not call scan2, continue the loop
	 */
	return(-1);
}

/*
 * This callback is handed a locked vnode.
 */
static
int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	vm_object_t obj;
	int flags = (int)data;

	if (vp->v_flag & VRECLAIMED)
		return(0);

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
		if ((obj = vp->v_object) != NULL) {
			vm_object_page_clean(obj, 0, 0,
			    flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
		}
	}
	return(0);
}

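/*
 * Example (illustrative sketch): the periodic syncer and the unmount
 * path are the typical callers of vfs_msync(); the flag selects between
 * an opportunistic asynchronous sweep and a blocking one:
 *
 *	vfs_msync(mp, MNT_NOWAIT);	(syncer: skip busy vnodes)
 *	vfs_msync(mp, MNT_WAIT);	(unmount: flush everything)
 */
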
/*
 * Record a process's interest in events which might happen to
 * a vnode.  Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions.  (These are done together, while the lock is held,
 * to avoid race conditions.)
 */
int
vn_pollrecord(struct vnode *vp, int events)
{
	lwkt_tokref ilock;

	KKASSERT(curthread->td_proc != NULL);

	lwkt_gettoken(&ilock, &vp->v_pollinfo.vpi_token);
	if (vp->v_pollinfo.vpi_revents & events) {
		/*
		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
		 * (otherwise they would never have been
		 * recorded).
		 */
		events &= vp->v_pollinfo.vpi_revents;
		vp->v_pollinfo.vpi_revents &= ~events;

		lwkt_reltoken(&ilock);
		return events;
	}
	vp->v_pollinfo.vpi_events |= events;
	selrecord(curthread, &vp->v_pollinfo.vpi_selinfo);
	lwkt_reltoken(&ilock);
	return 0;
}

/*
 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
 * it is possible for us to miss an event due to race conditions, but
 * that condition is expected to be rare, so for the moment it is the
 * preferred interface.
 */
void
vn_pollevent(struct vnode *vp, int events)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &vp->v_pollinfo.vpi_token);
	if (vp->v_pollinfo.vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
		 * call selwakeup() twice if two events are
		 * posted before the polling process(es) is
		 * awakened.  This also ensures that we take at
		 * most one selwakeup() if the polling process
		 * is no longer interested.  However, it does
		 * mean that only one event can be noticed at
		 * a time.  (Perhaps we should only clear those
		 * event bits which we note?) XXX
		 */
		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
		vp->v_pollinfo.vpi_revents |= events;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	lwkt_reltoken(&ilock);
}

/*
 * Wake up anyone polling on vp because it is being revoked.
 * This depends on dead_poll() returning POLLHUP for correct
 * behavior.
 */
void
vn_pollgone(struct vnode *vp)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &vp->v_pollinfo.vpi_token);
	if (vp->v_pollinfo.vpi_events) {
		vp->v_pollinfo.vpi_events = 0;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	lwkt_reltoken(&ilock);
}

/*
 * Extract the cdev_t from a VBLK or VCHR.  The vnode must have been opened
 * (or v_rdev might be NULL).
 */
cdev_t
vn_todev(struct vnode *vp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (NULL);
	KKASSERT(vp->v_rdev != NULL);
	return (vp->v_rdev);
}

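/*
 * Example (illustrative sketch): a VOP_POLL implementation typically
 * reports ready events immediately and uses vn_pollrecord() to register
 * interest in the rest, while the producing side later posts events:
 *
 *	revents = events & (POLLIN | POLLRDNORM);   (data available now)
 *	if (revents == 0)
 *		revents = vn_pollrecord(vp, events);
 *	...
 *	vn_pollevent(vp, POLLIN | POLLRDNORM);      (on new data)
 */
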
/*
 * Check if vnode represents a disk device.  The vnode does not need to be
 * opened.
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	cdev_t dev;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}

	if ((dev = vp->v_rdev) == NULL)
		dev = get_dev(vp->v_umajor, vp->v_uminor);

	if (dev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (dev_is_good(dev) == 0) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

int
vn_get_namelen(struct vnode *vp, int *namelen)
{
	int error, retval[2];

	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
	if (error)
		return (error);
	*namelen = *retval;
	return (0);
}

int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
		 uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}

void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
		VOP_MARKATIME(vp, cred);
	}
}

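/*
 * Example (illustrative sketch, node fields hypothetical): a VOP_READDIR
 * implementation emits one entry at a time with vop_write_dirent(); a
 * return of 1 means the uio has no room left and the loop should stop
 * without error:
 *
 *	if (vop_write_dirent(&error, uio, node->ino, DT_REG,
 *			     node->namelen, node->name))
 *		break;		(out of space in the user buffer)
 *	if (error)
 *		break;
 *
 * Each filesystem supplies its own directory iteration state in place of
 * the node fields shown above.
 */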