xref: /dragonfly/sys/kern/vfs_subr.c (revision e6e77800)
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <vm/vm_page2.h>

#include <netinet/in.h>

static MALLOC_DEFINE(M_NETCRED, "Export Host", "Export host address structure");

int numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes allocated");
int verbose_reclaims;
SYSCTL_INT(_debug, OID_AUTO, verbose_reclaims, CTLFLAG_RD, &verbose_reclaims, 0,
    "Output filename of reclaimed vnode(s)");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls,
    0, "Number of times buffers have been reassigned to the proper list");

static int check_buf_overlap = 2;	/* invasive check */
SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW, &check_buf_overlap,
    0, "Enable overlapping buffer checks");

int	nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */

int maxvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
	   &maxvnodes, 0, "Maximum number of vnodes");

static struct radix_node_head *vfs_create_addrlist_af(int af,
		    struct netexport *nep);
static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static void	vfs_free_addrlist_af (struct radix_node_head **prnh);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
		    const struct export_args *argp);

int	prtactive = 0;		/* 1 => print out reclaim of active vnodes */

/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}

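/*
 * Editorial note: the RB_SCAN range-compare callbacks used throughout
 * this file follow a three-way convention: return a negative value when
 * a node lies before the range of interest, 0 when the node should be
 * visited by the scan callback, and a positive value when the node lies
 * past the range, which lets the scan prune entire subtrees.
 */
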
/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	int factor1;
	int factor2;

	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM, which is capped on 32 bit
	 * systems, to ~80K vnodes or so.
	 *
	 * WARNING!  For machines with 64-256M of RAM we have to be sure
	 *	     that the default limit scales down well due to HAMMER
	 *	     taking up significantly more memory per-vnode vs UFS.
	 *	     We want around ~5800 on a 128M machine.
	 */
	factor1 = 25 * (sizeof(struct vm_object) + sizeof(struct vnode));
	factor2 = 30 * (sizeof(struct vm_object) + sizeof(struct vnode));
	maxvnodes = imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
			 KvaSize / factor2);
	maxvnodes = imax(maxvnodes, maxproc * 8);

	lwkt_token_init(&spechash_token, "spechash");
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
		&timestamp_precision, 0, "Precision of file timestamps");

/*
 * Get a current timestamp.
 *
 * MPSAFE
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

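/*
 * Illustrative use (a sketch; 'ip' and its i_mtime field are
 * hypothetical per-filesystem names): a VFS stamping an inode's
 * modification time would typically do
 *
 *	struct timespec ts;
 *
 *	vfs_timestamp(&ts);
 *	ip->i_mtime = ts;
 */
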
/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	/* va_*_uuid fields are only valid if related flags are set */
}

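/*
 * Typical use (sketch; 'newsize' is a hypothetical value): initialize a
 * vattr and fill in only the fields to be changed before calling
 * VOP_SETATTR(), e.g. to truncate a file:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = newsize;
 *	error = VOP_SETATTR(vp, &va, cred);
 */
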
/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
	int clean;
};

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	vm_object_t object;
	int error;

	lwkt_gettoken(&vp->v_token);

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
	 */
	if (flags & V_SAVE) {
		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
		if (error)
			goto done;
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
				goto done;
#if 0
			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.  Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (bio_track_active(&vp->v_track_write) ||
			    !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
#endif
		}
	}
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left and wait for all
	 * I/O to complete.  At least one pass is required.  We might block
	 * in the pip code so we have to re-check.  Order is important.
	 */
	do {
		/*
		 * Flush buffer cache
		 */
		if (!RB_EMPTY(&vp->v_rbclean_tree)) {
			info.clean = 1;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
					NULL, vinvalbuf_bp, &info);
		}
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			info.clean = 0;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					NULL, vinvalbuf_bp, &info);
		}

		/*
		 * Wait for I/O completion.
		 */
		bio_track_wait(&vp->v_track_write, 0, 0);
		if ((object = vp->v_object) != NULL)
			refcount_wait(&object->paging_in_progress, "vnvlbx");
	} while (bio_track_active(&vp->v_track_write) ||
		 !RB_EMPTY(&vp->v_rbclean_tree) ||
		 !RB_EMPTY(&vp->v_rbdirty_tree));

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
			(flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	error = 0;
done:
	lwkt_reltoken(&vp->v_token);
	return (error);
}

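/*
 * Example (see vclean_vxlocked() below): reclaiming a vnode flushes and
 * saves any remaining dirty buffers with
 *
 *	vinvalbuf(vp, V_SAVE, 0, 0);
 *
 * while a caller that simply wants the buffers thrown away can pass 0
 * for flags, taking the B_INVAL | B_NOCACHE path above.
 */
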
static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		atomic_subtract_int(&bp->b_refs, 1);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}
	KKASSERT(bp->b_vp == info->vp);

	/*
	 * Must check clean/dirty status after successfully locking as
	 * it may race.
	 */
	if ((info->clean && (bp->b_flags & B_DELWRI)) ||
	    (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * NOTE:  NO B_LOCKED CHECK.  Also no buf_checkwrite()
	 * check.  This code will write out the buffer, period.
	 */
	bremfree(bp);
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		cluster_awrite(bp);
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
		 */
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
	} else {
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		brelse(bp);
	}
	return(0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);

struct vtruncbuf_info {
	struct vnode *vp;
	off_t	truncloffset;
	int	clean;
};

int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	struct vtruncbuf_info info;
	const char *filename;
	int count;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		info.truncloffset = length + (blksize - count);
	else
		info.truncloffset = length;
	info.vp = vp;

	lwkt_gettoken(&vp->v_token);
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, &info);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 *
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	vnode_pager_setsize(vp, length);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only
	 */
	spin_lock(&vp->v_spin);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock(&vp->v_spin);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &info);
		if (count) {
			kprintf("Warning: vtruncbuf():  Had to re-clean %d "
			       "left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);

	return (0);
}

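/*
 * Illustrative call (sketch): a VFS shrinking a file to 'length' bytes
 * on a filesystem using 'blksize'-byte blocks would typically do
 *
 *	vtruncbuf(vp, length, blksize);
 *
 * in its truncation path, before or after adjusting the on-disk block
 * map depending on the filesystem's own consistency rules.
 */
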
/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to RB_SCAN's requirements.
 */
static
int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (bp->b_loffset >= info->truncloffset)
		return(0);
	return(-1);
}

static
int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 *
	 * We must always revalidate the buffer after locking it to deal
	 * with MP races.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_trunc_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to a non-zero length.
 * Only metadata blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vtruncbuf_info *info = data;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((bp->b_flags & B_DELWRI) == 0 ||
		   bp->b_vp != info->vp ||
		   vtruncbuf_bp_metasync_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		if (bp->b_vp == info->vp)
			bawrite(bp);
		else
			bwrite(bp);
	}
	return(1);
}

/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT, MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
 *
 * Caller must hold a reference on the vnode.
 */
static int vfsync_wait_output(struct vnode *vp,
			    int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused);
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int fastpass;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
	int (*cmpfunc)(struct buf *, void *);
};

int
vfsync(struct vnode *vp, int waitfor, int passes,
	int (*checkdef)(struct buf *),
	int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vp->v_token);

	switch(waitfor) {
	case MNT_LAZY | MNT_NOWAIT:
	case MNT_LAZY:
		/*
		 * Lazy (typically the filesystem syncer): asynchronous,
		 * plus limit the number of data (not meta) pages we try
		 * to flush to 1MB.  A non-zero return means that the lazy
		 * limit was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		info.cmpfunc = vfsync_lazy_range_cmp;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		info.cmpfunc = vfsync_data_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		info.cmpfunc = vfsync_meta_only_cmp;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
		 */
		info.cmpfunc = vfsync_data_only_cmp;
		info.fastpass = 1;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		info.fastpass = 0;
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			info.cmpfunc = vfsync_dummy_cmp;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs) {
				kprintf("Warning: vfsync skipped %d dirty "
					"buf%s in pass2!\n",
					info.skippedbufs,
					((info.skippedbufs > 1) ? "s" : ""));
			}
		}
		while (error == 0 && passes > 0 &&
		       !RB_EMPTY(&vp->v_rbdirty_tree)
		) {
			info.skippedbufs = 0;
			if (--passes == 0) {
				info.synchronous = 1;
				info.syncdeps = 1;
			}
			info.cmpfunc = vfsync_dummy_cmp;
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vfsync_bp, &info);
			if (error < 0)
				error = -error;
			info.syncdeps = 1;
			if (error == 0)
				error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs && passes == 0) {
				kprintf("Warning: vfsync skipped %d dirty "
					"buf%s in final pass!\n",
					info.skippedbufs,
					((info.skippedbufs > 1) ? "s" : ""));
			}
		}
#if 0
		/*
		 * This case can occur normally because vnode lock might
		 * not be held.
		 */
		if (!RB_EMPTY(&vp->v_rbdirty_tree))
			kprintf("dirty bufs left after final pass\n");
#endif
		break;
	}
	lwkt_reltoken(&vp->v_token);

	return(error);
}

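/*
 * Usage sketch (illustrative only; 'myfs_fsync' is hypothetical): a
 * simple filesystem with no write dependencies can implement its fsync
 * VOP as a thin wrapper,
 *
 *	static int
 *	myfs_fsync(struct vop_fsync_args *ap)
 *	{
 *		return (vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL));
 *	}
 *
 * Passing NULL for checkdef makes vfsync() treat every buffer as
 * dependency-free (see the syncdeps handling above).
 */
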
static int
vfsync_wait_output(struct vnode *vp,
		   int (*waitoutput)(struct vnode *, struct thread *))
{
	int error;

	error = bio_track_wait(&vp->v_track_write, 0, 0);
	if (waitoutput)
		error = waitoutput(vp, curthread);
	return(error);
}

static int
vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused)
{
	return(0);
}

static int
vfsync_data_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(-1);
	return(0);
}

static int
vfsync_meta_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
vfsync_lazy_range_cmp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;

	if (bp->b_loffset < info->vp->v_lazyw)
		return(-1);
	return(0);
}

static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	if (info->fastpass) {
		/*
		 * Ignore buffers that we cannot immediately lock.
		 */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst1", 1)) {
				++info->skippedbufs;
				return(0);
			}
		}
	} else if (info->synchronous == 0) {
		/*
		 * Normal pass, give the buffer a little time to become
		 * available to us.
		 */
		if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst2", hz / 10)) {
			++info->skippedbufs;
			return(0);
		}
	} else {
		/*
		 * Synchronous pass, give the buffer a lot of time before
		 * giving up.
		 */
		if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst3", hz * 10)) {
			++info->skippedbufs;
			return(0);
		}
	}

	/*
	 * We must revalidate the buffer after locking.
	 */
	if ((bp->b_flags & B_DELWRI) == 0 ||
	    bp->b_vp != info->vp ||
	    info->cmpfunc(bp, data)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * If syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp)) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync.  If not the VFS may have
	 * set B_LOCKED so we have to cycle the buffer.
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flush.  An error may be returned and will
		 * stop the scan.
		 */
		bremfree(bp);
		error = bwrite(bp);
	} else {
		/*
		 * Asynchronous flush.  We use the error return to support
		 * MNT_LAZY flushes.
		 *
		 * In low-memory situations we revert to synchronous
		 * operation.  This should theoretically prevent the I/O
		 * path from exhausting memory in a non-recoverable way.
		 */
		vp->v_lazyw = bp->b_loffset;
		bremfree(bp);
		if (vm_page_count_min(0)) {
			/* low memory */
			info->lazycount += bp->b_bufsize;
			bwrite(bp);
		} else {
			/* normal */
			info->lazycount += cluster_awrite(bp);
			waitrunningbufspace();
			/*vm_wait_nominal();*/
		}
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}

/*
 * Associate a buffer with a vnode.
 *
 * MPSAFE
 */
int
bgetvp(struct vnode *vp, struct buf *bp, int testsize)
{
	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	/*
	 * Insert onto list for new vnode.
	 */
	lwkt_gettoken(&vp->v_token);

	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
		lwkt_reltoken(&vp->v_token);
		return (EEXIST);
	}

	/*
	 * Diagnostics (mainly for HAMMER debugging).  Check for
	 * overlapping buffers.
	 */
	if (check_buf_overlap) {
		struct buf *bx;
		bx = buf_rb_hash_RB_PREV(bp);
		if (bx) {
			if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) {
				kprintf("bgetvp: overlapl %016jx/%d %016jx "
					"bx %p bp %p\n",
					(intmax_t)bx->b_loffset,
					bx->b_bufsize,
					(intmax_t)bp->b_loffset,
					bx, bp);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
		bx = buf_rb_hash_RB_NEXT(bp);
		if (bx) {
			if (bp->b_loffset + testsize > bx->b_loffset) {
				kprintf("bgetvp: overlapr %016jx/%d %016jx "
					"bp %p bx %p\n",
					(intmax_t)bp->b_loffset,
					testsize,
					(intmax_t)bx->b_loffset,
					bp, bx);
				if (check_buf_overlap > 1)
					panic("bgetvp - overlapping buffer");
			}
		}
	}
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	/*vhold(vp);*/
	lwkt_reltoken(&vp->v_token);
	return(0);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * MPSAFE
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	lwkt_gettoken(&vp->v_token);
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}

	/*
	 * Only remove from synclist when no dirty buffers are left AND
	 * the VFS has not flagged the vnode's inode as being dirty.
	 */
	if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vn_syncer_remove(vp, 0);
	}
	bp->b_vp = NULL;

	lwkt_reltoken(&vp->v_token);

	/*vdrop(vp);*/
}

/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 *
 * Must be called with vp->v_token held.
 * MPSAFE
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	int delay;

	ASSERT_LWKT_TOKEN_HELD(&vp->v_token);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}

		/*
		 * Only remove from synclist when no dirty buffers are left
		 * AND the VFS has not flagged the vnode's inode as being
		 * dirty.
		 */
		if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) ==
		     VONWORKLST &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vn_syncer_remove(vp, 0);
		}
	}
}

/*
 * Create a vnode for a block device.  Used for mounting the root file
 * system.
 *
 * A vref()'d vnode is returned.
 */
extern struct vop_ops *devfs_vnode_dev_vops_p;
int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
				&nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
#if 0
	vp->v_rdev = dev;
#endif
	v_associate_rdev(vp, dev);
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}

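/*
 * Usage sketch (illustrative; exact root-mount plumbing varies): code
 * mounting the root filesystem typically obtains a device vnode with
 *
 *	error = bdevvp(rootdev, &rootvp);
 *
 * and treats a non-zero return as "no usable root device".
 */
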
int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&spechash_token);
	return(0);
}

void
v_release_rdev(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&spechash_token);
	}
}

/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}

/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 *
 * May only be called if the vnode is in a known state (i.e. being prevented
 * from being deallocated by some other condition such as a vfs inode hold).
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (VREFCNT(vp) <= 1)
		vgone_vxlocked(vp);
	vx_put(vp);
}

/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;
	struct namecache *ncp;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;

	/*
	 * Set flag to interlock operation, flag finalization to ensure
	 * that the vnode winds up on the inactive list, and set v_act to 0.
	 */
	vsetflags(vp, VRECLAIMED);
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
	vp->v_act = 0;

	if (verbose_reclaims) {
		if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL)
			kprintf("Debug: reclaim %p %s\n", vp, ncp->nc_name);
	}

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution "
			"race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = (VREFCNT(vp) > 0);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK, NULL);
			else
				VOP_CLOSE(vp, FNONBLOCK, NULL);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
				       " vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 *
	 * If the vnode is already dead don't try to deactivate it.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * If the vnode has an object, destroy it.
	 */
	while ((object = vp->v_object) != NULL) {
		vm_object_hold(object);
		if (object == vp->v_object)
			break;
		vm_object_drop(object);
	}

	if (object != NULL) {
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
			vm_object_drop(object);
			vclrflags(vp, VOBJBUF);
		} else {
			vm_pager_deallocate(object);
			vclrflags(vp, VOBJBUF);
			vm_object_drop(object);
		}
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	/*
	 * Reclaim the vnode if not already dead.
	 */
	if (vp->v_mount && VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_gone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vclrflags(vp, VINACTIVE | VRECLAIMED);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	struct vnode *vqn;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 *
	 * Releasing the vnode's rdev here can mess up specfs's call to
	 * device close, so don't do it.  The vnode has been disassociated
	 * and the device will be closed after the last ref on the related
	 * fp goes away (if not still open by e.g. the kernel).
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		return(0);
	}
	reference_dev(dev);
	lwkt_gettoken(&spechash_token);

restart:
	vqn = SLIST_FIRST(&dev->si_hlist);
	if (vqn)
		vhold(vqn);
	while ((vq = vqn) != NULL) {
		if (VREFCNT(vq) > 0) {
			vref(vq);
			fdrevoke(vq, DTYPE_VNODE, cred);
			/*v_release_rdev(vq);*/
			vrele(vq);
			if (vq->v_rdev != dev) {
				vdrop(vq);
				goto restart;
			}
		}
		vqn = SLIST_NEXT(vq, v_cdevnext);
		if (vqn)
			vhold(vqn);
		vdrop(vq);
	}
	lwkt_reltoken(&spechash_token);
	dev_drevoke(dev);
	release_dev(dev);
	return (0);
}

/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
 */
int
vrecycle(struct vnode *vp)
{
	if (VREFCNT(vp) <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}

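/*
 * Typical use (sketch; 'ip' and i_nlink are hypothetical per-fs inode
 * fields): a VFS's VOP_INACTIVE can attempt an immediate recycle of a
 * file whose last link was removed:
 *
 *	if (ip->i_nlink == 0)
 *		vrecycle(vp);
 */
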
/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 *
 * The returned value is clamped at MAXPHYS as most callers cannot use
 * buffers larger than that size.
 */
int
vmaxiosize(struct vnode *vp)
{
	int maxiosize;

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		maxiosize = vp->v_rdev->si_iosize_max;
	else
		maxiosize = vp->v_mount->mnt_iosize_max;

	if (maxiosize > MAXPHYS)
		maxiosize = MAXPHYS;
	return (maxiosize);
}

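/*
 * Example (illustrative): code issuing large transfers splits them into
 * chunks no larger than the per-vnode limit,
 *
 *	int chunk = vmaxiosize(vp);
 *
 * and then issues strategy calls of at most 'chunk' bytes each.
 */
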
/*
 * Eliminate all activity associated with a vnode in preparation for
 * destruction.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 * deactivated (VOP_INACTIVE), or on a vnode which has already been
 * reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * Assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(lockinuse(&vp->v_lock));

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 *
	 * The vnode should have automatically been removed from the syncer
	 * list as syncer/dirty flags cleared during the cleaning.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Normally panic if the vnode is still dirty, unless we are doing
	 * a forced unmount (tmpfs typically).
	 */
	if (vp->v_flag & VONWORKLST) {
		if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
			/* force removal */
			vn_syncer_remove(vp, 1);
		} else {
			panic("vp %p still dirty in vgone after flush", vp);
		}
	}

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;

	lwkt_gettoken(&spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&spechash_token);
			return (1);
		}
	}
	lwkt_reltoken(&spechash_token);
	return (0);
}

/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
 */
int
count_dev(cdev_t dev)
{
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			count += vp->v_opencount;
		}
		lwkt_reltoken(&spechash_token);
	}
	return(count);
}

int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}

/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff)
{
	vm_object_t object;
	int error = 0;

	object = vp->v_object;
	if (object) {
		vm_object_hold(object);
		KKASSERT(vp->v_object == object);
	}

	if (object == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff);

		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.  Allow it to
		 * have zero refs.  It cannot be destroyed as long as it
		 * is associated with the vnode.
		 */
		vm_object_hold(object);
		atomic_add_int(&object->ref_count, -1);
		vrele(vp);
	} else {
		KKASSERT((object->flags & OBJ_DEAD) == 0);
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vsetflags(vp, VOBJBUF);
	vm_object_drop(object);

	return (error);
}

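/*
 * Typical use (sketch; ip->i_size and MYFS_BLKSIZE are hypothetical
 * per-filesystem names): a VFS enables VMIO while instantiating a
 * regular-file vnode, e.g.
 *
 *	if (vp->v_type == VREG) {
 *		error = vinitvmio(vp, ip->i_size, MYFS_BLKSIZE, -1);
 *		if (error)
 *			return (error);
 *	}
 *
 * (Passing -1 for boff is assumed here to mean "no partial-block
 * offset".)
 */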

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, refcnt %08x, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}

/*
 * Do the usual access checking.  file_mode, uid and gid come from the
 * vnode in question, while acc_mode and cred come from the VOP_ACCESS
 * parameter list.
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;
	int ismember;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
	 */
	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	ismember = groupmember(gid, cred);
	if (cred->cr_svgid == gid || ismember) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}

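/*
 * Usage sketch (illustrative; 'ip' fields are hypothetical): a VFS
 * without ACLs can implement its access VOP directly on top of
 * vaccess(),
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *			ap->a_mode, ap->a_cred));
 */
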
1719 #ifdef DDB
1720 #include <ddb/ddb.h>
1721 
1722 static int db_show_locked_vnodes(struct mount *mp, void *data);
1723 
1724 /*
1725  * List all of the locked vnodes in the system.
1726  * Called when debugging the kernel.
1727  */
1728 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
1729 {
1730 	kprintf("Locked vnodes\n");
1731 	mountlist_scan(db_show_locked_vnodes, NULL,
1732 			MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1733 }
1734 
1735 static int
1736 db_show_locked_vnodes(struct mount *mp, void *data __unused)
1737 {
1738 	struct vnode *vp;
1739 
1740 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
1741 		if (vn_islocked(vp))
1742 			vprint(NULL, vp);
1743 	}
1744 	return(0);
1745 }
1746 #endif
1747 
1748 /*
1749  * Top level filesystem related information gathering.
1750  */
1751 static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);
1752 
1753 static int
1754 vfs_sysctl(SYSCTL_HANDLER_ARGS)
1755 {
1756 	int *name = (int *)arg1 - 1;	/* XXX */
1757 	u_int namelen = arg2 + 1;	/* XXX */
1758 	struct vfsconf *vfsp;
1759 	int maxtypenum;
1760 
1761 #if 1 || defined(COMPAT_PRELITE2)
1762 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
1763 	if (namelen == 1)
1764 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
1765 #endif
1766 
1767 #ifdef notyet
1768 	/* all sysctl names at this level are at least name and field */
1769 	if (namelen < 2)
1770 		return (ENOTDIR);		/* overloaded */
1771 	if (name[0] != VFS_GENERIC) {
1772 		vfsp = vfsconf_find_by_typenum(name[0]);
1773 		if (vfsp == NULL)
1774 			return (EOPNOTSUPP);
1775 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1776 		    oldp, oldlenp, newp, newlen, p));
1777 	}
1778 #endif
1779 	switch (name[1]) {
1780 	case VFS_MAXTYPENUM:
1781 		if (namelen != 2)
1782 			return (ENOTDIR);
1783 		maxtypenum = vfsconf_get_maxtypenum();
1784 		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
1785 	case VFS_CONF:
1786 		if (namelen != 3)
1787 			return (ENOTDIR);	/* overloaded */
1788 		vfsp = vfsconf_find_by_typenum(name[2]);
1789 		if (vfsp == NULL)
1790 			return (EOPNOTSUPP);
1791 		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
1792 	}
1793 	return (EOPNOTSUPP);
1794 }
1795 
1796 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
1797 	"Generic filesystem");
1798 
1799 #if 1 || defined(COMPAT_PRELITE2)
1800 
1801 static int
1802 sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
1803 {
1804 	int error;
1805 	struct ovfsconf ovfs;
1806 	struct sysctl_req *req = (struct sysctl_req*) data;
1807 
1808 	bzero(&ovfs, sizeof(ovfs));
1809 	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
1810 	strcpy(ovfs.vfc_name, vfsp->vfc_name);
1811 	ovfs.vfc_index = vfsp->vfc_typenum;
1812 	ovfs.vfc_refcount = vfsp->vfc_refcount;
1813 	ovfs.vfc_flags = vfsp->vfc_flags;
1814 	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
1815 	if (error)
1816 		return error; /* abort iteration with error code */
1817 	else
1818 		return 0; /* continue iterating with next element */
1819 }
1820 
1821 static int
1822 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
1823 {
1824 	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
1825 }
1826 
1827 #endif /* 1 || COMPAT_PRELITE2 */
1828 
1829 /*
1830  * Check to see if a filesystem is mounted on a block device.
1831  */
1832 int
1833 vfs_mountedon(struct vnode *vp)
1834 {
1835 	cdev_t dev;
1836 
1837 	if ((dev = vp->v_rdev) == NULL) {
1838 /*		if (vp->v_type != VBLK)
1839 			dev = get_dev(vp->v_uminor, vp->v_umajor); */
1840 	}
1841 	if (dev != NULL && dev->si_mountpoint)
1842 		return (EBUSY);
1843 	return (0);
1844 }
1845 
1846 /*
1847  * Unmount all filesystems. The list is traversed in reverse order
1848  * of mounting to avoid dependencies.
1849  *
1850  * We want the umountall to be able to break out of its loop if a
1851  * failure occurs, after scanning all possible mounts, so the callback
1852  * returns 0 on error.
1853  *
1854  * NOTE: Do not call mountlist_remove(mp) on error any more, this will
1855  *	 confuse mountlist_scan()'s unbusy check.
1856  */
1857 static int vfs_umountall_callback(struct mount *mp, void *data);
1858 
1859 void
1860 vfs_unmountall(void)
1861 {
1862 	int count;
1863 
1864 	do {
1865 		count = mountlist_scan(vfs_umountall_callback,
1866 					NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
1867 	} while (count);
1868 }
1869 
1870 static
1871 int
1872 vfs_umountall_callback(struct mount *mp, void *data)
1873 {
1874 	int error;
1875 
1876 	error = dounmount(mp, MNT_FORCE);
1877 	if (error) {
1878 		kprintf("unmount of filesystem mounted from %s failed (",
1879 			mp->mnt_stat.f_mntfromname);
1880 		if (error == EBUSY)
1881 			kprintf("BUSY)\n");
1882 		else
1883 			kprintf("%d)\n", error);
1884 		return 0;
1885 	} else {
1886 		return 1;
1887 	}
1888 }
1889 
1890 /*
1891  * Checks the mount flags for parameter mp and put the names comma-separated
1892  * into a string buffer buf with a size limit specified by len.
1893  *
1894  * It returns the number of bytes written into buf, and (*errorp) will be
1895  * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was
1896  * not large enough).  The buffer will be 0-terminated if len was not 0.
1897  */
1898 size_t
1899 vfs_flagstostr(int flags, const struct mountctl_opt *optp,
1900 	       char *buf, size_t len, int *errorp)
1901 {
1902 	static const struct mountctl_opt optnames[] = {
1903 		{ MNT_RDONLY,           "read-only" },
1904 		{ MNT_SYNCHRONOUS,      "synchronous" },
1905 		{ MNT_NOEXEC,           "noexec" },
1906 		{ MNT_NOSUID,           "nosuid" },
1907 		{ MNT_NODEV,            "nodev" },
1908 		{ MNT_AUTOMOUNTED,      "automounted" },
1909 		{ MNT_ASYNC,            "asynchronous" },
1910 		{ MNT_SUIDDIR,          "suiddir" },
1911 		{ MNT_SOFTDEP,          "soft-updates" },
1912 		{ MNT_NOSYMFOLLOW,      "nosymfollow" },
1913 		{ MNT_TRIM,             "trim" },
1914 		{ MNT_NOATIME,          "noatime" },
1915 		{ MNT_NOCLUSTERR,       "noclusterr" },
1916 		{ MNT_NOCLUSTERW,       "noclusterw" },
1917 		{ MNT_EXRDONLY,         "NFS read-only" },
1918 		{ MNT_EXPORTED,         "NFS exported" },
1919 		/* Remaining NFS flags could come here */
1920 		{ MNT_LOCAL,            "local" },
1921 		{ MNT_QUOTA,            "with-quotas" },
1922 		/* { MNT_ROOTFS,           "rootfs" }, */
1923 		/* { MNT_IGNORE,           "ignore" }, */
1924 		{ 0,			NULL}
1925 	};
1926 	int bwritten;
1927 	int bleft;
1928 	int optlen;
1929 	int actsize;
1930 
1931 	*errorp = 0;
1932 	bwritten = 0;
1933 	bleft = len - 1;	/* leave room for trailing \0 */
1934 
	if (bleft < 0) {	/* degenerate case, 0-length buffer */
		*errorp = EINVAL;
		return(0);
	}

	/*
	 * Check whether the buffer already contains data.  If it does,
	 * append the new flag names to the existing string and reduce
	 * the remaining space accordingly so we cannot overrun the
	 * caller's buffer.
	 */
	actsize = strlen(buf);
	if (actsize > 0) {
		buf += actsize;
		bleft -= actsize;
		if (bleft < 0)
			bleft = 0;	/* existing string already fills buf */
	}

	/* Default option table if the caller did not supply one */
	if (optp == NULL)
		optp = optnames;
1952 
1953 	for (; flags && optp->o_opt; ++optp) {
1954 		if ((flags & optp->o_opt) == 0)
1955 			continue;
1956 		optlen = strlen(optp->o_name);
1957 		if (bwritten || actsize > 0) {
1958 			if (bleft < 2) {
1959 				*errorp = ENOSPC;
1960 				break;
1961 			}
1962 			buf[bwritten++] = ',';
1963 			buf[bwritten++] = ' ';
1964 			bleft -= 2;
1965 		}
1966 		if (bleft < optlen) {
1967 			*errorp = ENOSPC;
1968 			break;
1969 		}
1970 		bcopy(optp->o_name, buf + bwritten, optlen);
1971 		bwritten += optlen;
1972 		bleft -= optlen;
1973 		flags &= ~optp->o_opt;
1974 	}
1975 
1976 	/*
1977 	 * Space already reserved for trailing \0
1978 	 */
1979 	buf[bwritten] = 0;
1980 	return (bwritten);
1981 }
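
/*
 * Usage sketch (hypothetical): format a mount's flags for display.  The
 * buffer must start out as a valid string because vfs_flagstostr()
 * appends to any existing contents:
 *
 *	char fbuf[128];
 *	int error;
 *
 *	fbuf[0] = '\0';
 *	vfs_flagstostr(mp->mnt_flag, NULL, fbuf, sizeof(fbuf), &error);
 *	if (error == 0)
 *		kprintf("%s: %s\n", mp->mnt_stat.f_mntonname, fbuf);
 */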
1982 
1983 /*
1984  * Build hash lists of net addresses and hang them off the mount point.
 * Called by vfs_export() to set up the lists of export addresses.
1986  */
1987 static int
1988 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1989 		const struct export_args *argp)
1990 {
1991 	struct netcred *np;
1992 	struct radix_node_head *rnh;
1993 	int i;
1994 	struct radix_node *rn;
1995 	struct sockaddr *saddr, *smask = NULL;
1996 	int error;
1997 
1998 	if (argp->ex_addrlen == 0) {
1999 		if (mp->mnt_flag & MNT_DEFEXPORTED)
2000 			return (EPERM);
2001 		np = &nep->ne_defexported;
2002 		np->netc_exflags = argp->ex_flags;
2003 		np->netc_anon = argp->ex_anon;
2004 		np->netc_anon.cr_ref = 1;
2005 		mp->mnt_flag |= MNT_DEFEXPORTED;
2006 		return (0);
2007 	}
2008 
2009 	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
2010 		return (EINVAL);
2011 	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
2012 		return (EINVAL);
2013 
2014 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
2015 	np = (struct netcred *)kmalloc(i, M_NETCRED, M_WAITOK | M_ZERO);
2016 	saddr = (struct sockaddr *) (np + 1);
2017 	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
2018 		goto out;
2019 	if (saddr->sa_len > argp->ex_addrlen)
2020 		saddr->sa_len = argp->ex_addrlen;
2021 	if (argp->ex_masklen) {
2022 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
2023 		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
2024 		if (error)
2025 			goto out;
2026 		if (smask->sa_len > argp->ex_masklen)
2027 			smask->sa_len = argp->ex_masklen;
2028 	}
	NE_LOCK(nep);
	if (nep->ne_maskhead == NULL) {
		if (!rn_inithead((void **)&nep->ne_maskhead, NULL, 0)) {
			NE_UNLOCK(nep);		/* don't leak the lock */
			error = ENOBUFS;
			goto out;
		}
	}
	if ((rnh = vfs_create_addrlist_af(saddr->sa_family, nep)) == NULL) {
		NE_UNLOCK(nep);		/* don't leak the lock */
		error = ENOBUFS;
		goto out;
	}
2040 	rn = (*rnh->rnh_addaddr)((char *)saddr, (char *)smask, rnh,
2041 				 np->netc_rnodes);
2042 	NE_UNLOCK(nep);
2043 	if (rn == NULL || np != (struct netcred *)rn) {	/* already exists */
2044 		error = EPERM;
2045 		goto out;
2046 	}
2047 	np->netc_exflags = argp->ex_flags;
2048 	np->netc_anon = argp->ex_anon;
2049 	np->netc_anon.cr_ref = 1;
2050 	return (0);
2051 
2052 out:
2053 	kfree(np, M_NETCRED);
2054 	return (error);
2055 }
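
/*
 * Sketch (hypothetical): a default entry matching any client address is
 * installed by passing ex_addrlen == 0.  A real caller reaches this
 * code through vfs_export() and would also supply an anonymous
 * credential in ex_anon; 'nep' is the filesystem's struct netexport:
 *
 *	struct export_args ea;
 *
 *	bzero(&ea, sizeof(ea));
 *	ea.ex_flags = MNT_EXPORTED | MNT_EXRDONLY;
 *	error = vfs_export(mp, nep, &ea);
 */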
2056 
2057 /*
2058  * Free netcred structures installed in the netexport
2059  */
2060 static int
2061 vfs_free_netcred(struct radix_node *rn, void *w)
2062 {
2063 	struct radix_node_head *rnh = (struct radix_node_head *)w;
2064 
2065 	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
2066 	kfree(rn, M_NETCRED);
2067 
2068 	return (0);
2069 }
2070 
2071 /*
2072  * callback to free an element of the mask table installed in the
2073  * netexport.  These may be created indirectly and are not netcred
2074  * structures.
2075  */
2076 static int
2077 vfs_free_netcred_mask(struct radix_node *rn, void *w)
2078 {
2079 	struct radix_node_head *rnh = (struct radix_node_head *)w;
2080 
2081 	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
2082 	kfree(rn, M_RTABLE);
2083 
2084 	return (0);
2085 }
2086 
2087 static struct radix_node_head *
2088 vfs_create_addrlist_af(int af, struct netexport *nep)
2089 {
2090 	struct radix_node_head *rnh = NULL;
2091 #if defined(INET) || defined(INET6)
2092 	struct radix_node_head *maskhead = nep->ne_maskhead;
2093 	int off;
2094 #endif
2095 
2096 	NE_ASSERT_LOCKED(nep);
#if defined(INET) || defined(INET6)
	KKASSERT(maskhead != NULL);	/* only declared for INET/INET6 */
#endif
2098 	switch (af) {
2099 #ifdef INET
2100 	case AF_INET:
2101 		if ((rnh = nep->ne_inethead) == NULL) {
2102 			off = offsetof(struct sockaddr_in, sin_addr) << 3;
2103 			if (!rn_inithead((void **)&rnh, maskhead, off))
2104 				return (NULL);
2105 			nep->ne_inethead = rnh;
2106 		}
2107 		break;
2108 #endif
2109 #ifdef INET6
2110 	case AF_INET6:
2111 		if ((rnh = nep->ne_inet6head) == NULL) {
2112 			off = offsetof(struct sockaddr_in6, sin6_addr) << 3;
2113 			if (!rn_inithead((void **)&rnh, maskhead, off))
2114 				return (NULL);
2115 			nep->ne_inet6head = rnh;
2116 		}
2117 		break;
2118 #endif
2119 	}
2120 	return (rnh);
2121 }
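
/*
 * Worked example of the bit-offset math above: for AF_INET the address
 * lives at offsetof(struct sockaddr_in, sin_addr) == 4 bytes, so the
 * radix tree is told to start key comparisons at bit 4 << 3 == 32,
 * skipping sin_len, sin_family and sin_port.
 */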
2122 
2123 /*
2124  * helper function for freeing netcred elements
2125  */
2126 static void
2127 vfs_free_addrlist_af(struct radix_node_head **prnh)
2128 {
2129 	struct radix_node_head *rnh = *prnh;
2130 
2131 	(*rnh->rnh_walktree) (rnh, vfs_free_netcred, rnh);
2132 	kfree(rnh, M_RTABLE);
2133 	*prnh = NULL;
2134 }
2135 
2136 /*
2137  * helper function for freeing mask elements
2138  */
2139 static void
2140 vfs_free_addrlist_masks(struct radix_node_head **prnh)
2141 {
2142 	struct radix_node_head *rnh = *prnh;
2143 
2144 	(*rnh->rnh_walktree) (rnh, vfs_free_netcred_mask, rnh);
2145 	kfree(rnh, M_RTABLE);
2146 	*prnh = NULL;
2147 }
2148 
2149 /*
2150  * Free the net address hash lists that are hanging off the mount points.
2151  */
2152 static void
2153 vfs_free_addrlist(struct netexport *nep)
2154 {
2155 	NE_LOCK(nep);
2156 	if (nep->ne_inethead != NULL)
2157 		vfs_free_addrlist_af(&nep->ne_inethead);
2158 	if (nep->ne_inet6head != NULL)
2159 		vfs_free_addrlist_af(&nep->ne_inet6head);
2160 	if (nep->ne_maskhead)
2161 		vfs_free_addrlist_masks(&nep->ne_maskhead);
2162 	NE_UNLOCK(nep);
2163 }
2164 
2165 int
2166 vfs_export(struct mount *mp, struct netexport *nep,
2167 	   const struct export_args *argp)
2168 {
2169 	int error;
2170 
2171 	if (argp->ex_flags & MNT_DELEXPORT) {
2172 		if (mp->mnt_flag & MNT_EXPUBLIC) {
2173 			vfs_setpublicfs(NULL, NULL, NULL);
2174 			mp->mnt_flag &= ~MNT_EXPUBLIC;
2175 		}
2176 		vfs_free_addrlist(nep);
2177 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2178 	}
2179 	if (argp->ex_flags & MNT_EXPORTED) {
2180 		if (argp->ex_flags & MNT_EXPUBLIC) {
2181 			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
2182 				return (error);
2183 			mp->mnt_flag |= MNT_EXPUBLIC;
2184 		}
2185 		if ((error = vfs_hang_addrlist(mp, nep, argp)))
2186 			return (error);
2187 		mp->mnt_flag |= MNT_EXPORTED;
2188 	}
2189 	return (0);
2190 }
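
/*
 * Usage sketch (hypothetical): an NFS export-update path might flush
 * the current exports and then install a new default entry.  'nep' is
 * the filesystem's struct netexport; where it lives is fs-specific:
 *
 *	struct export_args ea;
 *
 *	bzero(&ea, sizeof(ea));
 *	ea.ex_flags = MNT_DELEXPORT;
 *	(void)vfs_export(mp, nep, &ea);
 *	ea.ex_flags = MNT_EXPORTED;
 *	error = vfs_export(mp, nep, &ea);
 */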
2191 
2192 
2193 /*
2194  * Set the publicly exported filesystem (WebNFS). Currently, only
 * one public filesystem is possible per the spec (RFC 2054 and RFC 2055).
2196  */
2197 int
2198 vfs_setpublicfs(struct mount *mp, struct netexport *nep,
2199 		const struct export_args *argp)
2200 {
2201 	int error;
2202 	struct vnode *rvp;
2203 	char *cp;
2204 
2205 	/*
2206 	 * mp == NULL -> invalidate the current info, the FS is
2207 	 * no longer exported. May be called from either vfs_export
2208 	 * or unmount, so check if it hasn't already been done.
2209 	 */
2210 	if (mp == NULL) {
2211 		if (nfs_pub.np_valid) {
2212 			nfs_pub.np_valid = 0;
2213 			if (nfs_pub.np_index != NULL) {
2214 				kfree(nfs_pub.np_index, M_TEMP);
2215 				nfs_pub.np_index = NULL;
2216 			}
2217 		}
2218 		return (0);
2219 	}
2220 
2221 	/*
2222 	 * Only one allowed at a time.
2223 	 */
2224 	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
2225 		return (EBUSY);
2226 
2227 	/*
2228 	 * Get real filehandle for root of exported FS.
2229 	 */
2230 	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
2231 	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;
2232 
2233 	if ((error = VFS_ROOT(mp, &rvp)))
2234 		return (error);
2235 
	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) {
		vput(rvp);	/* release the root vnode on error too */
		return (error);
	}
2240 
2241 	/*
2242 	 * If an indexfile was specified, pull it in.
2243 	 */
2244 	if (argp->ex_indexfile != NULL) {
2245 		int namelen;
2246 
2247 		error = vn_get_namelen(rvp, &namelen);
		if (error) {
			vput(rvp);
			return (error);
		}
2250 		nfs_pub.np_index = kmalloc(namelen, M_TEMP, M_WAITOK);
2251 		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
2252 		    namelen, NULL);
2253 		if (!error) {
2254 			/*
2255 			 * Check for illegal filenames.
2256 			 */
2257 			for (cp = nfs_pub.np_index; *cp; cp++) {
2258 				if (*cp == '/') {
2259 					error = EINVAL;
2260 					break;
2261 				}
2262 			}
2263 		}
		if (error) {
			kfree(nfs_pub.np_index, M_TEMP);
			nfs_pub.np_index = NULL;	/* avoid dangling pointer */
			vput(rvp);
			return (error);
		}
	}

	vput(rvp);	/* done with the root vnode */
2269 
2270 	nfs_pub.np_mount = mp;
2271 	nfs_pub.np_valid = 1;
2272 	return (0);
2273 }
2274 
2275 struct netcred *
2276 vfs_export_lookup(struct mount *mp, struct netexport *nep,
2277 		struct sockaddr *nam)
2278 {
2279 	struct netcred *np;
2280 	struct radix_node_head *rnh;
2281 	struct sockaddr *saddr;
2282 
2283 	np = NULL;
2284 	if (mp->mnt_flag & MNT_EXPORTED) {
2285 		/*
2286 		 * Lookup in the export list first.
2287 		 */
2288 		NE_LOCK(nep);
2289 		if (nam != NULL) {
2290 			saddr = nam;
2291 			switch (saddr->sa_family) {
2292 #ifdef INET
2293 			case AF_INET:
2294 				rnh = nep->ne_inethead;
2295 				break;
2296 #endif
2297 #ifdef INET6
2298 			case AF_INET6:
2299 				rnh = nep->ne_inet6head;
2300 				break;
2301 #endif
2302 			default:
2303 				rnh = NULL;
2304 			}
2305 			if (rnh != NULL) {
2306 				np = (struct netcred *)
2307 					(*rnh->rnh_matchaddr)((char *)saddr,
2308 							      rnh);
2309 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2310 					np = NULL;
2311 			}
2312 		}
2313 		NE_UNLOCK(nep);
2314 		/*
2315 		 * If no address match, use the default if it exists.
2316 		 */
2317 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
2318 			np = &nep->ne_defexported;
2319 	}
2320 	return (np);
2321 }
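
/*
 * Usage sketch (hypothetical): an NFS server request path resolves the
 * client's sockaddr ('nam' here is assumed) to its export credentials
 * and rejects clients that match no export:
 *
 *	struct netcred *np;
 *
 *	np = vfs_export_lookup(mp, nep, nam);
 *	if (np == NULL)
 *		return (EACCES);	(client not exported to)
 *	(then consult np->netc_anon and np->netc_exflags)
 */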
2322 
2323 /*
 * Perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
2327  *
2328  * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
2329  *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 * but vnode_pager_putpages() doesn't lock the vnode.  We have to do it
 * up here in this high-level function.
2333  */
2334 static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
2335 static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);
2336 
2337 void
2338 vfs_msync(struct mount *mp, int flags)
2339 {
2340 	int vmsc_flags;
2341 
2342 	/*
2343 	 * tmpfs sets this flag to prevent msync(), sync, and the
2344 	 * filesystem periodic syncer from trying to flush VM pages
2345 	 * to swap.  Only pure memory pressure flushes tmpfs VM pages
2346 	 * to swap.
2347 	 */
2348 	if (mp->mnt_kern_flag & MNTK_NOMSYNC)
2349 		return;
2350 
2351 	/*
2352 	 * Ok, scan the vnodes for work.  If the filesystem is using the
2353 	 * syncer thread feature we can use vsyncscan() instead of
2354 	 * vmntvnodescan(), which is much faster.
2355 	 */
2356 	vmsc_flags = VMSC_GETVP;
2357 	if (flags != MNT_WAIT)
2358 		vmsc_flags |= VMSC_NOWAIT;
2359 
2360 	if (mp->mnt_kern_flag & MNTK_THR_SYNC) {
2361 		vsyncscan(mp, vmsc_flags, vfs_msync_scan2,
2362 			  (void *)(intptr_t)flags);
2363 	} else {
2364 		vmntvnodescan(mp, vmsc_flags,
2365 			      vfs_msync_scan1, vfs_msync_scan2,
2366 			      (void *)(intptr_t)flags);
2367 	}
2368 }
2369 
2370 /*
2371  * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes; we cannot afford to do anything heavy-weight until we have a
2373  * fairly good indication that there is work to do.
2374  */
2375 static
2376 int
2377 vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
2378 {
2379 	int flags = (int)(intptr_t)data;
2380 
2381 	if ((vp->v_flag & VRECLAIMED) == 0) {
2382 		if (vp->v_auxrefs == 0 && VREFCNT(vp) <= 0 &&
2383 		    vp->v_object) {
2384 			return(0);	/* call scan2 */
2385 		}
2386 		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
2387 		    (vp->v_flag & VOBJDIRTY) &&
2388 		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
2389 			return(0);	/* call scan2 */
2390 		}
2391 	}
2392 
2393 	/*
2394 	 * do not call scan2, continue the loop
2395 	 */
2396 	return(-1);
2397 }
2398 
2399 /*
2400  * This callback is handed a locked vnode.
2401  */
2402 static
2403 int
2404 vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
2405 {
2406 	vm_object_t obj;
2407 	int flags = (int)(intptr_t)data;
2408 
2409 	if (vp->v_flag & VRECLAIMED)
2410 		return(0);
2411 
2412 	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
2413 		if ((obj = vp->v_object) != NULL) {
2414 			vm_object_page_clean(obj, 0, 0,
2415 			 flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
2416 		}
2417 	}
2418 	return(0);
2419 }
2420 
2421 /*
2422  * Wake up anyone interested in vp because it is being revoked.
2423  */
2424 void
2425 vn_gone(struct vnode *vp)
2426 {
2427 	lwkt_gettoken(&vp->v_token);
2428 	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE);
2429 	lwkt_reltoken(&vp->v_token);
2430 }
2431 
2432 /*
2433  * extract the cdev_t from a VBLK or VCHR.  The vnode must have been opened
2434  * (or v_rdev might be NULL).
2435  */
2436 cdev_t
2437 vn_todev(struct vnode *vp)
2438 {
2439 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2440 		return (NULL);
2441 	KKASSERT(vp->v_rdev != NULL);
2442 	return (vp->v_rdev);
2443 }
2444 
2445 /*
2446  * Check if vnode represents a disk device.  The vnode does not need to be
2447  * opened.
2448  *
2449  * MPALMOSTSAFE
2450  */
2451 int
2452 vn_isdisk(struct vnode *vp, int *errp)
2453 {
2454 	cdev_t dev;
2455 
2456 	if (vp->v_type != VCHR) {
2457 		if (errp != NULL)
2458 			*errp = ENOTBLK;
2459 		return (0);
2460 	}
2461 
2462 	dev = vp->v_rdev;
2463 
2464 	if (dev == NULL) {
2465 		if (errp != NULL)
2466 			*errp = ENXIO;
2467 		return (0);
2468 	}
2469 	if (dev_is_good(dev) == 0) {
2470 		if (errp != NULL)
2471 			*errp = ENXIO;
2472 		return (0);
2473 	}
2474 	if ((dev_dflags(dev) & D_DISK) == 0) {
2475 		if (errp != NULL)
2476 			*errp = ENOTBLK;
2477 		return (0);
2478 	}
2479 	if (errp != NULL)
2480 		*errp = 0;
2481 	return (1);
2482 }
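
/*
 * Usage sketch (hypothetical): filesystem mount code typically
 * validates a device vnode ('devvp' here is assumed) with vn_isdisk()
 * before attempting a disk mount:
 *
 *	int error;
 *
 *	if (!vn_isdisk(devvp, &error))
 *		return (error);		(ENOTBLK or ENXIO)
 */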
2483 
2484 int
2485 vn_get_namelen(struct vnode *vp, int *namelen)
2486 {
2487 	int error;
2488 	register_t retval[2];
2489 
2490 	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
2491 	if (error)
2492 		return (error);
2493 	*namelen = (int)retval[0];
2494 	return (0);
2495 }
2496 
2497 int
2498 vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
2499 		uint16_t d_namlen, const char *d_name)
2500 {
2501 	struct dirent *dp;
2502 	size_t len;
2503 
2504 	len = _DIRENT_RECLEN(d_namlen);
2505 	if (len > uio->uio_resid)
2506 		return(1);
2507 
2508 	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);
2509 
2510 	dp->d_ino = d_ino;
2511 	dp->d_namlen = d_namlen;
2512 	dp->d_type = d_type;
2513 	bcopy(d_name, dp->d_name, d_namlen);
2514 
2515 	*error = uiomove((caddr_t)dp, len, uio);
2516 
2517 	kfree(dp, M_TEMP);
2518 
2519 	return(0);
2520 }
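
/*
 * Usage sketch (hypothetical): a VOP_READDIR implementation emits one
 * entry per call until vop_write_dirent() returns 1 (uio exhausted,
 * which is not an error) or sets *error:
 *
 *	while (more entries) {
 *		if (vop_write_dirent(&error, uio, ino, DT_REG,
 *				     namelen, name))
 *			break;		(no room left in uio)
 *		if (error)
 *			break;
 *	}
 */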
2521 
2522 void
2523 vn_mark_atime(struct vnode *vp, struct thread *td)
2524 {
2525 	struct proc *p = td->td_proc;
2526 	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;
2527 
2528 	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
2529 		VOP_MARKATIME(vp, cred);
2530 	}
2531 }
2532 
2533 /*
2534  * Calculate the number of entries in an inode-related chained hash table.
2535  * With today's memory sizes, maxvnodes can wind up being a very large
2536  * number.  There is no reason to waste memory, so tolerate some stacking.
2537  */
2538 int
2539 vfs_inodehashsize(void)
2540 {
2541 	int hsize;
2542 
2543 	hsize = 32;
2544 	while (hsize < maxvnodes)
2545 		hsize <<= 1;
2546 	while (hsize > maxvnodes * 2)
2547 		hsize >>= 1;		/* nominal 2x stacking */
2548 
2549 	if (maxvnodes > 1024 * 1024)
2550 		hsize >>= 1;		/* nominal 8x stacking */
2551 
2552 	if (maxvnodes > 128 * 1024)
2553 		hsize >>= 1;		/* nominal 4x stacking */
2554 
2555 	if (hsize < 16)
2556 		hsize = 16;
2557 
2558 	return hsize;
2559 }
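
/*
 * Worked example: with maxvnodes = 500000 the doubling loop stops at
 * hsize = 524288 (2^19), the 2x-stacking loop leaves it alone (524288
 * is not > 1000000), the 1M test does not apply, and the 128K test
 * halves it to 262144, i.e. roughly two inodes chained per bucket.
 */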
2560