1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35  * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
36  */
37 
38 /*
39  * External virtual filesystem routines
40  */
41 #include "opt_ddb.h"
42 #include "opt_inet.h"
43 #include "opt_inet6.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/uio.h>
48 #include <sys/buf.h>
49 #include <sys/conf.h>
50 #include <sys/dirent.h>
51 #include <sys/eventhandler.h>
52 #include <sys/fcntl.h>
53 #include <sys/file.h>
54 #include <sys/kernel.h>
55 #include <sys/kthread.h>
56 #include <sys/malloc.h>
57 #include <sys/mbuf.h>
58 #include <sys/mount.h>
59 #include <sys/priv.h>
60 #include <sys/proc.h>
61 #include <sys/reboot.h>
62 #include <sys/socket.h>
63 #include <sys/stat.h>
64 #include <sys/sysctl.h>
65 #include <sys/syslog.h>
66 #include <sys/unistd.h>
67 #include <sys/vmmeter.h>
68 #include <sys/vnode.h>
69 
70 #include <machine/limits.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_object.h>
74 #include <vm/vm_extern.h>
75 #include <vm/vm_kern.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_page.h>
79 #include <vm/vm_pager.h>
80 #include <vm/vnode_pager.h>
81 #include <vm/vm_zone.h>
82 
83 #include <sys/buf2.h>
84 #include <sys/mplock2.h>
85 #include <vm/vm_page2.h>
86 
87 #include <netinet/in.h>
88 
89 static MALLOC_DEFINE(M_NETCRED, "Export Host", "Export host address structure");
90 
91 __read_mostly int numvnodes;
92 SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
93     "Number of vnodes allocated");
94 __read_mostly int verbose_reclaims;
95 SYSCTL_INT(_debug, OID_AUTO, verbose_reclaims, CTLFLAG_RD, &verbose_reclaims, 0,
96     "Output filename of reclaimed vnode(s)");
97 
98 __read_mostly enum vtype iftovt_tab[16] = {
99 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
100 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
101 };
102 __read_mostly int vttoif_tab[9] = {
103 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
104 	S_IFSOCK, S_IFIFO, S_IFMT,
105 };
106 
107 static int reassignbufcalls;
108 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls,
109     0, "Number of times buffers have been reassigned to the proper list");
110 
111 __read_mostly static int check_buf_overlap = 2;	/* invasive check */
112 SYSCTL_INT(_vfs, OID_AUTO, check_buf_overlap, CTLFLAG_RW, &check_buf_overlap,
113     0, "Enable overlapping buffer checks");
114 
115 int	nfs_mount_type = -1;
116 static struct lwkt_token spechash_token;
117 struct nfs_public nfs_pub;	/* publicly exported FS */
118 
119 __read_mostly int maxvnodes;
120 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
121 	   &maxvnodes, 0, "Maximum number of vnodes");
122 
123 static struct radix_node_head *vfs_create_addrlist_af(int af,
124 		    struct netexport *nep);
125 static void	vfs_free_addrlist (struct netexport *nep);
126 static int	vfs_free_netcred (struct radix_node *rn, void *w);
127 static void	vfs_free_addrlist_af (struct radix_node_head **prnh);
128 static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
129 	            const struct export_args *argp);
130 
131 __read_mostly int prtactive = 0; /* 1 => print out reclaim of active vnodes */
132 
133 /*
134  * Red black tree functions
135  */
136 static int rb_buf_compare(struct buf *b1, struct buf *b2);
137 RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
138 RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);
139 
140 static int
141 rb_buf_compare(struct buf *b1, struct buf *b2)
142 {
143 	if (b1->b_loffset < b2->b_loffset)
144 		return(-1);
145 	if (b1->b_loffset > b2->b_loffset)
146 		return(1);
147 	return(0);
148 }
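
/*
 * Illustrative sketch (not from the original source): the RB_GENERATE2
 * lines above emit typed tree operations keyed on b_loffset, including
 * a by-key lookup.  A search of the per-vnode buffer hash tree would
 * look roughly like this, assuming vp->v_token is already held.
 */
#if 0
static struct buf *
example_findbuf(struct vnode *vp, off_t loffset)
{
	/* O(log n) descent keyed directly on the logical offset */
	return (buf_rb_hash_RB_LOOKUP(&vp->v_rbhash_tree, loffset));
}
#endif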
149 
150 /*
151  * Initialize the vnode management data structures.
152  *
153  * Called from vfsinit()
154  */
155 #define MAXVNBREAKMEM	(1L * 1024 * 1024 * 1024)
156 #define MINVNODES	2000
157 #define MAXVNODES	4000000
158 
159 void
160 vfs_subr_init(void)
161 {
162 	int factor1;	/* Limit based on ram (x 2 above 1GB) */
163 	int factor2;	/* Limit based on available KVM */
164 	size_t freemem;
165 
166 	/*
167 	 * Size maxvnodes to available memory.  Size significantly
168 	 * smaller on low-memory systems (calculations for the first
169 	 * 1GB of ram), and pump it up a bit when free memory is
170 	 * above 1GB.
171 	 *
172 	 * The general minimum is maxproc * 8 (we want someone pushing
173 	 * up maxproc a lot to also get more vnodes).  Usually maxproc
174 	 * does not affect this calculation.
175 	 *
176 	 * There isn't much of a point allowing maxvnodes to exceed a
177 	 * few million as our modern filesystems cache pages in the
178 	 * underlying block device and not so much hanging off of VM
179 	 * objects.
180 	 */
181 	factor1 = 50 * (sizeof(struct vm_object) + sizeof(struct vnode));
182 	factor2 = 30 * (sizeof(struct vm_object) + sizeof(struct vnode));
183 
184 	freemem = (int64_t)vmstats.v_page_count * PAGE_SIZE;
185 
186 	maxvnodes = freemem / factor1;
187 	if (freemem > MAXVNBREAKMEM)
188 		maxvnodes += (freemem - MAXVNBREAKMEM) / factor1;
189 	maxvnodes = imax(maxvnodes, maxproc * 8);
190 	maxvnodes = imin(maxvnodes, KvaSize / factor2);
191 	maxvnodes = imin(maxvnodes, MAXVNODES);
192 	maxvnodes = imax(maxvnodes, MINVNODES);
193 
194 	lwkt_token_init(&spechash_token, "spechash");
195 }
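
/*
 * Worked example (all figures are illustrative assumptions): if
 * sizeof(struct vm_object) + sizeof(struct vnode) comes to roughly
 * 600 bytes, factor1 is ~30000.  An 8GB machine then gets
 * freemem / factor1 ~= 286000 vnodes from the base term, plus
 * (8GB - 1GB) / 30000 ~= 250000 from the above-1GB bonus, which is
 * still well below the hard MAXVNODES cap of 4 million.
 */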
196 
197 /*
198  * Knob to control the precision of file timestamps:
199  *
200  *   0 = seconds only; nanoseconds zeroed.
201  *   1 = seconds and nanoseconds, accurate within 1/HZ.
202  *   2 = seconds and nanoseconds, truncated to microseconds.
203  * >=3 = seconds and nanoseconds, maximum precision.
204  */
205 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
206 
207 __read_mostly static int timestamp_precision = TSP_SEC;
208 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
209 		&timestamp_precision, 0, "Precision of file timestamps");
210 
211 /*
212  * Get a current timestamp.
213  *
214  * MPSAFE
215  */
216 void
217 vfs_timestamp(struct timespec *tsp)
218 {
219 	struct timeval tv;
220 
221 	switch (timestamp_precision) {
222 	case TSP_SEC:
223 		tsp->tv_sec = time_second;
224 		tsp->tv_nsec = 0;
225 		break;
226 	case TSP_HZ:
227 		getnanotime(tsp);
228 		break;
229 	case TSP_USEC:
230 		microtime(&tv);
231 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
232 		break;
233 	case TSP_NSEC:
234 	default:
235 		nanotime(tsp);
236 		break;
237 	}
238 }
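
/*
 * Usage note: the precision knob above is CTLFLAG_RW, so it can be
 * tuned at runtime from userland, e.g.:
 *
 *	sysctl vfs.timestamp_precision=2	(truncate to microseconds)
 */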
239 
240 /*
241  * Set vnode attributes to VNOVAL
242  */
243 void
244 vattr_null(struct vattr *vap)
245 {
246 	vap->va_type = VNON;
247 	vap->va_size = VNOVAL;
248 	vap->va_bytes = VNOVAL;
249 	vap->va_mode = VNOVAL;
250 	vap->va_nlink = VNOVAL;
251 	vap->va_uid = VNOVAL;
252 	vap->va_gid = VNOVAL;
253 	vap->va_fsid = VNOVAL;
254 	vap->va_fileid = VNOVAL;
255 	vap->va_blocksize = VNOVAL;
256 	vap->va_rmajor = VNOVAL;
257 	vap->va_rminor = VNOVAL;
258 	vap->va_atime.tv_sec = VNOVAL;
259 	vap->va_atime.tv_nsec = VNOVAL;
260 	vap->va_mtime.tv_sec = VNOVAL;
261 	vap->va_mtime.tv_nsec = VNOVAL;
262 	vap->va_ctime.tv_sec = VNOVAL;
263 	vap->va_ctime.tv_nsec = VNOVAL;
264 	vap->va_flags = VNOVAL;
265 	vap->va_gen = VNOVAL;
266 	vap->va_vaflags = 0;
267 	/* va_*_uuid fields are only valid if related flags are set */
268 }
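
/*
 * Sketch of the typical calling pattern (an assumption, not a caller
 * from this file): a VFS neutralizes the whole vattr, then fills in
 * only the fields that VOP_SETATTR should act on.  new_size and cred
 * are illustrative placeholders.
 */
#if 0
	struct vattr va;

	vattr_null(&va);		/* every field becomes VNOVAL */
	va.va_size = new_size;		/* request only a size change */
	error = VOP_SETATTR(vp, &va, cred);
#endif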
269 
270 /*
271  * Flush out and invalidate all buffers associated with a vnode.
272  *
273  * vp must be locked.
274  */
275 static int vinvalbuf_bp(struct buf *bp, void *data);
276 
277 struct vinvalbuf_bp_info {
278 	struct vnode *vp;
279 	int slptimeo;
280 	int lkflags;
281 	int flags;
282 	int clean;
283 };
284 
285 int
286 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
287 {
288 	struct vinvalbuf_bp_info info;
289 	vm_object_t object;
290 	int error;
291 
292 	lwkt_gettoken(&vp->v_token);
293 
294 	/*
295 	 * If we are being asked to save, call fsync to ensure that the inode
296 	 * is updated.
297 	 */
298 	if (flags & V_SAVE) {
299 		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
300 		if (error)
301 			goto done;
302 		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
303 			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
304 				goto done;
305 #if 0
306 			/*
307 			 * Dirty bufs may be left or generated via races
308 			 * in circumstances where vinvalbuf() is called on
309 			 * a vnode not undergoing reclamation.   Only
310 			 * panic if we are trying to reclaim the vnode.
311 			 */
312 			if ((vp->v_flag & VRECLAIMED) &&
313 			    (bio_track_active(&vp->v_track_write) ||
314 			    !RB_EMPTY(&vp->v_rbdirty_tree))) {
315 				panic("vinvalbuf: dirty bufs");
316 			}
317 #endif
318 		}
319 	}
320 	info.slptimeo = slptimeo;
321 	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
322 	if (slpflag & PCATCH)
323 		info.lkflags |= LK_PCATCH;
324 	info.flags = flags;
325 	info.vp = vp;
326 
327 	/*
328 	 * Flush the buffer cache until nothing is left, wait for all I/O
329 	 * to complete.  At least one pass is required.  We might block
330 	 * in the pip code so we have to re-check.  Order is important.
331 	 */
332 	do {
333 		/*
334 		 * Flush buffer cache
335 		 */
336 		if (!RB_EMPTY(&vp->v_rbclean_tree)) {
337 			info.clean = 1;
338 			error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
339 					NULL, vinvalbuf_bp, &info);
340 		}
341 		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
342 			info.clean = 0;
343 			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
344 					NULL, vinvalbuf_bp, &info);
345 		}
346 
347 		/*
348 		 * Wait for I/O completion.
349 		 */
350 		bio_track_wait(&vp->v_track_write, 0, 0);
351 		if ((object = vp->v_object) != NULL)
352 			refcount_wait(&object->paging_in_progress, "vnvlbx");
353 	} while (bio_track_active(&vp->v_track_write) ||
354 		 !RB_EMPTY(&vp->v_rbclean_tree) ||
355 		 !RB_EMPTY(&vp->v_rbdirty_tree));
356 
357 	/*
358 	 * Destroy the copy in the VM cache, too.
359 	 */
360 	if ((object = vp->v_object) != NULL) {
361 		vm_object_page_remove(object, 0, 0,
362 			(flags & V_SAVE) ? TRUE : FALSE);
363 	}
364 
365 	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
366 		panic("vinvalbuf: flush failed");
367 	if (!RB_EMPTY(&vp->v_rbhash_tree))
368 		panic("vinvalbuf: flush failed, buffers still present");
369 	error = 0;
370 done:
371 	lwkt_reltoken(&vp->v_token);
372 	return (error);
373 }
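
/*
 * Typical invocation, matching the calls made from vclean_vxlocked()
 * later in this file: save dirty data first (V_SAVE), with no PCATCH
 * and no timeout.
 */
#if 0
	error = vinvalbuf(vp, V_SAVE, 0, 0);
#endif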
374 
375 static int
376 vinvalbuf_bp(struct buf *bp, void *data)
377 {
378 	struct vinvalbuf_bp_info *info = data;
379 	int error;
380 
381 	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
382 		atomic_add_int(&bp->b_refs, 1);
383 		error = BUF_TIMELOCK(bp, info->lkflags,
384 				     "vinvalbuf", info->slptimeo);
385 		atomic_subtract_int(&bp->b_refs, 1);
386 		if (error == 0) {
387 			BUF_UNLOCK(bp);
388 			error = ENOLCK;
389 		}
390 		if (error == ENOLCK)
391 			return(0);
392 		return (-error);
393 	}
394 	KKASSERT(bp->b_vp == info->vp);
395 
396 	/*
397 	 * Must check clean/dirty status after successfully locking as
398 	 * it may race.
399 	 */
400 	if ((info->clean && (bp->b_flags & B_DELWRI)) ||
401 	    (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0)) {
402 		BUF_UNLOCK(bp);
403 		return(0);
404 	}
405 
406 	/*
407 	 * NOTE:  NO B_LOCKED CHECK.  Also no buf_checkwrite()
408 	 * check.  This code will write out the buffer, period.
409 	 */
410 	bremfree(bp);
411 	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
412 	    (info->flags & V_SAVE)) {
413 		cluster_awrite(bp);
414 	} else if (info->flags & V_SAVE) {
415 		/*
416 		 * Cannot set B_NOCACHE on a clean buffer as this will
417 		 * destroy the VM backing store which might actually
418 		 * be dirty (and unsynchronized).
419 		 */
420 		bp->b_flags |= (B_INVAL | B_RELBUF);
421 		brelse(bp);
422 	} else {
423 		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
424 		brelse(bp);
425 	}
426 	return(0);
427 }
428 
429 /*
430  * Truncate a file's buffer and pages to a specified length.  This
431  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
432  * sync activity.
433  *
434  * The vnode must be locked.
435  */
436 static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
437 static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
438 static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
439 static int vtruncbuf_bp_metasync(struct buf *bp, void *data);
440 
441 struct vtruncbuf_info {
442 	struct vnode *vp;
443 	off_t	truncloffset;
444 	int	clean;
445 };
446 
447 int
448 vtruncbuf(struct vnode *vp, off_t length, int blksize)
449 {
450 	struct vtruncbuf_info info;
451 	const char *filename;
452 	int count;
453 
454 	/*
455 	 * Round up to the *next* block, then destroy the buffers in question.
456 	 * Since we are only removing some of the buffers we must rely on the
457 	 * scan count to determine whether a loop is necessary.
458 	 */
459 	if ((count = (int)(length % blksize)) != 0)
460 		info.truncloffset = length + (blksize - count);
461 	else
462 		info.truncloffset = length;
463 	info.vp = vp;
464 
465 	lwkt_gettoken(&vp->v_token);
466 	do {
467 		info.clean = 1;
468 		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
469 				vtruncbuf_bp_trunc_cmp,
470 				vtruncbuf_bp_trunc, &info);
471 		info.clean = 0;
472 		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
473 				vtruncbuf_bp_trunc_cmp,
474 				vtruncbuf_bp_trunc, &info);
475 	} while(count);
476 
477 	/*
478 	 * For safety, fsync any remaining metadata if the file is not being
479 	 * truncated to 0.  Since the metadata does not represent the entire
480 	 * dirty list we have to rely on the hit count to ensure that we get
481 	 * all of it.
482 	 */
483 	if (length > 0) {
484 		do {
485 			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
486 					vtruncbuf_bp_metasync_cmp,
487 					vtruncbuf_bp_metasync, &info);
488 		} while (count);
489 	}
490 
491 	/*
492 	 * Clean out any left over VM backing store.
493 	 *
494 	 * It is possible to have in-progress I/O from buffers that were
495 	 * not part of the truncation.  This should not happen if we
496 	 * are truncating to 0-length.
497 	 */
498 	vnode_pager_setsize(vp, length);
499 	bio_track_wait(&vp->v_track_write, 0, 0);
500 
501 	/*
502 	 * Debugging only
503 	 */
504 	spin_lock(&vp->v_spin);
505 	filename = TAILQ_FIRST(&vp->v_namecache) ?
506 		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
507 	spin_unlock(&vp->v_spin);
508 
509 	/*
510 	 * Make sure no buffers were instantiated while we were trying
511 	 * to clean out the remaining VM pages.  This could occur due
512 	 * to busy dirty VM pages being flushed out to disk.
513 	 */
514 	do {
515 		info.clean = 1;
516 		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
517 				vtruncbuf_bp_trunc_cmp,
518 				vtruncbuf_bp_trunc, &info);
519 		info.clean = 0;
520 		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
521 				vtruncbuf_bp_trunc_cmp,
522 				vtruncbuf_bp_trunc, &info);
523 		if (count) {
524 			kprintf("Warning: vtruncbuf():  Had to re-clean %d "
525 			       "left over buffers in %s\n", count, filename);
526 		}
527 	} while(count);
528 
529 	lwkt_reltoken(&vp->v_token);
530 
531 	return (0);
532 }
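
/*
 * Worked example of the round-up above: with length 5000 and blksize
 * 4096, count = 5000 % 4096 = 904, so truncloffset becomes
 * 5000 + (4096 - 904) = 8192 and the partial final block survives.
 * A filesystem's truncate path would call this roughly as follows
 * (new_eof and fs_blksize are illustrative placeholders):
 */
#if 0
	error = vtruncbuf(vp, new_eof, fs_blksize);
#endif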
533 
534 /*
535  * The callback buffer is beyond the new file EOF and must be destroyed.
536  * Note that the compare function must conform to the RB_SCAN's requirements.
537  */
538 static
539 int
540 vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
541 {
542 	struct vtruncbuf_info *info = data;
543 
544 	if (bp->b_loffset >= info->truncloffset)
545 		return(0);
546 	return(-1);
547 }
548 
549 static
550 int
551 vtruncbuf_bp_trunc(struct buf *bp, void *data)
552 {
553 	struct vtruncbuf_info *info = data;
554 
555 	/*
556 	 * Do not try to use a buffer we cannot immediately lock, but sleep
557 	 * anyway to prevent a livelock.  The code will loop until all buffers
558 	 * can be acted upon.
559 	 *
560 	 * We must always revalidate the buffer after locking it to deal
561 	 * with MP races.
562 	 */
563 	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
564 		atomic_add_int(&bp->b_refs, 1);
565 		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
566 			BUF_UNLOCK(bp);
567 		atomic_subtract_int(&bp->b_refs, 1);
568 	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
569 		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
570 		   bp->b_vp != info->vp ||
571 		   vtruncbuf_bp_trunc_cmp(bp, data)) {
572 		BUF_UNLOCK(bp);
573 	} else {
574 		bremfree(bp);
575 		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
576 		brelse(bp);
577 	}
578 	return(1);
579 }
580 
581 /*
582  * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
583  * blocks (with a negative loffset) are scanned.
584  * Note that the compare function must conform to the RB_SCAN's requirements.
585  */
586 static int
587 vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused)
588 {
589 	if (bp->b_loffset < 0)
590 		return(0);
591 	return(1);
592 }
593 
594 static int
595 vtruncbuf_bp_metasync(struct buf *bp, void *data)
596 {
597 	struct vtruncbuf_info *info = data;
598 
599 	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
600 		atomic_add_int(&bp->b_refs, 1);
601 		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
602 			BUF_UNLOCK(bp);
603 		atomic_subtract_int(&bp->b_refs, 1);
604 	} else if ((bp->b_flags & B_DELWRI) == 0 ||
605 		   bp->b_vp != info->vp ||
606 		   vtruncbuf_bp_metasync_cmp(bp, data)) {
607 		BUF_UNLOCK(bp);
608 	} else {
609 		bremfree(bp);
610 		if (bp->b_vp == info->vp)
611 			bawrite(bp);
612 		else
613 			bwrite(bp);
614 	}
615 	return(1);
616 }
617 
618 /*
619  * vfsync - implements a multipass fsync on a file which understands
620  * dependancies and meta-data.  The passed vnode must be locked.  The
621  * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY.
622  *
623  * When fsyncing data asynchronously just do one consolidated pass starting
624  * with the most negative block number.  This may not get all the data due
625  * to dependencies.
626  *
627  * When fsyncing data synchronously do a data pass, then a metadata pass,
628  * then do additional data+metadata passes to try to get all the data out.
629  *
630  * Caller must ref the vnode but does not have to lock it.
631  */
632 static int vfsync_wait_output(struct vnode *vp,
633 			    int (*waitoutput)(struct vnode *, struct thread *));
634 static int vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused);
635 static int vfsync_data_only_cmp(struct buf *bp, void *data);
636 static int vfsync_meta_only_cmp(struct buf *bp, void *data);
637 static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
638 static int vfsync_bp(struct buf *bp, void *data);
639 
640 struct vfsync_info {
641 	struct vnode *vp;
642 	int fastpass;
643 	int synchronous;
644 	int syncdeps;
645 	int lazycount;
646 	int lazylimit;
647 	int skippedbufs;
648 	int (*checkdef)(struct buf *);
649 	int (*cmpfunc)(struct buf *, void *);
650 };
651 
652 int
653 vfsync(struct vnode *vp, int waitfor, int passes,
654 	int (*checkdef)(struct buf *),
655 	int (*waitoutput)(struct vnode *, struct thread *))
656 {
657 	struct vfsync_info info;
658 	int error;
659 
660 	bzero(&info, sizeof(info));
661 	info.vp = vp;
662 	if ((info.checkdef = checkdef) == NULL)
663 		info.syncdeps = 1;
664 
665 	lwkt_gettoken(&vp->v_token);
666 
667 	switch(waitfor) {
668 	case MNT_LAZY | MNT_NOWAIT:
669 	case MNT_LAZY:
670 		/*
671 		 * Lazy (filesystem syncer, typically).  Asynchronous, plus limit
672 		 * the number of data (not meta) pages we try to flush to 1MB.
673 		 * A non-zero return means that the lazy limit was reached.
674 		 */
675 		info.lazylimit = 1024 * 1024;
676 		info.syncdeps = 1;
677 		info.cmpfunc = vfsync_lazy_range_cmp;
678 		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
679 				vfsync_lazy_range_cmp, vfsync_bp, &info);
680 		info.cmpfunc = vfsync_meta_only_cmp;
681 		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
682 			vfsync_meta_only_cmp, vfsync_bp, &info);
683 		if (error == 0)
684 			vp->v_lazyw = 0;
685 		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
686 			vn_syncer_add(vp, 1);
687 		error = 0;
688 		break;
689 	case MNT_NOWAIT:
690 		/*
691 		 * Asynchronous.  Do a data-only pass and a meta-only pass.
692 		 */
693 		info.syncdeps = 1;
694 		info.cmpfunc = vfsync_data_only_cmp;
695 		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
696 			vfsync_bp, &info);
697 		info.cmpfunc = vfsync_meta_only_cmp;
698 		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
699 			vfsync_bp, &info);
700 		error = 0;
701 		break;
702 	default:
703 		/*
704 		 * Synchronous.  Do a data-only pass, then a meta-data+data
705 		 * pass, then additional integrated passes to try to get
706 		 * all the dependencies flushed.
707 		 */
708 		info.cmpfunc = vfsync_data_only_cmp;
709 		info.fastpass = 1;
710 		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
711 			vfsync_bp, &info);
712 		info.fastpass = 0;
713 		error = vfsync_wait_output(vp, waitoutput);
714 		if (error == 0) {
715 			info.skippedbufs = 0;
716 			info.cmpfunc = vfsync_dummy_cmp;
717 			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
718 				vfsync_bp, &info);
719 			error = vfsync_wait_output(vp, waitoutput);
720 			if (info.skippedbufs) {
721 				kprintf("Warning: vfsync skipped %d dirty "
722 					"buf%s in pass2!\n",
723 					info.skippedbufs,
724 					((info.skippedbufs > 1) ? "s" : ""));
725 			}
726 		}
727 		while (error == 0 && passes > 0 &&
728 		       !RB_EMPTY(&vp->v_rbdirty_tree)
729 		) {
730 			info.skippedbufs = 0;
731 			if (--passes == 0) {
732 				info.synchronous = 1;
733 				info.syncdeps = 1;
734 			}
735 			info.cmpfunc = vfsync_dummy_cmp;
736 			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
737 					vfsync_bp, &info);
738 			if (error < 0)
739 				error = -error;
740 			info.syncdeps = 1;
741 			if (error == 0)
742 				error = vfsync_wait_output(vp, waitoutput);
743 			if (info.skippedbufs && passes == 0) {
744 				kprintf("Warning: vfsync skipped %d dirty "
745 					"buf%s in final pass!\n",
746 					info.skippedbufs,
747 					((info.skippedbufs > 1) ? "s" : ""));
748 			}
749 		}
750 #if 0
751 		/*
752 		 * This case can occur normally because vnode lock might
753 		 * not be held.
754 		 */
755 		if (!RB_EMPTY(&vp->v_rbdirty_tree))
756 			kprintf("dirty bufs left after final pass\n");
757 #endif
758 		break;
759 	}
760 	lwkt_reltoken(&vp->v_token);
761 
762 	return(error);
763 }
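
/*
 * Sketch of a VOP_FSYNC built on vfsync() (the pass count and NULL
 * callbacks are illustrative assumptions; a real VFS may supply a
 * dependency checker and an output-wait routine instead).
 */
#if 0
static int
example_fsync(struct vop_fsync_args *ap)
{
	return (vfsync(ap->a_vp, ap->a_waitfor, 4, NULL, NULL));
}
#endif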
764 
765 static int
766 vfsync_wait_output(struct vnode *vp,
767 		   int (*waitoutput)(struct vnode *, struct thread *))
768 {
769 	int error;
770 
771 	error = bio_track_wait(&vp->v_track_write, 0, 0);
772 	if (waitoutput)
773 		error = waitoutput(vp, curthread);
774 	return(error);
775 }
776 
777 static int
778 vfsync_dummy_cmp(struct buf *bp __unused, void *data __unused)
779 {
780 	return(0);
781 }
782 
783 static int
784 vfsync_data_only_cmp(struct buf *bp, void *data)
785 {
786 	if (bp->b_loffset < 0)
787 		return(-1);
788 	return(0);
789 }
790 
791 static int
792 vfsync_meta_only_cmp(struct buf *bp, void *data)
793 {
794 	if (bp->b_loffset < 0)
795 		return(0);
796 	return(1);
797 }
798 
799 static int
800 vfsync_lazy_range_cmp(struct buf *bp, void *data)
801 {
802 	struct vfsync_info *info = data;
803 
804 	if (bp->b_loffset < info->vp->v_lazyw)
805 		return(-1);
806 	return(0);
807 }
808 
809 static int
810 vfsync_bp(struct buf *bp, void *data)
811 {
812 	struct vfsync_info *info = data;
813 	struct vnode *vp = info->vp;
814 	int error;
815 
816 	if (info->fastpass) {
817 		/*
818 		 * Ignore buffers that we cannot immediately lock.
819 		 */
820 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
821 			/*
822 			 * Removed BUF_TIMELOCK(..., 1); even a 1-tick
823 			 * delay can mess up performance.
824 			 *
825 			 * Another reason is that during a dirty-buffer
826 			 * scan a clustered write can start I/O on buffers
827 			 * ahead of the scan, causing the scan to not
828 			 * get a lock here.  Usually this means the write
829 			 * is already in progress so, in fact, we *want*
830 			 * to skip the buffer.
831 			 */
832 			++info->skippedbufs;
833 			return(0);
834 		}
835 	} else if (info->synchronous == 0) {
836 		/*
837 		 * Normal pass, give the buffer a little time to become
838 		 * available to us.
839 		 */
840 		if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst2", hz / 10)) {
841 			++info->skippedbufs;
842 			return(0);
843 		}
844 	} else {
845 		/*
846 		 * Synchronous pass, give the buffer a lot of time before
847 		 * giving up.
848 		 */
849 		if (BUF_TIMELOCK(bp, LK_EXCLUSIVE, "bflst3", hz * 10)) {
850 			++info->skippedbufs;
851 			return(0);
852 		}
853 	}
854 
855 	/*
856 	 * We must revalidate the buffer after locking.
857 	 */
858 	if ((bp->b_flags & B_DELWRI) == 0 ||
859 	    bp->b_vp != info->vp ||
860 	    info->cmpfunc(bp, data)) {
861 		BUF_UNLOCK(bp);
862 		return(0);
863 	}
864 
865 	/*
866 	 * If syncdeps is not set we do not try to write buffers which have
867 	 * dependencies.
868 	 */
869 	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp)) {
870 		BUF_UNLOCK(bp);
871 		return(0);
872 	}
873 
874 	/*
875 	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
876 	 * has been written but an additional handshake with the device
877 	 * is required before we can dispose of the buffer.  We have no idea
878 	 * how to do this so we have to skip these buffers.
879 	 */
880 	if (bp->b_flags & B_NEEDCOMMIT) {
881 		BUF_UNLOCK(bp);
882 		return(0);
883 	}
884 
885 	/*
886 	 * Ask bioops if it is ok to sync.  If not the VFS may have
887 	 * set B_LOCKED so we have to cycle the buffer.
888 	 */
889 	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
890 		bremfree(bp);
891 		brelse(bp);
892 		return(0);
893 	}
894 
895 	if (info->synchronous) {
896 		/*
897 		 * Synchronous flush.  An error may be returned and will
898 		 * stop the scan.
899 		 */
900 		bremfree(bp);
901 		error = bwrite(bp);
902 	} else {
903 		/*
904 		 * Asynchronous flush.  We use the error return to support
905 		 * MNT_LAZY flushes.
906 		 *
907 		 * In low-memory situations we revert to synchronous
908 		 * operation.  This should theoretically prevent the I/O
909 		 * path from exhausting memory in a non-recoverable way.
910 		 */
911 		vp->v_lazyw = bp->b_loffset;
912 		bremfree(bp);
913 		if (vm_page_count_min(0)) {
914 			/* low memory */
915 			info->lazycount += bp->b_bufsize;
916 			bwrite(bp);
917 		} else {
918 			/* normal */
919 			info->lazycount += cluster_awrite(bp);
920 			waitrunningbufspace();
921 			/*vm_wait_nominal();*/
922 		}
923 		if (info->lazylimit && info->lazycount >= info->lazylimit)
924 			error = 1;
925 		else
926 			error = 0;
927 	}
928 	return(-error);
929 }
930 
931 /*
932  * Associate a buffer with a vnode.
933  *
934  * MPSAFE
935  */
936 int
937 bgetvp(struct vnode *vp, struct buf *bp, int testsize)
938 {
939 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
940 	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);
941 
942 	/*
943 	 * Insert onto list for new vnode.
944 	 */
945 	lwkt_gettoken(&vp->v_token);
946 
947 	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
948 		lwkt_reltoken(&vp->v_token);
949 		return (EEXIST);
950 	}
951 
952 	/*
953 	 * Diagnostics (mainly for HAMMER debugging).  Check for
954 	 * overlapping buffers.
955 	 */
956 	if (check_buf_overlap) {
957 		struct buf *bx;
958 		bx = buf_rb_hash_RB_PREV(bp);
959 		if (bx) {
960 			if (bx->b_loffset + bx->b_bufsize > bp->b_loffset) {
961 				kprintf("bgetvp: overlapl %016jx/%d %016jx "
962 					"bx %p bp %p\n",
963 					(intmax_t)bx->b_loffset,
964 					bx->b_bufsize,
965 					(intmax_t)bp->b_loffset,
966 					bx, bp);
967 				if (check_buf_overlap > 1)
968 					panic("bgetvp - overlapping buffer");
969 			}
970 		}
971 		bx = buf_rb_hash_RB_NEXT(bp);
972 		if (bx) {
973 			if (bp->b_loffset + testsize > bx->b_loffset) {
974 				kprintf("bgetvp: overlapr %016jx/%d %016jx "
975 					"bp %p bx %p\n",
976 					(intmax_t)bp->b_loffset,
977 					testsize,
978 					(intmax_t)bx->b_loffset,
979 					bp, bx);
980 				if (check_buf_overlap > 1)
981 					panic("bgetvp - overlapping buffer");
982 			}
983 		}
984 	}
985 	bp->b_vp = vp;
986 	bp->b_flags |= B_HASHED;
987 	bp->b_flags |= B_VNCLEAN;
988 	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
989 		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
990 	/*vhold(vp);*/
991 	lwkt_reltoken(&vp->v_token);
992 	return(0);
993 }
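
/*
 * Sketch (illustrative, not a caller from this file): the buffer cache
 * associates a buffer when it is instantiated and disassociates it on
 * final release; size stands in for the expected buffer size used by
 * the overlap diagnostics.
 */
#if 0
	if (bgetvp(vp, bp, size) == 0) {	/* hash + clean list */
		/* bp is now findable by b_loffset on this vnode */
		brelvp(bp);			/* undo the association */
	}
#endif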
994 
995 /*
996  * Disassociate a buffer from a vnode.
997  *
998  * MPSAFE
999  */
1000 void
1001 brelvp(struct buf *bp)
1002 {
1003 	struct vnode *vp;
1004 
1005 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1006 
1007 	/*
1008 	 * Delete from old vnode list, if on one.
1009 	 */
1010 	vp = bp->b_vp;
1011 	lwkt_gettoken(&vp->v_token);
1012 	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
1013 		if (bp->b_flags & B_VNDIRTY)
1014 			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
1015 		else
1016 			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
1017 		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
1018 	}
1019 	if (bp->b_flags & B_HASHED) {
1020 		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
1021 		bp->b_flags &= ~B_HASHED;
1022 	}
1023 
1024 	/*
1025 	 * Only remove from synclist when no dirty buffers are left AND
1026 	 * the VFS has not flagged the vnode's inode as being dirty.
1027 	 */
1028 	if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) == VONWORKLST &&
1029 	    RB_EMPTY(&vp->v_rbdirty_tree)) {
1030 		vn_syncer_remove(vp, 0);
1031 	}
1032 	bp->b_vp = NULL;
1033 
1034 	lwkt_reltoken(&vp->v_token);
1035 
1036 	/*vdrop(vp);*/
1037 }
1038 
1039 /*
1040  * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
1041  * This routine is called when the state of the B_DELWRI bit is changed.
1042  *
1043  * Must be called with vp->v_token held.
1044  * MPSAFE
1045  */
1046 void
1047 reassignbuf(struct buf *bp)
1048 {
1049 	struct vnode *vp = bp->b_vp;
1050 	int delay;
1051 
1052 	ASSERT_LWKT_TOKEN_HELD(&vp->v_token);
1053 	++reassignbufcalls;
1054 
1055 	/*
1056 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1057 	 * is not fully linked in.
1058 	 */
1059 	if (bp->b_flags & B_PAGING)
1060 		panic("cannot reassign paging buffer");
1061 
1062 	if (bp->b_flags & B_DELWRI) {
1063 		/*
1064 		 * Move to the dirty list, add the vnode to the worklist
1065 		 */
1066 		if (bp->b_flags & B_VNCLEAN) {
1067 			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
1068 			bp->b_flags &= ~B_VNCLEAN;
1069 		}
1070 		if ((bp->b_flags & B_VNDIRTY) == 0) {
1071 			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
1072 				panic("reassignbuf: dup lblk vp %p bp %p",
1073 				      vp, bp);
1074 			}
1075 			bp->b_flags |= B_VNDIRTY;
1076 		}
1077 		if ((vp->v_flag & VONWORKLST) == 0) {
1078 			switch (vp->v_type) {
1079 			case VDIR:
1080 				delay = dirdelay;
1081 				break;
1082 			case VCHR:
1083 			case VBLK:
1084 				if (vp->v_rdev &&
1085 				    vp->v_rdev->si_mountpoint != NULL) {
1086 					delay = metadelay;
1087 					break;
1088 				}
1089 				/* fall through */
1090 			default:
1091 				delay = filedelay;
1092 			}
1093 			vn_syncer_add(vp, delay);
1094 		}
1095 	} else {
1096 		/*
1097 		 * Move to the clean list, remove the vnode from the worklist
1098 		 * if no dirty blocks remain.
1099 		 */
1100 		if (bp->b_flags & B_VNDIRTY) {
1101 			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
1102 			bp->b_flags &= ~B_VNDIRTY;
1103 		}
1104 		if ((bp->b_flags & B_VNCLEAN) == 0) {
1105 			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
1106 				panic("reassignbuf: dup lblk vp %p bp %p",
1107 				      vp, bp);
1108 			}
1109 			bp->b_flags |= B_VNCLEAN;
1110 		}
1111 
1112 		/*
1113 		 * Only remove from synclist when no dirty buffers are left
1114 		 * AND the VFS has not flagged the vnode's inode as being
1115 		 * dirty.
1116 		 */
1117 		if ((vp->v_flag & (VONWORKLST | VISDIRTY | VOBJDIRTY)) ==
1118 		     VONWORKLST &&
1119 		    RB_EMPTY(&vp->v_rbdirty_tree)) {
1120 			vn_syncer_remove(vp, 0);
1121 		}
1122 	}
1123 }
1124 
1125 /*
1126  * Create a vnode for a block device.  Used for mounting the root file
1127  * system.
1128  *
1129  * A vref()'d vnode is returned.
1130  */
1131 extern struct vop_ops *devfs_vnode_dev_vops_p;
1132 int
1133 bdevvp(cdev_t dev, struct vnode **vpp)
1134 {
1135 	struct vnode *vp;
1136 	struct vnode *nvp;
1137 	int error;
1138 
1139 	if (dev == NULL) {
1140 		*vpp = NULLVP;
1141 		return (ENXIO);
1142 	}
1143 	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
1144 				&nvp, 0, 0);
1145 	if (error) {
1146 		*vpp = NULLVP;
1147 		return (error);
1148 	}
1149 	vp = nvp;
1150 	vp->v_type = VCHR;
1151 #if 0
1152 	vp->v_rdev = dev;
1153 #endif
1154 	v_associate_rdev(vp, dev);
1155 	vp->v_umajor = dev->si_umajor;
1156 	vp->v_uminor = dev->si_uminor;
1157 	vx_unlock(vp);
1158 	*vpp = vp;
1159 	return (0);
1160 }
1161 
1162 int
1163 v_associate_rdev(struct vnode *vp, cdev_t dev)
1164 {
1165 	if (dev == NULL)
1166 		return(ENXIO);
1167 	if (dev_is_good(dev) == 0)
1168 		return(ENXIO);
1169 	KKASSERT(vp->v_rdev == NULL);
1170 	vp->v_rdev = reference_dev(dev);
1171 	lwkt_gettoken(&spechash_token);
1172 	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
1173 	lwkt_reltoken(&spechash_token);
1174 	return(0);
1175 }
1176 
1177 void
1178 v_release_rdev(struct vnode *vp)
1179 {
1180 	cdev_t dev;
1181 
1182 	if ((dev = vp->v_rdev) != NULL) {
1183 		lwkt_gettoken(&spechash_token);
1184 		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
1185 		vp->v_rdev = NULL;
1186 		release_dev(dev);
1187 		lwkt_reltoken(&spechash_token);
1188 	}
1189 }
1190 
1191 /*
1192  * Add a vnode to the alias list hung off the cdev_t.  We only associate
1193  * the device number with the vnode.  The actual device is not associated
1194  * until the vnode is opened (usually in spec_open()), and will be
1195  * disassociated on last close.
1196  */
1197 void
1198 addaliasu(struct vnode *nvp, int x, int y)
1199 {
1200 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
1201 		panic("addaliasu on non-special vnode");
1202 	nvp->v_umajor = x;
1203 	nvp->v_uminor = y;
1204 }
1205 
1206 /*
1207  * Simple call that a filesystem can make to try to get rid of a
1208  * vnode.  It will fail if anyone is referencing the vnode (including
1209  * the caller).
1210  *
1211  * The filesystem can check whether its in-memory inode structure still
1212  * references the vp on return.
1213  *
1214  * May only be called if the vnode is in a known state (i.e. being prevented
1215  * from being deallocated by some other condition such as a vfs inode hold).
1216  */
1217 void
1218 vclean_unlocked(struct vnode *vp)
1219 {
1220 	vx_get(vp);
1221 	if (VREFCNT(vp) <= 1)
1222 		vgone_vxlocked(vp);
1223 	vx_put(vp);
1224 }
1225 
1226 /*
1227  * Disassociate a vnode from its underlying filesystem.
1228  *
1229  * The vnode must be VX locked and referenced.  In all normal situations
1230  * there are no active references.  If vclean_vxlocked() is called while
1231  * there are active references, the vnode is being ripped out and we have
1232  * to call VOP_CLOSE() as appropriate before we can reclaim it.
1233  */
1234 void
1235 vclean_vxlocked(struct vnode *vp, int flags)
1236 {
1237 	int active;
1238 	int n;
1239 	vm_object_t object;
1240 	struct namecache *ncp;
1241 
1242 	/*
1243 	 * If the vnode has already been reclaimed we have nothing to do.
1244 	 */
1245 	if (vp->v_flag & VRECLAIMED)
1246 		return;
1247 
1248 	/*
1249 	 * Set flag to interlock operation, flag finalization to ensure
1250 	 * that the vnode winds up on the inactive list, and set v_act to 0.
1251 	 */
1252 	vsetflags(vp, VRECLAIMED);
1253 	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
1254 	vp->v_act = 0;
1255 
1256 	if (verbose_reclaims) {
1257 		if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL)
1258 			kprintf("Debug: reclaim %p %s\n", vp, ncp->nc_name);
1259 	}
1260 
1261 	/*
1262 	 * Scrap the vfs cache
1263 	 */
1264 	while (cache_inval_vp(vp, 0) != 0) {
1265 		kprintf("Warning: vnode %p clean/cache_resolution "
1266 			"race detected\n", vp);
1267 		tsleep(vp, 0, "vclninv", 2);
1268 	}
1269 
1270 	/*
1271 	 * Check to see if the vnode is in use. If so we have to reference it
1272 	 * before we clean it out so that its count cannot fall to zero and
1273 	 * generate a race against ourselves to recycle it.
1274 	 */
1275 	active = (VREFCNT(vp) > 0);
1276 
1277 	/*
1278 	 * Clean out any buffers associated with the vnode and destroy its
1279 	 * object, if it has one.
1280 	 */
1281 	vinvalbuf(vp, V_SAVE, 0, 0);
1282 
1283 	/*
1284 	 * If purging an active vnode (typically during a forced unmount
1285 	 * or reboot), it must be closed and deactivated before being
1286 	 * reclaimed.  This isn't really all that safe, but what can
1287 	 * we do? XXX.
1288 	 *
1289 	 * Note that neither of these routines unlocks the vnode.
1290 	 */
1291 	if (active && (flags & DOCLOSE)) {
1292 		while ((n = vp->v_opencount) != 0) {
1293 			if (vp->v_writecount)
1294 				VOP_CLOSE(vp, FWRITE|FNONBLOCK, NULL);
1295 			else
1296 				VOP_CLOSE(vp, FNONBLOCK, NULL);
1297 			if (vp->v_opencount == n) {
1298 				kprintf("Warning: unable to force-close"
1299 				       " vnode %p\n", vp);
1300 				break;
1301 			}
1302 		}
1303 	}
1304 
1305 	/*
1306 	 * If the vnode has not been deactivated, deactivate it.  Deactivation
1307 	 * can create new buffers and VM pages so we have to call vinvalbuf()
1308 	 * again to make sure they all get flushed.
1309 	 *
1310 	 * This can occur if a file with a link count of 0 needs to be
1311 	 * truncated.
1312 	 *
1313 	 * If the vnode is already dead don't try to deactivate it.
1314 	 */
1315 	if ((vp->v_flag & VINACTIVE) == 0) {
1316 		vsetflags(vp, VINACTIVE);
1317 		if (vp->v_mount)
1318 			VOP_INACTIVE(vp);
1319 		vinvalbuf(vp, V_SAVE, 0, 0);
1320 	}
1321 
1322 	/*
1323 	 * If the vnode has an object, destroy it.
1324 	 */
1325 	while ((object = vp->v_object) != NULL) {
1326 		vm_object_hold(object);
1327 		if (object == vp->v_object)
1328 			break;
1329 		vm_object_drop(object);
1330 	}
1331 
1332 	if (object != NULL) {
1333 		if (object->ref_count == 0) {
1334 			if ((object->flags & OBJ_DEAD) == 0)
1335 				vm_object_terminate(object);
1336 			vm_object_drop(object);
1337 			vclrflags(vp, VOBJBUF);
1338 		} else {
1339 			vm_pager_deallocate(object);
1340 			vclrflags(vp, VOBJBUF);
1341 			vm_object_drop(object);
1342 		}
1343 	}
1344 	KKASSERT((vp->v_flag & VOBJBUF) == 0);
1345 
1346 	if (vp->v_flag & VOBJDIRTY)
1347 		vclrobjdirty(vp);
1348 
1349 	/*
1350 	 * Reclaim the vnode if not already dead.
1351 	 */
1352 	if (vp->v_mount && VOP_RECLAIM(vp))
1353 		panic("vclean: cannot reclaim");
1354 
1355 	/*
1356 	 * Done with purge, notify sleepers of the grim news.
1357 	 */
1358 	vp->v_ops = &dead_vnode_vops_p;
1359 	vn_gone(vp);
1360 	vp->v_tag = VT_NON;
1361 
1362 	/*
1363 	 * If we are destroying an active vnode, reactivate it now that
1364 	 * we have reassociated it with deadfs.  This prevents the system
1365 	 * from crashing on the vnode due to it being unexpectedly marked
1366 	 * as inactive or reclaimed.
1367 	 */
1368 	if (active && (flags & DOCLOSE)) {
1369 		vclrflags(vp, VINACTIVE | VRECLAIMED);
1370 	}
1371 }
1372 
1373 /*
1374  * Eliminate all activity associated with the requested vnode
1375  * and with all vnodes aliased to the requested vnode.
1376  *
1377  * The vnode must be referenced but should not be locked.
1378  */
1379 int
1380 vrevoke(struct vnode *vp, struct ucred *cred)
1381 {
1382 	struct vnode *vq;
1383 	struct vnode *vqn;
1384 	cdev_t dev;
1385 	int error;
1386 
1387 	/*
1388 	 * If the vnode has a device association, scrap all vnodes associated
1389 	 * with the device.  Don't let the device disappear on us while we
1390 	 * are scrapping the vnodes.
1391 	 *
1392 	 * The passed vp will probably show up in the list, do not VX lock
1393 	 * it twice!
1394 	 *
1395 	 * Releasing the vnode's rdev here can mess up specfs's call to
1396 	 * device close, so don't do it.  The vnode has been disassociated
1397 	 * and the device will be closed after the last ref on the related
1398 	 * fp goes away (if not still open by e.g. the kernel).
1399 	 */
1400 	if (vp->v_type != VCHR) {
1401 		error = fdrevoke(vp, DTYPE_VNODE, cred);
1402 		return (error);
1403 	}
1404 	if ((dev = vp->v_rdev) == NULL) {
1405 		return(0);
1406 	}
1407 	reference_dev(dev);
1408 	lwkt_gettoken(&spechash_token);
1409 
1410 restart:
1411 	vqn = SLIST_FIRST(&dev->si_hlist);
1412 	if (vqn)
1413 		vhold(vqn);
1414 	while ((vq = vqn) != NULL) {
1415 		if (VREFCNT(vq) > 0) {
1416 			vref(vq);
1417 			fdrevoke(vq, DTYPE_VNODE, cred);
1418 			/*v_release_rdev(vq);*/
1419 			vrele(vq);
1420 			if (vq->v_rdev != dev) {
1421 				vdrop(vq);
1422 				goto restart;
1423 			}
1424 		}
1425 		vqn = SLIST_NEXT(vq, v_cdevnext);
1426 		if (vqn)
1427 			vhold(vqn);
1428 		vdrop(vq);
1429 	}
1430 	lwkt_reltoken(&spechash_token);
1431 	dev_drevoke(dev);
1432 	release_dev(dev);
1433 	return (0);
1434 }
1435 
1436 /*
1437  * This is called when the object underlying a vnode is being destroyed,
1438  * such as in a remove().  Try to recycle the vnode immediately if the
1439  * only active reference is our reference.
1440  *
1441  * Directory vnodes in the namecache with children cannot be immediately
1442  * recycled because numerous VOP_N*() ops require them to be stable.
1443  *
1444  * To avoid recursive recycling from VOP_INACTIVE implementations, this
1445  * function is a NOP if VRECLAIMED is already set.
1446  */
1447 int
1448 vrecycle(struct vnode *vp)
1449 {
1450 	if (VREFCNT(vp) <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
1451 		if (cache_inval_vp_nonblock(vp))
1452 			return(0);
1453 		vgone_vxlocked(vp);
1454 		return (1);
1455 	}
1456 	return (0);
1457 }
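
/*
 * Sketch of the typical caller (an assumption: a VOP_INACTIVE routine
 * noticing the file was removed; ip->i_nlink is a placeholder field
 * name, not from this file).
 */
#if 0
	if (ip->i_nlink == 0)		/* gone from the namespace */
		(void)vrecycle(vp);	/* reclaim now if we hold last ref */
#endif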
1458 
1459 /*
1460  * Return the maximum I/O size allowed for strategy calls on VP.
1461  *
1462  * If vp is VCHR or VBLK we dive the device, otherwise we use
1463  * the vp's mount info.
1464  *
1465  * The returned value is clamped at MAXPHYS as most callers cannot use
1466  * buffers larger than that size.
1467  */
1468 int
1469 vmaxiosize(struct vnode *vp)
1470 {
1471 	int maxiosize;
1472 
1473 	if (vp->v_type == VBLK || vp->v_type == VCHR)
1474 		maxiosize = vp->v_rdev->si_iosize_max;
1475 	else
1476 		maxiosize = vp->v_mount->mnt_iosize_max;
1477 
1478 	if (maxiosize > MAXPHYS)
1479 		maxiosize = MAXPHYS;
1480 	return (maxiosize);
1481 }
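
/*
 * Sketch (illustrative): callers clamp a strategy-layer transfer to
 * the per-vnode limit before issuing the I/O.
 */
#if 0
	int maxio = vmaxiosize(vp);

	if (bytes > maxio)
		bytes = maxio;
#endif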
1482 
1483 /*
1484  * Eliminate all activity associated with a vnode in preparation for
1485  * destruction.
1486  *
1487  * The vnode must be VX locked and refd and will remain VX locked and refd
1488  * on return.  This routine may be called with the vnode in any state, as
1489  * long as it is VX locked.  The vnode will be cleaned out and marked
1490  * VRECLAIMED but will not actually be reused until all existing refs and
1491  * holds go away.
1492  *
1493  * NOTE: This routine may be called on a vnode which has not yet been
1494  * deactivated (VOP_INACTIVE), or on a vnode which has
1495  * already been reclaimed.
1496  *
1497  * This routine is not responsible for placing us back on the freelist.
1498  * Instead, it happens automatically when the caller releases the VX lock
1499  * (assuming there aren't any other references).
1500  */
1501 void
1502 vgone_vxlocked(struct vnode *vp)
1503 {
1504 	/*
1505 	 * assert that the VX lock is held.  This is an absolute requirement
1506 	 * now for vgone_vxlocked() to be called.
1507 	 */
1508 	KKASSERT(lockinuse(&vp->v_lock));
1509 
1510 	/*
1511 	 * Clean out the filesystem specific data and set the VRECLAIMED
1512 	 * bit.  Also deactivate the vnode if necessary.
1513 	 *
1514 	 * The vnode should have automatically been removed from the syncer
1515 	 * list as syncer/dirty flags cleared during the cleaning.
1516 	 */
1517 	vclean_vxlocked(vp, DOCLOSE);
1518 
1519 	/*
1520 	 * Normally panic if the vnode is still dirty, unless we are doing
1521 	 * a forced unmount (tmpfs typically).
1522 	 */
1523 	if (vp->v_flag & VONWORKLST) {
1524 		if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
1525 			/* force removal */
1526 			vn_syncer_remove(vp, 1);
1527 		} else {
1528 			panic("vp %p still dirty in vgone after flush", vp);
1529 		}
1530 	}
1531 
1532 	/*
1533 	 * Delete from old mount point vnode list, if on one.
1534 	 */
1535 	if (vp->v_mount != NULL) {
1536 		KKASSERT(vp->v_data == NULL);
1537 		insmntque(vp, NULL);
1538 	}
1539 
1540 	/*
1541 	 * If special device, remove it from special device alias list
1542 	 * if it is on one.  This should normally only occur if a vnode is
1543 	 * being revoked as the device should otherwise have been released
1544 	 * naturally.
1545 	 */
1546 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
1547 		v_release_rdev(vp);
1548 	}
1549 
1550 	/*
1551 	 * Set us to VBAD
1552 	 */
1553 	vp->v_type = VBAD;
1554 }
1555 
1556 /*
1557  * Lookup a vnode by device number.
1558  *
1559  * Returns non-zero and *vpp set to a vref'd vnode on success.
1560  * Returns zero on failure.
1561  */
1562 int
1563 vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
1564 {
1565 	struct vnode *vp;
1566 
1567 	lwkt_gettoken(&spechash_token);
1568 	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
1569 		if (type == vp->v_type) {
1570 			*vpp = vp;
1571 			vref(vp);
1572 			lwkt_reltoken(&spechash_token);
1573 			return (1);
1574 		}
1575 	}
1576 	lwkt_reltoken(&spechash_token);
1577 	return (0);
1578 }
1579 
1580 /*
1581  * Calculate the total number of references to a special device.  This
1582  * routine may only be called for VBLK and VCHR vnodes since v_rdev is
1583  * an overloaded field.  Since udev2dev can now return NULL, we have
1584  * to check for a NULL v_rdev.
1585  */
1586 int
1587 count_dev(cdev_t dev)
1588 {
1589 	struct vnode *vp;
1590 	int count = 0;
1591 
1592 	if (SLIST_FIRST(&dev->si_hlist)) {
1593 		lwkt_gettoken(&spechash_token);
1594 		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
1595 			count += vp->v_opencount;
1596 		}
1597 		lwkt_reltoken(&spechash_token);
1598 	}
1599 	return(count);
1600 }
1601 
1602 int
1603 vcount(struct vnode *vp)
1604 {
1605 	if (vp->v_rdev == NULL)
1606 		return(0);
1607 	return(count_dev(vp->v_rdev));
1608 }
1609 
1610 /*
1611  * Initialize VMIO for a vnode.  This routine MUST be called before a
1612  * VFS can issue buffer cache ops on a vnode.  It is typically called
1613  * when a vnode is initialized from its inode.
1614  */
1615 int
1616 vinitvmio(struct vnode *vp, off_t filesize, int blksize, int boff)
1617 {
1618 	vm_object_t object;
1619 	int error = 0;
1620 
1621 	object = vp->v_object;
1622 	if (object) {
1623 		vm_object_hold(object);
1624 		KKASSERT(vp->v_object == object);
1625 	}
1626 
1627 	if (object == NULL) {
1628 		object = vnode_pager_alloc(vp, filesize, 0, 0, blksize, boff);
1629 
1630 		/*
1631 		 * Dereference the reference we just created.  This assumes
1632 		 * that the object is associated with the vp.  Allow it to
1633 		 * have zero refs.  It cannot be destroyed as long as it
1634 		 * is associated with the vnode.
1635 		 */
1636 		vm_object_hold(object);
1637 		atomic_add_int(&object->ref_count, -1);
1638 		vrele(vp);
1639 	} else {
1640 		KKASSERT((object->flags & OBJ_DEAD) == 0);
1641 	}
1642 	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
1643 	vsetflags(vp, VOBJBUF);
1644 	vm_object_drop(object);
1645 
1646 	return (error);
1647 }
1648 
1649 
1650 /*
1651  * Print out a description of a vnode.
1652  */
1653 static char *typename[] =
1654 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
1655 
1656 void
1657 vprint(char *label, struct vnode *vp)
1658 {
1659 	char buf[96];
1660 
1661 	if (label != NULL)
1662 		kprintf("%s: %p: ", label, (void *)vp);
1663 	else
1664 		kprintf("%p: ", (void *)vp);
1665 	kprintf("type %s, refcnt %08x, writecount %d, holdcnt %d,",
1666 		typename[vp->v_type],
1667 		vp->v_refcnt, vp->v_writecount, vp->v_auxrefs);
1668 	buf[0] = '\0';
1669 	if (vp->v_flag & VROOT)
1670 		strcat(buf, "|VROOT");
1671 	if (vp->v_flag & VPFSROOT)
1672 		strcat(buf, "|VPFSROOT");
1673 	if (vp->v_flag & VTEXT)
1674 		strcat(buf, "|VTEXT");
1675 	if (vp->v_flag & VSYSTEM)
1676 		strcat(buf, "|VSYSTEM");
1677 	if (vp->v_flag & VOBJBUF)
1678 		strcat(buf, "|VOBJBUF");
1679 	if (buf[0] != '\0')
1680 		kprintf(" flags (%s)", &buf[1]);
1681 	if (vp->v_data == NULL) {
1682 		kprintf("\n");
1683 	} else {
1684 		kprintf("\n\t");
1685 		VOP_PRINT(vp);
1686 	}
1687 }
1688 
1689 /*
1690  * Do the usual access checking.
1691  * file_mode, uid and gid are from the vnode in question,
1692  * while acc_mode and cred are from the VOP_ACCESS parameter list.
1693  */
1694 int
1695 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1696     mode_t acc_mode, struct ucred *cred)
1697 {
1698 	mode_t mask;
1699 	int ismember;
1700 
1701 	/*
1702 	 * Super-user always gets read/write access, but execute access depends
1703 	 * on at least one execute bit being set.
1704 	 */
1705 	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
1706 		if ((acc_mode & VEXEC) && type != VDIR &&
1707 		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1708 			return (EACCES);
1709 		return (0);
1710 	}
1711 
1712 	mask = 0;
1713 
1714 	/* Otherwise, check the owner. */
1715 	if (cred->cr_uid == uid) {
1716 		if (acc_mode & VEXEC)
1717 			mask |= S_IXUSR;
1718 		if (acc_mode & VREAD)
1719 			mask |= S_IRUSR;
1720 		if (acc_mode & VWRITE)
1721 			mask |= S_IWUSR;
1722 		return ((file_mode & mask) == mask ? 0 : EACCES);
1723 	}
1724 
1725 	/* Otherwise, check the groups. */
1726 	ismember = groupmember(gid, cred);
1727 	if (cred->cr_svgid == gid || ismember) {
1728 		if (acc_mode & VEXEC)
1729 			mask |= S_IXGRP;
1730 		if (acc_mode & VREAD)
1731 			mask |= S_IRGRP;
1732 		if (acc_mode & VWRITE)
1733 			mask |= S_IWGRP;
1734 		return ((file_mode & mask) == mask ? 0 : EACCES);
1735 	}
1736 
1737 	/* Otherwise, check everyone else. */
1738 	if (acc_mode & VEXEC)
1739 		mask |= S_IXOTH;
1740 	if (acc_mode & VREAD)
1741 		mask |= S_IROTH;
1742 	if (acc_mode & VWRITE)
1743 		mask |= S_IWOTH;
1744 	return ((file_mode & mask) == mask ? 0 : EACCES);
1745 }
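
/*
 * Worked example (illustrative): file_mode 0640, caller is a group
 * member but not the owner, acc_mode = VREAD|VWRITE.  mask becomes
 * S_IRGRP|S_IWGRP = 0060, and 0640 & 0060 = 0040 != 0060, so EACCES
 * is returned.  With acc_mode = VREAD alone the mask is 0040, the
 * test passes, and 0 is returned.
 */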
1746 
1747 #ifdef DDB
1748 #include <ddb/ddb.h>
1749 
1750 static int db_show_locked_vnodes(struct mount *mp, void *data);
1751 
1752 /*
1753  * List all of the locked vnodes in the system.
1754  * Called when debugging the kernel.
1755  */
1756 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
1757 {
1758 	kprintf("Locked vnodes\n");
1759 	mountlist_scan(db_show_locked_vnodes, NULL,
1760 			MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1761 }
1762 
1763 static int
1764 db_show_locked_vnodes(struct mount *mp, void *data __unused)
1765 {
1766 	struct vnode *vp;
1767 
1768 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
1769 		if (vn_islocked(vp))
1770 			vprint(NULL, vp);
1771 	}
1772 	return(0);
1773 }
1774 #endif
1775 
1776 /*
1777  * Top level filesystem related information gathering.
1778  */
1779 static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);
1780 
1781 static int
1782 vfs_sysctl(SYSCTL_HANDLER_ARGS)
1783 {
1784 	int *name = (int *)arg1 - 1;	/* XXX */
1785 	u_int namelen = arg2 + 1;	/* XXX */
1786 	struct vfsconf *vfsp;
1787 	int maxtypenum;
1788 
1789 #if 1 || defined(COMPAT_PRELITE2)
1790 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
1791 	if (namelen == 1)
1792 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
1793 #endif
1794 
1795 #ifdef notyet
1796 	/* all sysctl names at this level are at least name and field */
1797 	if (namelen < 2)
1798 		return (ENOTDIR);		/* overloaded */
1799 	if (name[0] != VFS_GENERIC) {
1800 		vfsp = vfsconf_find_by_typenum(name[0]);
1801 		if (vfsp == NULL)
1802 			return (EOPNOTSUPP);
1803 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1804 		    oldp, oldlenp, newp, newlen, p));
1805 	}
1806 #endif
1807 	switch (name[1]) {
1808 	case VFS_MAXTYPENUM:
1809 		if (namelen != 2)
1810 			return (ENOTDIR);
1811 		maxtypenum = vfsconf_get_maxtypenum();
1812 		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
1813 	case VFS_CONF:
1814 		if (namelen != 3)
1815 			return (ENOTDIR);	/* overloaded */
1816 		vfsp = vfsconf_find_by_typenum(name[2]);
1817 		if (vfsp == NULL)
1818 			return (EOPNOTSUPP);
1819 		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
1820 	}
1821 	return (EOPNOTSUPP);
1822 }
1823 
1824 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
1825 	"Generic filesystem");
1826 
1827 #if 1 || defined(COMPAT_PRELITE2)
1828 
1829 static int
1830 sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
1831 {
1832 	int error;
1833 	struct ovfsconf ovfs;
1834 	struct sysctl_req *req = (struct sysctl_req*) data;
1835 
1836 	bzero(&ovfs, sizeof(ovfs));
1837 	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
1838 	strcpy(ovfs.vfc_name, vfsp->vfc_name);
1839 	ovfs.vfc_index = vfsp->vfc_typenum;
1840 	ovfs.vfc_refcount = vfsp->vfc_refcount;
1841 	ovfs.vfc_flags = vfsp->vfc_flags;
1842 	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
1843 	if (error)
1844 		return error; /* abort iteration with error code */
1845 	else
1846 		return 0; /* continue iterating with next element */
1847 }
1848 
1849 static int
1850 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
1851 {
1852 	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
1853 }
1854 
1855 #endif /* 1 || COMPAT_PRELITE2 */
1856 
1857 /*
1858  * Check to see if a filesystem is mounted on a block device.
1859  */
1860 int
1861 vfs_mountedon(struct vnode *vp)
1862 {
1863 	cdev_t dev;
1864 
1865 	if ((dev = vp->v_rdev) == NULL) {
1866 /*		if (vp->v_type != VBLK)
1867 			dev = get_dev(vp->v_uminor, vp->v_umajor); */
1868 	}
1869 	if (dev != NULL && dev->si_mountpoint)
1870 		return (EBUSY);
1871 	return (0);
1872 }
1873 
1874 /*
1875  * Unmount all filesystems. The list is traversed in reverse order
1876  * of mounting to avoid dependencies.
1877  *
1878  * We want the umountall to be able to break out of its loop if a
1879  * failure occurs, after scanning all possible mounts, so the callback
1880  * returns 0 on error.
1881  *
1882  * NOTE: Do not call mountlist_remove(mp) on error any more, this will
1883  *	 confuse mountlist_scan()'s unbusy check.
1884  */
1885 static int vfs_umountall_callback(struct mount *mp, void *data);
1886 
1887 void
1888 vfs_unmountall(int halting)
1889 {
1890 	int count;
1891 
1892 	do {
1893 		count = mountlist_scan(vfs_umountall_callback, &halting,
1894 				       MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
1895 	} while (count);
1896 }
1897 
1898 static
1899 int
1900 vfs_umountall_callback(struct mount *mp, void *data)
1901 {
1902 	int error;
1903 	int halting = *(int *)data;
1904 
1905 	/*
1906 	 * NOTE: When halting, dounmount will disconnect but leave
1907 	 *	 certain mount points intact, e.g. devfs.
1908 	 */
1909 	error = dounmount(mp, MNT_FORCE, halting);
1910 	if (error) {
1911 		kprintf("unmount of filesystem mounted from %s failed (",
1912 			mp->mnt_stat.f_mntfromname);
1913 		if (error == EBUSY)
1914 			kprintf("BUSY)\n");
1915 		else
1916 			kprintf("%d)\n", error);
1917 		return 0;
1918 	} else {
1919 		return 1;
1920 	}
1921 }
1922 
1923 /*
 * Convert the mount flags passed in 'flags' to their option names and
 * write them, comma-separated, into the string buffer 'buf' with a size
 * limit specified by 'len'.
 *
 * Returns the number of bytes written into buf, and (*errorp) will be
 * set to 0, EINVAL (if the passed length was 0), or ENOSPC (the supplied
 * buffer was not large enough).  The buffer is 0-terminated if len was
 * not 0.
1930  */
1931 size_t
1932 vfs_flagstostr(int flags, const struct mountctl_opt *optp,
1933 	       char *buf, size_t len, int *errorp)
1934 {
1935 	static const struct mountctl_opt optnames[] = {
1936 		{ MNT_RDONLY,           "read-only" },
1937 		{ MNT_SYNCHRONOUS,      "synchronous" },
1938 		{ MNT_NOEXEC,           "noexec" },
1939 		{ MNT_NOSUID,           "nosuid" },
1940 		{ MNT_NODEV,            "nodev" },
1941 		{ MNT_AUTOMOUNTED,      "automounted" },
1942 		{ MNT_ASYNC,            "asynchronous" },
1943 		{ MNT_SUIDDIR,          "suiddir" },
1944 		{ MNT_SOFTDEP,          "soft-updates" },
1945 		{ MNT_NOSYMFOLLOW,      "nosymfollow" },
1946 		{ MNT_TRIM,             "trim" },
1947 		{ MNT_NOATIME,          "noatime" },
1948 		{ MNT_NOCLUSTERR,       "noclusterr" },
1949 		{ MNT_NOCLUSTERW,       "noclusterw" },
1950 		{ MNT_EXRDONLY,         "NFS read-only" },
1951 		{ MNT_EXPORTED,         "NFS exported" },
1952 		/* Remaining NFS flags could come here */
1953 		{ MNT_LOCAL,            "local" },
1954 		{ MNT_QUOTA,            "with-quotas" },
1955 		/* { MNT_ROOTFS,           "rootfs" }, */
1956 		/* { MNT_IGNORE,           "ignore" }, */
1957 		{ 0,			NULL}
1958 	};
1959 	int bwritten;
1960 	int bleft;
1961 	int optlen;
1962 	int actsize;
1963 
1964 	*errorp = 0;
1965 	bwritten = 0;
1966 	bleft = len - 1;	/* leave room for trailing \0 */
1967 
1968 	/*
	 * Check the current length of the string.  If it already
	 * contains data, the new flag names will be appended to it.
1972 	 */
1973 	actsize = strlen(buf);
1974 	if (actsize > 0)
1975 		buf += actsize;
1976 
	/* Use the default option name table if none was passed */
1978 	if (optp == NULL)
1979 		optp = optnames;
1980 
1981 	if (bleft < 0) {	/* degenerate case, 0-length buffer */
1982 		*errorp = EINVAL;
1983 		return(0);
1984 	}
1985 
1986 	for (; flags && optp->o_opt; ++optp) {
1987 		if ((flags & optp->o_opt) == 0)
1988 			continue;
1989 		optlen = strlen(optp->o_name);
1990 		if (bwritten || actsize > 0) {
1991 			if (bleft < 2) {
1992 				*errorp = ENOSPC;
1993 				break;
1994 			}
1995 			buf[bwritten++] = ',';
1996 			buf[bwritten++] = ' ';
1997 			bleft -= 2;
1998 		}
1999 		if (bleft < optlen) {
2000 			*errorp = ENOSPC;
2001 			break;
2002 		}
2003 		bcopy(optp->o_name, buf + bwritten, optlen);
2004 		bwritten += optlen;
2005 		bleft -= optlen;
2006 		flags &= ~optp->o_opt;
2007 	}
2008 
2009 	/*
2010 	 * Space already reserved for trailing \0
2011 	 */
2012 	buf[bwritten] = 0;
2013 	return (bwritten);
2014 }
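
/*
 * A minimal usage sketch for vfs_flagstostr() (added illustration, not
 * part of the original code).  The buffer must start 0-terminated since
 * the function appends to any existing contents.
 */
#if 0
static void
example_print_mount_flags(struct mount *mp)
{
	char buf[256];
	int error;

	buf[0] = '\0';
	vfs_flagstostr(mp->mnt_flag, NULL, buf, sizeof(buf), &error);
	if (error == 0)
		kprintf("%s: %s\n", mp->mnt_stat.f_mntfromname, buf);
}
#endif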
2015 
2016 /*
2017  * Build hash lists of net addresses and hang them off the mount point.
 * Called by vfs_export() to set up the lists of export addresses.
2019  */
2020 static int
2021 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
2022 		const struct export_args *argp)
2023 {
2024 	struct netcred *np;
2025 	struct radix_node_head *rnh;
2026 	int i;
2027 	struct radix_node *rn;
2028 	struct sockaddr *saddr, *smask = NULL;
2029 	int error;
2030 
2031 	if (argp->ex_addrlen == 0) {
2032 		if (mp->mnt_flag & MNT_DEFEXPORTED)
2033 			return (EPERM);
2034 		np = &nep->ne_defexported;
2035 		np->netc_exflags = argp->ex_flags;
2036 		np->netc_anon = argp->ex_anon;
2037 		np->netc_anon.cr_ref = 1;
2038 		mp->mnt_flag |= MNT_DEFEXPORTED;
2039 		return (0);
2040 	}
2041 
2042 	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
2043 		return (EINVAL);
2044 	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
2045 		return (EINVAL);
2046 
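	/*
	 * Layout note: a single allocation holds the netcred followed
	 * by the copied-in address and the optional mask:
	 *
	 *	[ struct netcred ][ saddr (ex_addrlen) ][ smask (ex_masklen) ]
	 */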
2047 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
2048 	np = (struct netcred *)kmalloc(i, M_NETCRED, M_WAITOK | M_ZERO);
2049 	saddr = (struct sockaddr *) (np + 1);
2050 	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
2051 		goto out;
2052 	if (saddr->sa_len > argp->ex_addrlen)
2053 		saddr->sa_len = argp->ex_addrlen;
2054 	if (argp->ex_masklen) {
2055 		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
2056 		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
2057 		if (error)
2058 			goto out;
2059 		if (smask->sa_len > argp->ex_masklen)
2060 			smask->sa_len = argp->ex_masklen;
2061 	}
2062 	NE_LOCK(nep);
	if (nep->ne_maskhead == NULL) {
		if (!rn_inithead((void **)&nep->ne_maskhead, NULL, 0)) {
			NE_UNLOCK(nep);	/* do not leak the lock on error */
			error = ENOBUFS;
			goto out;
		}
	}
	if ((rnh = vfs_create_addrlist_af(saddr->sa_family, nep)) == NULL) {
		NE_UNLOCK(nep);		/* do not leak the lock on error */
		error = ENOBUFS;
		goto out;
	}
2073 	rn = (*rnh->rnh_addaddr)((char *)saddr, (char *)smask, rnh,
2074 				 np->netc_rnodes);
2075 	NE_UNLOCK(nep);
2076 	if (rn == NULL || np != (struct netcred *)rn) {	/* already exists */
2077 		error = EPERM;
2078 		goto out;
2079 	}
2080 	np->netc_exflags = argp->ex_flags;
2081 	np->netc_anon = argp->ex_anon;
2082 	np->netc_anon.cr_ref = 1;
2083 	return (0);
2084 
2085 out:
2086 	kfree(np, M_NETCRED);
2087 	return (error);
2088 }
2089 
2090 /*
2091  * Free netcred structures installed in the netexport
2092  */
2093 static int
2094 vfs_free_netcred(struct radix_node *rn, void *w)
2095 {
2096 	struct radix_node_head *rnh = (struct radix_node_head *)w;
2097 
2098 	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
2099 	kfree(rn, M_NETCRED);
2100 
2101 	return (0);
2102 }
2103 
2104 /*
 * Callback to free an element of the mask table installed in the
2106  * netexport.  These may be created indirectly and are not netcred
2107  * structures.
2108  */
2109 static int
2110 vfs_free_netcred_mask(struct radix_node *rn, void *w)
2111 {
2112 	struct radix_node_head *rnh = (struct radix_node_head *)w;
2113 
2114 	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
2115 	kfree(rn, M_RTABLE);
2116 
2117 	return (0);
2118 }
2119 
2120 static struct radix_node_head *
2121 vfs_create_addrlist_af(int af, struct netexport *nep)
2122 {
2123 	struct radix_node_head *rnh = NULL;
2124 #if defined(INET) || defined(INET6)
2125 	struct radix_node_head *maskhead = nep->ne_maskhead;
2126 	int off;
2127 #endif
2128 
2129 	NE_ASSERT_LOCKED(nep);
2130 #if defined(INET) || defined(INET6)
2131 	KKASSERT(maskhead != NULL);
2132 #endif
2133 	switch (af) {
2134 #ifdef INET
2135 	case AF_INET:
2136 		if ((rnh = nep->ne_inethead) == NULL) {
2137 			off = offsetof(struct sockaddr_in, sin_addr) << 3;
2138 			if (!rn_inithead((void **)&rnh, maskhead, off))
2139 				return (NULL);
2140 			nep->ne_inethead = rnh;
2141 		}
2142 		break;
2143 #endif
2144 #ifdef INET6
2145 	case AF_INET6:
2146 		if ((rnh = nep->ne_inet6head) == NULL) {
2147 			off = offsetof(struct sockaddr_in6, sin6_addr) << 3;
2148 			if (!rn_inithead((void **)&rnh, maskhead, off))
2149 				return (NULL);
2150 			nep->ne_inet6head = rnh;
2151 		}
2152 		break;
2153 #endif
2154 	}
2155 	return (rnh);
2156 }
2157 
2158 /*
 * Helper function for freeing netcred elements.
2160  */
2161 static void
2162 vfs_free_addrlist_af(struct radix_node_head **prnh)
2163 {
2164 	struct radix_node_head *rnh = *prnh;
2165 
2166 	(*rnh->rnh_walktree) (rnh, vfs_free_netcred, rnh);
2167 	kfree(rnh, M_RTABLE);
2168 	*prnh = NULL;
2169 }
2170 
2171 /*
 * Helper function for freeing mask elements.
2173  */
2174 static void
2175 vfs_free_addrlist_masks(struct radix_node_head **prnh)
2176 {
2177 	struct radix_node_head *rnh = *prnh;
2178 
2179 	(*rnh->rnh_walktree) (rnh, vfs_free_netcred_mask, rnh);
2180 	kfree(rnh, M_RTABLE);
2181 	*prnh = NULL;
2182 }
2183 
2184 /*
2185  * Free the net address hash lists that are hanging off the mount points.
2186  */
2187 static void
2188 vfs_free_addrlist(struct netexport *nep)
2189 {
2190 	NE_LOCK(nep);
2191 	if (nep->ne_inethead != NULL)
2192 		vfs_free_addrlist_af(&nep->ne_inethead);
2193 	if (nep->ne_inet6head != NULL)
2194 		vfs_free_addrlist_af(&nep->ne_inet6head);
2195 	if (nep->ne_maskhead)
2196 		vfs_free_addrlist_masks(&nep->ne_maskhead);
2197 	NE_UNLOCK(nep);
2198 }
2199 
2200 int
2201 vfs_export(struct mount *mp, struct netexport *nep,
2202 	   const struct export_args *argp)
2203 {
2204 	int error;
2205 
2206 	if (argp->ex_flags & MNT_DELEXPORT) {
2207 		if (mp->mnt_flag & MNT_EXPUBLIC) {
2208 			vfs_setpublicfs(NULL, NULL, NULL);
2209 			mp->mnt_flag &= ~MNT_EXPUBLIC;
2210 		}
2211 		vfs_free_addrlist(nep);
2212 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2213 	}
2214 	if (argp->ex_flags & MNT_EXPORTED) {
2215 		if (argp->ex_flags & MNT_EXPUBLIC) {
2216 			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
2217 				return (error);
2218 			mp->mnt_flag |= MNT_EXPUBLIC;
2219 		}
2220 		if ((error = vfs_hang_addrlist(mp, nep, argp)))
2221 			return (error);
2222 		mp->mnt_flag |= MNT_EXPORTED;
2223 	}
2224 	return (0);
2225 }
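
/*
 * Sketch of a typical caller (added example, not part of the original
 * code).  Real export_args normally arrive from userland via the
 * mount/export path; the values shown here are illustrative.
 */
#if 0
static int
example_delete_all_exports(struct mount *mp, struct netexport *nep)
{
	struct export_args args;

	bzero(&args, sizeof(args));
	args.ex_flags = MNT_DELEXPORT;	/* remove every export entry */
	return (vfs_export(mp, nep, &args));
}
#endif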
2226 
2227 
2228 /*
 * Set the publicly exported filesystem (WebNFS).  The spec (RFC 2054
 * and RFC 2055) allows only one public filesystem.
2231  */
2232 int
2233 vfs_setpublicfs(struct mount *mp, struct netexport *nep,
2234 		const struct export_args *argp)
2235 {
2236 	int error;
2237 	struct vnode *rvp;
2238 	char *cp;
2239 
2240 	/*
2241 	 * mp == NULL -> invalidate the current info, the FS is
2242 	 * no longer exported. May be called from either vfs_export
2243 	 * or unmount, so check if it hasn't already been done.
2244 	 */
2245 	if (mp == NULL) {
2246 		if (nfs_pub.np_valid) {
2247 			nfs_pub.np_valid = 0;
2248 			if (nfs_pub.np_index != NULL) {
2249 				kfree(nfs_pub.np_index, M_TEMP);
2250 				nfs_pub.np_index = NULL;
2251 			}
2252 		}
2253 		return (0);
2254 	}
2255 
2256 	/*
2257 	 * Only one allowed at a time.
2258 	 */
2259 	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
2260 		return (EBUSY);
2261 
2262 	/*
2263 	 * Get real filehandle for root of exported FS.
2264 	 */
2265 	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
2266 	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;
2267 
2268 	if ((error = VFS_ROOT(mp, &rvp)))
2269 		return (error);
2270 
	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) {
		vput(rvp);		/* don't leak the locked root vnode */
		return (error);
	}
2275 
2276 	/*
	 * If an indexfile was specified, pull it in.  The root vnode is
	 * still needed by vn_get_namelen(), so it is released in both
	 * branches below rather than earlier.
2278 	 */
2279 	if (argp->ex_indexfile != NULL) {
2280 		int namelen;
2281 
		error = vn_get_namelen(rvp, &namelen);
		vput(rvp);		/* rvp is no longer needed */
		if (error)
			return (error);
2285 		nfs_pub.np_index = kmalloc(namelen, M_TEMP, M_WAITOK);
2286 		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
2287 		    namelen, NULL);
2288 		if (!error) {
2289 			/*
2290 			 * Check for illegal filenames.
2291 			 */
2292 			for (cp = nfs_pub.np_index; *cp; cp++) {
2293 				if (*cp == '/') {
2294 					error = EINVAL;
2295 					break;
2296 				}
2297 			}
2298 		}
		if (error) {
			kfree(nfs_pub.np_index, M_TEMP);
			nfs_pub.np_index = NULL;  /* avoid stale pointer */
			return (error);
		}
	} else {
		vput(rvp);
	}
2304 
2305 	nfs_pub.np_mount = mp;
2306 	nfs_pub.np_valid = 1;
2307 	return (0);
2308 }
2309 
2310 struct netcred *
2311 vfs_export_lookup(struct mount *mp, struct netexport *nep,
2312 		struct sockaddr *nam)
2313 {
2314 	struct netcred *np;
2315 	struct radix_node_head *rnh;
2316 	struct sockaddr *saddr;
2317 
2318 	np = NULL;
2319 	if (mp->mnt_flag & MNT_EXPORTED) {
2320 		/*
2321 		 * Lookup in the export list first.
2322 		 */
2323 		NE_LOCK(nep);
2324 		if (nam != NULL) {
2325 			saddr = nam;
2326 			switch (saddr->sa_family) {
2327 #ifdef INET
2328 			case AF_INET:
2329 				rnh = nep->ne_inethead;
2330 				break;
2331 #endif
2332 #ifdef INET6
2333 			case AF_INET6:
2334 				rnh = nep->ne_inet6head;
2335 				break;
2336 #endif
2337 			default:
2338 				rnh = NULL;
2339 			}
2340 			if (rnh != NULL) {
2341 				np = (struct netcred *)
2342 					(*rnh->rnh_matchaddr)((char *)saddr,
2343 							      rnh);
2344 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2345 					np = NULL;
2346 			}
2347 		}
2348 		NE_UNLOCK(nep);
2349 		/*
2350 		 * If no address match, use the default if it exists.
2351 		 */
2352 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
2353 			np = &nep->ne_defexported;
2354 	}
2355 	return (np);
2356 }
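
/*
 * Consumer-side sketch (added example).  An NFS request path would
 * resolve the client address to a netcred and then consult
 * netc_exflags / netc_anon; 'is_write' is a hypothetical parameter.
 */
#if 0
static int
example_check_export(struct mount *mp, struct netexport *nep,
		     struct sockaddr *nam, int is_write)
{
	struct netcred *np;

	np = vfs_export_lookup(mp, nep, nam);
	if (np == NULL)
		return (EACCES);	/* address is not exported to */
	if (is_write && (np->netc_exflags & MNT_EXRDONLY))
		return (EROFS);
	return (0);
}
#endif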
2357 
2358 /*
 * Perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
2362  *
2363  * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
2364  *
 * NOTE: XXX VOP_PUTPAGES and friends require that the vnode be locked,
 * but vnode_pager_putpages() doesn't lock the vnode itself, so the
 * locking has to be done up here in this high-level function.
2368  */
2369 static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
2370 static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);
2371 
2372 void
2373 vfs_msync(struct mount *mp, int flags)
2374 {
2375 	int vmsc_flags;
2376 
2377 	/*
2378 	 * tmpfs sets this flag to prevent msync(), sync, and the
2379 	 * filesystem periodic syncer from trying to flush VM pages
2380 	 * to swap.  Only pure memory pressure flushes tmpfs VM pages
2381 	 * to swap.
2382 	 */
2383 	if (mp->mnt_kern_flag & MNTK_NOMSYNC)
2384 		return;
2385 
2386 	/*
2387 	 * Ok, scan the vnodes for work.  If the filesystem is using the
2388 	 * syncer thread feature we can use vsyncscan() instead of
2389 	 * vmntvnodescan(), which is much faster.
2390 	 */
2391 	vmsc_flags = VMSC_GETVP;
2392 	if (flags != MNT_WAIT)
2393 		vmsc_flags |= VMSC_NOWAIT;
2394 
2395 	if (mp->mnt_kern_flag & MNTK_THR_SYNC) {
2396 		vsyncscan(mp, vmsc_flags, vfs_msync_scan2,
2397 			  (void *)(intptr_t)flags);
2398 	} else {
2399 		vmntvnodescan(mp, vmsc_flags,
2400 			      vfs_msync_scan1, vfs_msync_scan2,
2401 			      (void *)(intptr_t)flags);
2402 	}
2403 }
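
/*
 * The scan2 callback below distinguishes only MNT_WAIT from everything
 * else: unmount passes MNT_WAIT for a fully synchronous flush, while
 * the periodic syncer and the 'sync' command pass a non-waiting value.
 */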
2404 
2405 /*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes; we cannot afford to do anything heavy-weight until we have a
 * fairly good indication that there is work to do.
2409  */
2410 static
2411 int
2412 vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
2413 {
2414 	int flags = (int)(intptr_t)data;
2415 
2416 	if ((vp->v_flag & VRECLAIMED) == 0) {
2417 		if (vp->v_auxrefs == 0 && VREFCNT(vp) <= 0 &&
2418 		    vp->v_object) {
2419 			return(0);	/* call scan2 */
2420 		}
2421 		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
2422 		    (vp->v_flag & VOBJDIRTY) &&
2423 		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
2424 			return(0);	/* call scan2 */
2425 		}
2426 	}
2427 
2428 	/*
	 * Do not call scan2; continue the loop.
2430 	 */
2431 	return(-1);
2432 }
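
/*
 * The split above follows the vmntvnodescan() callback contract: a
 * fast (scan1) callback returning -1 skips the vnode cheaply, while
 * returning 0 causes the vnode to be locked and handed to the slow
 * (scan2) callback.  A minimal custom scanner might look like this
 * (added sketch; the counting is purely illustrative):
 */
#if 0
static int
example_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	return (0);			/* always proceed to scan2 */
}

static int
example_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	++*(int *)data;			/* vp is held/locked here */
	return (0);
}

static int
example_count_vnodes(struct mount *mp)
{
	int count = 0;

	vmntvnodescan(mp, VMSC_GETVP, example_scan1, example_scan2, &count);
	return (count);
}
#endif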
2433 
2434 /*
2435  * This callback is handed a locked vnode.
2436  */
2437 static
2438 int
2439 vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
2440 {
2441 	vm_object_t obj;
2442 	int flags = (int)(intptr_t)data;
2443 	int opcflags;
2444 
2445 	if (vp->v_flag & VRECLAIMED)
2446 		return(0);
2447 
2448 	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
2449 		if ((obj = vp->v_object) != NULL) {
2450 			if (flags == MNT_WAIT) {
2451 				/*
2452 				 * VFS_MSYNC is called with MNT_WAIT when
2453 				 * unmounting.
2454 				 */
2455 				opcflags = OBJPC_SYNC;
2456 			} else if (vp->v_writecount || obj->ref_count) {
2457 				/*
2458 				 * VFS_MSYNC is otherwise called via the
2459 				 * periodic filesystem sync or the 'sync'
2460 				 * command.  Honor MADV_NOSYNC / MAP_NOSYNC
2461 				 * if the file is open for writing or memory
2462 				 * mapped.  Pages flagged PG_NOSYNC will not
2463 				 * be automatically flushed at this time.
2464 				 *
2465 				 * The obj->ref_count test is not perfect
2466 				 * since temporary refs may be present, but
2467 				 * the periodic filesystem sync will ultimately
2468 				 * catch it if the file is not open and not
2469 				 * mapped.
2470 				 */
2471 				opcflags = OBJPC_NOSYNC;
2472 			} else {
2473 				/*
2474 				 * If the file is no longer open for writing
2475 				 * and also no longer mapped, do not honor
2476 				 * MAP_NOSYNC.  That is, fully synchronize
2477 				 * the file.
2478 				 *
2479 				 * This still occurs on the periodic fs sync,
2480 				 * so frontend programs which turn the file
2481 				 * over quickly enough can still avoid the
2482 				 * sync, but ultimately we do want to flush
2483 				 * even MADV_NOSYNC pages once it is no longer
2484 				 * mapped or open for writing.
2485 				 */
2486 				opcflags = 0;
2487 			}
2488 			vm_object_page_clean(obj, 0, 0, opcflags);
2489 		}
2490 	}
2491 	return(0);
2492 }
2493 
2494 /*
2495  * Wake up anyone interested in vp because it is being revoked.
2496  */
2497 void
2498 vn_gone(struct vnode *vp)
2499 {
2500 	lwkt_gettoken(&vp->v_token);
2501 	KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, NOTE_REVOKE);
2502 	lwkt_reltoken(&vp->v_token);
2503 }
2504 
2505 /*
 * Extract the cdev_t from a VBLK or VCHR.  The vnode must have been
 * opened (otherwise v_rdev might be NULL).
2508  */
2509 cdev_t
2510 vn_todev(struct vnode *vp)
2511 {
2512 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2513 		return (NULL);
2514 	KKASSERT(vp->v_rdev != NULL);
2515 	return (vp->v_rdev);
2516 }
2517 
2518 /*
2519  * Check if vnode represents a disk device.  The vnode does not need to be
2520  * opened.
2521  *
2522  * MPALMOSTSAFE
2523  */
2524 int
2525 vn_isdisk(struct vnode *vp, int *errp)
2526 {
2527 	cdev_t dev;
2528 
2529 	if (vp->v_type != VCHR) {
2530 		if (errp != NULL)
2531 			*errp = ENOTBLK;
2532 		return (0);
2533 	}
2534 
2535 	dev = vp->v_rdev;
2536 
2537 	if (dev == NULL) {
2538 		if (errp != NULL)
2539 			*errp = ENXIO;
2540 		return (0);
2541 	}
2542 	if (dev_is_good(dev) == 0) {
2543 		if (errp != NULL)
2544 			*errp = ENXIO;
2545 		return (0);
2546 	}
2547 	if ((dev_dflags(dev) & D_DISK) == 0) {
2548 		if (errp != NULL)
2549 			*errp = ENOTBLK;
2550 		return (0);
2551 	}
2552 	if (errp != NULL)
2553 		*errp = 0;
2554 	return (1);
2555 }
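
/*
 * Minimal usage sketch (added example; 'devvp' is a hypothetical
 * device vnode supplied by the caller).
 */
#if 0
	if (!vn_isdisk(devvp, &error))
		return (error);
#endif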
2556 
2557 int
2558 vn_get_namelen(struct vnode *vp, int *namelen)
2559 {
2560 	int error;
2561 	register_t retval[2];
2562 
2563 	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
2564 	if (error)
2565 		return (error);
2566 	*namelen = (int)retval[0];
2567 	return (0);
2568 }
2569 
2570 int
2571 vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
2572 		uint16_t d_namlen, const char *d_name)
2573 {
2574 	struct dirent *dp;
2575 	size_t len;
2576 
2577 	len = _DIRENT_RECLEN(d_namlen);
2578 	if (len > uio->uio_resid)
2579 		return(1);
2580 
2581 	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);
2582 
2583 	dp->d_ino = d_ino;
2584 	dp->d_namlen = d_namlen;
2585 	dp->d_type = d_type;
2586 	bcopy(d_name, dp->d_name, d_namlen);
2587 
2588 	*error = uiomove((caddr_t)dp, len, uio);
2589 
2590 	kfree(dp, M_TEMP);
2591 
2592 	return(0);
2593 }
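
/*
 * Sketch of a VOP_READDIR loop built on vop_write_dirent() (added
 * example; next_entry() is a hypothetical per-filesystem iterator and
 * uio comes from the VOP arguments).  A return value of 1 means the
 * entry did not fit in the remaining uio space, so the scan stops and
 * resumes on a later request.
 */
#if 0
	ino_t ino;
	uint8_t type;
	uint16_t namlen;
	const char *name;
	int error = 0;

	while (next_entry(&ino, &type, &namlen, &name)) {
		if (vop_write_dirent(&error, uio, ino, type, namlen, name))
			break;		/* out of user buffer space */
		if (error)
			break;
	}
#endif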
2594 
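/*
 * Mark the access time on the vnode on behalf of td, unless the mount
 * is read-only or mounted noatime.
 */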
2595 void
2596 vn_mark_atime(struct vnode *vp, struct thread *td)
2597 {
2598 	struct proc *p = td->td_proc;
2599 	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;
2600 
2601 	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
2602 		VOP_MARKATIME(vp, cred);
2603 	}
2604 }
2605 
2606 /*
2607  * Calculate the number of entries in an inode-related chained hash table.
2608  * With today's memory sizes, maxvnodes can wind up being a very large
2609  * number.  There is no reason to waste memory, so tolerate some stacking.
2610  */
2611 int
2612 vfs_inodehashsize(void)
2613 {
2614 	int hsize;
2615 
2616 	hsize = 32;
2617 	while (hsize < maxvnodes)
2618 		hsize <<= 1;
2619 	while (hsize > maxvnodes * 2)
2620 		hsize >>= 1;		/* nominal 2x stacking */
2621 
2622 	if (maxvnodes > 1024 * 1024)
2623 		hsize >>= 1;		/* nominal 8x stacking */
2624 
2625 	if (maxvnodes > 128 * 1024)
2626 		hsize >>= 1;		/* nominal 4x stacking */
2627 
2628 	if (hsize < 16)
2629 		hsize = 16;
2630 
2631 	return hsize;
2632 }
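
/*
 * Worked example: with maxvnodes = 500000 the first loop doubles hsize
 * up to 524288 (2^19), the shrink loop does nothing (524288 is not
 * greater than 2 * 500000), and the 128K test halves it to 262144,
 * i.e. roughly two inodes per hash chain on average.
 */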
2633 
2634 union _qcvt {
2635 	quad_t qcvt;
2636 	int32_t val[2];
2637 };
2638 
2639 #define SETHIGH(q, h) { \
2640 	union _qcvt tmp; \
2641 	tmp.qcvt = (q); \
2642 	tmp.val[_QUAD_HIGHWORD] = (h); \
2643 	(q) = tmp.qcvt; \
2644 }
2645 #define SETLOW(q, l) { \
2646 	union _qcvt tmp; \
2647 	tmp.qcvt = (q); \
2648 	tmp.val[_QUAD_LOWWORD] = (l); \
2649 	(q) = tmp.qcvt; \
2650 }
2651 
2652 u_quad_t
2653 init_va_filerev(void)
2654 {
2655 	struct timeval tv;
2656 	u_quad_t ret = 0;
2657 
2658 	getmicrouptime(&tv);
2659 	SETHIGH(ret, tv.tv_sec);
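	/*
	 * Scale microseconds into the low 32 bits: 4294 ~= 2^32 / 10^6,
	 * so tv_usec (0..999999) maps onto nearly the full 32-bit range.
	 */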
2660 	SETLOW(ret, tv.tv_usec * 4294);
2661 
2662 	return ret;
2663 }
2664