/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_subr.c	8.12 (Berkeley) 04/11/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
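
/*
 * These tables back the IFTOVT() and VTTOIF() conversions (see
 * sys/vnode.h) between inode mode bits and vnode types; for example
 * iftovt_tab[(S_IFDIR & S_IFMT) >> 12] is VDIR and
 * vttoif_tab[(int)VDIR] is S_IFDIR.
 */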

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {  \
	LIST_REMOVE(bp, b_vnbufs); \
	(bp)->b_vnbufs.le_next = NOLIST; \
}

TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
struct mntlist mountlist;			/* mounted filesystem list */

/*
 * Initialize the vnode management data structures.
 */
vntblinit()
{

	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&mountlist);
}

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
vfs_lock(mp)
	register struct mount *mp;
{

	while(mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		sleep((caddr_t)mp, PVFS);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t)mp);
	}
}

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
vfs_busy(mp)
	register struct mount *mp;
{

	while(mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		sleep((caddr_t)&mp->mnt_flag, PVFS);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}
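
/*
 * Callers must check the return value: a non-zero result means the
 * filesystem is being unmounted and should be skipped.  A typical
 * pattern (illustrative only) is:
 *
 *	if (vfs_busy(mp))
 *		continue;
 *	... use mp ...
 *	vfs_unbusy(mp);
 */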

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t)&mp->mnt_flag);
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	for (mp = mountlist.tqh_first; mp != NULL; mp = mp->mnt_list.tqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
			return (mp);
	}
	return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;

	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.tqh_first != NULL) {
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}
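
/*
 * Note (descriptive): the synthesized major number nblkdev + mtype lies
 * outside the range of real block devices, so an fsid generated here
 * cannot collide with the fsid of a filesystem mounted from a local
 * disk, whose val[0] is that disk's device number.
 */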

/*
 * Set vnode attributes to VNOVAL
 */
void vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
		vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
		vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
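
/*
 * Typical use (illustrative): initialize a vattr so that only the
 * fields explicitly filled in are acted on; filesystems ignore any
 * attribute still set to VNOVAL when processing VOP_SETATTR().
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = length;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 */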

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)();
extern void vclean();
long numvnodes;
extern struct vattr va_null;

/*
 * Return the next vnode from the free list.
 */
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops)();
	struct vnode **vpp;
{
	register struct vnode *vp;
	int s;

	if ((vnode_free_list.tqh_first == NULL &&
	     numvnodes < 2 * desiredvnodes) ||
	    numvnodes < desiredvnodes) {
		vp = (struct vnode *)malloc((u_long)sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vnode_free_list.tqh_first) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't");
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);
#ifdef DIAGNOSTIC
		if (vp->v_data)
			panic("cleaned vnode isn't");
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}
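
/*
 * Allocation policy (summary): new vnodes are allocated until
 * numvnodes reaches desiredvnodes; beyond that the least recently
 * used free-list vnode is recycled, and a fresh vnode is allocated
 * only when the free list is empty and fewer than 2 * desiredvnodes
 * exist.
 */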

/*
 * Move a vnode from one mount queue to another.
 */
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL)
		return;
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if (vp = bp->b_vp) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

	if (flags & V_SAVE) {
		if (error = VOP_FSYNC(vp, cred, MNT_WAIT, p))
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && flags & V_SAVEMETA)
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t)bp,
					slpflag | (PRIBIO + 1), "vinvalbuf",
					slptimeo);
				splx(s);
				if (error)
					return (error);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}
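
/*
 * Flag summary (descriptive): V_SAVE writes dirty buffers back before
 * invalidating them, while V_SAVEMETA leaves meta-data buffers (those
 * with a negative logical block number, e.g. indirect blocks) attached
 * to the vnode.
 */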

/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buflists *listheadp;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp, 1))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return (1);
	}
	if (vp->v_usecount == 0) {
#ifdef DIAGNOSTIC
		if (vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)
			panic("vget: race with getnewvnode");
#endif
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	}
	vp->v_usecount++;
	if (lockflag)
		VOP_LOCK(vp);
	return (0);
}

/*
 * Vnode reference, just increment the count
 */
void vref(vp)
	struct vnode *vp;
{

	if (vp->v_usecount <= 0)
		panic("vref used where vget required");
	vp->v_usecount++;
}

/*
 * vput(), just unlock and vrele()
 */
void vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0)
		return;
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	/*
	 * insert at tail of LRU list
	 */
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	VOP_INACTIVE(vp);
}

/*
 * Page or buffer structure gets a reference.
 */
void vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
void holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}
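
/*
 * Reference-count summary (descriptive): v_usecount tracks active
 * references (vref/vget versus vrele/vput), v_writecount tracks open
 * writers, and v_holdcnt tracks buffers and pages bound to the vnode
 * (VHOLD/HOLDRELE).  Only when v_usecount drops to zero is the vnode
 * placed back on the free list for reuse.
 */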

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first
	 * wait for VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, IO_NDELAY, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			sleep((caddr_t)vp, PINOD);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		LIST_REMOVE(vp, v_mntvnodes);
		vp->v_mount = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from the freelist to ensure
	 * that we do not try to move it here.
	 */
	if (vp->v_usecount == 0 &&
	    vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
	    vnode_free_list.tqh_first != vp) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (1);
	}
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, writecount %d, refcount %d,",
		typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
printlockedvnodes()
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.tqh_first; mp != NULL; mp = mp->mnt_list.tqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
	}
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
sysctl_vnode(where, sizep)
	char *where;
	size_t *sizep;
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = mountlist.tqh_first; mp != NULL; mp = nmp) {
		nmp = mp->mnt_list.tqe_next;
		if (vfs_busy(mp))
			continue;
		savebp = bp;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				*sizep = bp - where;
				return (ENOMEM);
			}
			if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			   (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
				return (error);
			bp += VPTRSZ + VNODESZ;
		}
		vfs_unbusy(mp);
	}

	*sizep = bp - where;
	return (0);
}
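
/*
 * Usage note (descriptive): following the usual sysctl convention, a
 * NULL `where' asks only for a size estimate; the caller then
 * allocates a buffer of that size (padded by KINFO_VNODESLOP entries
 * to allow for vnodes created in the meantime) and repeats the call
 * to copy out address/vnode pairs.
 */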

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t)np, i);
	saddr = (struct sockaddr *)(np + 1);
	if (error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}
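
/*
 * Layout note (descriptive): each netcred is allocated with enough
 * room for the export address and mask immediately after it, so
 * `saddr' and `smask' point into the same allocation that the radix
 * tree entry (np->netc_rnodes) refers to.
 */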

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	caddr_t w;
{
	register struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if (rnh = nep->ne_rtable[i]) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred,
			    (caddr_t)rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}
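
/*
 * Update the export list of a mount point in response to the export
 * flags passed in with a mount or update request: MNT_DELEXPORT
 * removes all existing export entries, and MNT_EXPORTED installs the
 * address list described by argp.
 */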
int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (error = vfs_hang_addrlist(mp, nep, argp))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
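
/*
 * Look up the export credentials that apply to a request arriving from
 * the network address in `nam'; a NULL return means the address is not
 * permitted to access this export.
 */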
struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}