xref: /original-bsd/sys/kern/vfs_subr.c (revision 331bfa8d)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)vfs_subr.c	7.50 (Berkeley) 02/01/91
8  */
9 
10 /*
11  * External virtual filesystem routines
12  */
13 
14 #include "param.h"
15 #include "mount.h"
16 #include "time.h"
17 #include "vnode.h"
18 #include "specdev.h"
19 #include "namei.h"
20 #include "ucred.h"
21 #include "errno.h"
22 #include "malloc.h"
23 
24 /*
25  * Remove a mount point from the list of mounted filesystems.
26  * Unmount of the root is illegal.
27  */
28 void
29 vfs_remove(mp)
30 	register struct mount *mp;
31 {
32 
33 	if (mp == rootfs)
34 		panic("vfs_remove: unmounting root");
35 	mp->mnt_prev->mnt_next = mp->mnt_next;
36 	mp->mnt_next->mnt_prev = mp->mnt_prev;
37 	mp->mnt_vnodecovered->v_mountedhere = (struct mount *)0;
38 	vfs_unlock(mp);
39 }
40 
41 /*
42  * Lock a filesystem.
43  * Used to prevent access to it while mounting and unmounting.
44  */
45 vfs_lock(mp)
46 	register struct mount *mp;
47 {
48 
49 	while(mp->mnt_flag & MNT_MLOCK) {
50 		mp->mnt_flag |= MNT_MWAIT;
51 		sleep((caddr_t)mp, PVFS);
52 	}
53 	mp->mnt_flag |= MNT_MLOCK;
54 	return (0);
55 }
56 
57 /*
58  * Unlock a locked filesystem.
59  * Panic if filesystem is not locked.
60  */
61 void
62 vfs_unlock(mp)
63 	register struct mount *mp;
64 {
65 
66 	if ((mp->mnt_flag & MNT_MLOCK) == 0)
67 		panic("vfs_unlock: not locked");
68 	mp->mnt_flag &= ~MNT_MLOCK;
69 	if (mp->mnt_flag & MNT_MWAIT) {
70 		mp->mnt_flag &= ~MNT_MWAIT;
71 		wakeup((caddr_t)mp);
72 	}
73 }
74 
75 /*
76  * Mark a mount point as busy.
77  * Used to synchronize access and to delay unmounting.
78  */
79 vfs_busy(mp)
80 	register struct mount *mp;
81 {
82 
83 	while(mp->mnt_flag & MNT_MPBUSY) {
84 		mp->mnt_flag |= MNT_MPWANT;
85 		sleep((caddr_t)&mp->mnt_flag, PVFS);
86 	}
87 	if (mp->mnt_flag & MNT_UNMOUNT)
88 		return (1);
89 	mp->mnt_flag |= MNT_MPBUSY;
90 	return (0);
91 }
92 
93 /*
94  * Free a busy filesystem.
95  * Panic if filesystem is not busy.
96  */
97 vfs_unbusy(mp)
98 	register struct mount *mp;
99 {
100 
101 	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
102 		panic("vfs_unbusy: not busy");
103 	mp->mnt_flag &= ~MNT_MPBUSY;
104 	if (mp->mnt_flag & MNT_MPWANT) {
105 		mp->mnt_flag &= ~MNT_MPWANT;
106 		wakeup((caddr_t)&mp->mnt_flag);
107 	}
108 }
109 
110 /*
111  * Lookup a mount point by filesystem identifier.
112  */
113 struct mount *
114 getvfs(fsid)
115 	fsid_t *fsid;
116 {
117 	register struct mount *mp;
118 
119 	mp = rootfs;
120 	do {
121 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
122 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
123 			return (mp);
124 		}
125 		mp = mp->mnt_next;
126 	} while (mp != rootfs);
127 	return ((struct mount *)0);
128 }
129 
130 /*
131  * Set vnode attributes to VNOVAL
132  */
133 void vattr_null(vap)
134 	register struct vattr *vap;
135 {
136 
137 	vap->va_type = VNON;
138 	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
139 		vap->va_fsid = vap->va_fileid = vap->va_size =
140 		vap->va_size_rsv = vap->va_blocksize = vap->va_rdev =
141 		vap->va_bytes = vap->va_bytes_rsv =
142 		vap->va_atime.tv_sec = vap->va_atime.tv_usec =
143 		vap->va_mtime.tv_sec = vap->va_mtime.tv_usec =
144 		vap->va_ctime.tv_sec = vap->va_ctime.tv_usec =
145 		vap->va_flags = vap->va_gen = VNOVAL;
146 }
147 
148 /*
149  * Initialize a nameidata structure
150  */
151 ndinit(ndp)
152 	register struct nameidata *ndp;
153 {
154 
155 	bzero((caddr_t)ndp, sizeof(struct nameidata));
156 	ndp->ni_iov = &ndp->ni_nd.nd_iovec;
157 	ndp->ni_iovcnt = 1;
158 	ndp->ni_base = (caddr_t)&ndp->ni_dent;
159 	ndp->ni_rw = UIO_WRITE;
160 	ndp->ni_uioseg = UIO_SYSSPACE;
161 }
162 
163 /*
164  * Duplicate a nameidata structure
165  */
166 nddup(ndp, newndp)
167 	register struct nameidata *ndp, *newndp;
168 {
169 
170 	ndinit(newndp);
171 	newndp->ni_cred = ndp->ni_cred;
172 	crhold(newndp->ni_cred);
173 }
174 
175 /*
176  * Release a nameidata structure
177  */
178 ndrele(ndp)
179 	register struct nameidata *ndp;
180 {
181 
182 	crfree(ndp->ni_cred);
183 }
184 
/*
 * Routines having to do with the management of the vnode table.
 */
/* vfreeh: head of the vnode free list; vfreet: address of the tail
 * element's v_freef field (i.e. where the next tail is linked in). */
struct vnode *vfreeh, **vfreet;
extern struct vnodeops dead_vnodeops, spec_vnodeops;
extern void vclean();
long numvnodes;			/* count of vnodes allocated so far */
struct vattr va_null;		/* all fields VNOVAL; filled in by vfsinit() */
193 
194 /*
195  * Initialize the vnode structures and initialize each file system type.
196  */
197 vfsinit()
198 {
199 	struct vfsops **vfsp;
200 
201 	/*
202 	 * Initialize the vnode name cache
203 	 */
204 	nchinit();
205 	/*
206 	 * Initialize each file system type.
207 	 */
208 	vattr_null(&va_null);
209 	for (vfsp = &vfssw[0]; vfsp <= &vfssw[MOUNT_MAXTYPE]; vfsp++) {
210 		if (*vfsp == NULL)
211 			continue;
212 		(*(*vfsp)->vfs_init)();
213 	}
214 }
215 
/*
 * Return the next vnode from the free list.
 *
 * While fewer than desiredvnodes exist, a fresh vnode is allocated;
 * afterwards the head of the free list is recycled.  The vnode is
 * tagged, given the supplied operations vector, placed on mp's vnode
 * list, referenced, and returned via *vpp.  Returns 0 on success or
 * ENFILE when the table is full and nothing is free.
 */
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	struct vnodeops *vops;
	struct vnode **vpp;
{
	register struct vnode *vp, *vq;

	if (numvnodes < desiredvnodes) {
		vp = (struct vnode *)malloc((u_long)sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vfreeh) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		/* A vnode on the free list must have no references. */
		if (vp->v_usecount)
			panic("free vnode isn't");
		/* Unlink vp (the head) from the free list. */
		if (vq = vp->v_freef)
			vq->v_freeb = &vfreeh;
		else
			vfreet = &vfreeh;
		vfreeh = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
		/* Disassociate any old filesystem state before reuse. */
		if (vp->v_type != VBAD)
			vgone(vp);
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);	/* drop stale name-cache entries for vp */
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);	/* enter on mp's vnode list */
	VREF(vp);
	*vpp = vp;
	return (0);
}
262 
/*
 * Move a vnode from one mount queue to another.
 *
 * Removes vp from its current mount point's vnode list (if on one)
 * and inserts it at the head of mp's list.  Passing mp == NULL
 * simply disassociates the vnode from any mount point.
 */
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{
	struct vnode *vq;

	/*
	 * Delete from old mount point vnode list, if on one.
	 * (v_mountb points at the predecessor's v_mountf field,
	 * or at mnt_mounth when vp is the list head.)
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	vp->v_mount = mp;
	if (mp == NULL) {
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
		return;
	}
	if (mp->mnt_mounth) {
		/* Non-empty list: push vp onto the front. */
		vp->v_mountf = mp->mnt_mounth;
		vp->v_mountb = &mp->mnt_mounth;
		mp->mnt_mounth->v_mountb = &vp->v_mountf;
		mp->mnt_mounth = vp;
	} else {
		/* Empty list: vp becomes the sole element. */
		mp->mnt_mounth = vp;
		vp->v_mountb = &mp->mnt_mounth;
		vp->v_mountf = NULL;
	}
}
300 
301 /*
302  * Create a vnode for a block device.
303  * Used for root filesystem, argdev, and swap areas.
304  * Also used for memory file system special devices.
305  */
306 bdevvp(dev, vpp)
307 	dev_t dev;
308 	struct vnode **vpp;
309 {
310 	register struct vnode *vp;
311 	struct vnode *nvp;
312 	int error;
313 
314 	error = getnewvnode(VT_NON, (struct mount *)0, &spec_vnodeops, &nvp);
315 	if (error) {
316 		*vpp = 0;
317 		return (error);
318 	}
319 	vp = nvp;
320 	vp->v_type = VBLK;
321 	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
322 		vput(vp);
323 		vp = nvp;
324 	}
325 	*vpp = vp;
326 	return (0);
327 }
328 
/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	/* Only block and character devices can be aliased. */
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 * vgone() may sleep and the chain may change,
		 * so restart the scan from the top.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		/* vget() fails if vp was reclaimed while we slept; retry. */
		if (vget(vp))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		/*
		 * No reusable alias: give nvp its own specinfo and link
		 * it onto the hash chain.  If an in-use alias exists,
		 * mark both vnodes as aliased.
		 */
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	/*
	 * Found a reusable (VT_NON) alias: clean it out, transplant
	 * nvp's operations and tag onto it, and return it.  The caller
	 * discards nvp (its type is set to VNON here).
	 */
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}
388 
/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set while the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
vget(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VXLOCK) {
		/* Being cleaned out: wait for vgone/vclean to finish,
		 * then tell the caller the vnode is unusable. */
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return (1);
	}
	if (vp->v_usecount == 0) {
		/* Unreferenced vnodes live on the free list; unlink vp. */
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
	}
	VREF(vp);
	VOP_LOCK(vp);
	return (0);
}
420 
421 /*
422  * Vnode reference, just increment the count
423  */
424 void vref(vp)
425 	struct vnode *vp;
426 {
427 
428 	vp->v_usecount++;
429 }
430 
/*
 * vput(): release the vnode lock, then drop a reference
 * (equivalent to VOP_UNLOCK followed by vrele).
 */
void vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}
440 
/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 * A negative count is reported (vprint) but the vnode is still
 * placed on the free list.
 */
void vrele(vp)
	register struct vnode *vp;
{

	if (vp == NULL)
		panic("vrele: null vp");
	vp->v_usecount--;
	if (vp->v_usecount < 0)
		vprint("vrele: bad ref count", vp);
	if (vp->v_usecount > 0)
		return;
	/* Last reference gone: append vp to the tail of the free list. */
	if (vfreeh == NULLVP) {
		/*
		 * insert into empty list
		 */
		vfreeh = vp;
		vp->v_freeb = &vfreeh;
	} else {
		/*
		 * insert at tail of list
		 * (vfreet points at the current tail's v_freef field)
		 */
		*vfreet = vp;
		vp->v_freeb = vfreet;
	}
	vp->v_freef = NULL;
	vfreet = &vp->v_freef;
	/* Let the filesystem do its last-close processing. */
	VOP_INACTIVE(vp);
}
473 
474 /*
475  * Page or buffer structure gets a reference.
476  */
477 vhold(vp)
478 	register struct vnode *vp;
479 {
480 
481 	vp->v_holdcnt++;
482 }
483 
484 /*
485  * Page or buffer structure frees a reference.
486  */
487 holdrele(vp)
488 	register struct vnode *vp;
489 {
490 
491 	if (vp->v_holdcnt <= 0)
492 		panic("holdrele: holdcnt");
493 	vp->v_holdcnt--;
494 }
495 
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
int busyprt = 0;	/* patch to print out busy vnodes */

vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	/* Caller must hold the mount busy to freeze the vnode list. */
	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_mounth; vp; vp = nvp) {
		/*
		 * vgone()/vclean() below may sleep; if vp was meanwhile
		 * recycled onto another mount, restart the whole scan.
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mountf;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = &spec_vnodeops;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
		/* Active vnode we may not kill: count it as busy. */
		if (busyprt)
			vprint("vflush: busy vnode", vp);
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
561 
/*
 * Disassociate the underlying file system from a vnode.
 *
 * On return the vnode's operations vector points at dead_vnodeops
 * and its tag is VT_NON.  If DOCLOSE is set in flags, the vnode's
 * buffers are invalidated and, for active vnodes, the old file
 * system's close routine is called.
 */
void vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	struct vnodeops *origops;
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the buffer list is being cleaned out.
	 */
	VOP_LOCK(vp);
	if (flags & DOCLOSE)
		vinvalbuf(vp, 1);
	/*
	 * Prevent any further operations on the vnode from
	 * being passed through to the old file system.
	 */
	origops = vp->v_op;
	vp->v_op = &dead_vnodeops;
	vp->v_tag = VT_NON;
	/*
	 * If purging an active vnode, it must be unlocked, closed,
	 * and deactivated before being reclaimed.
	 * (note: the original operations vector is used directly here,
	 * since vp->v_op now points at dead_vnodeops)
	 */
	(*(origops->vn_unlock))(vp);
	if (active) {
		if (flags & DOCLOSE)
			(*(origops->vn_close))(vp, 0, NOCRED);
		(*(origops->vn_inactive))(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if ((*(origops->vn_reclaim))(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);
	/*
	 * Done with purge, notify sleepers in vget of the grim news.
	 */
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}
630 
/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode
 * (i.e. other vnodes for the same special device).
 */
void vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			sleep((caddr_t)vp, PINOD);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		/* vgone() clears VALIASED on the last remaining alias,
		 * which terminates this loop. */
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}
673 
674 /*
675  * Eliminate all activity associated with a vnode
676  * in preparation for reuse.
677  */
678 void vgone(vp)
679 	register struct vnode *vp;
680 {
681 	register struct vnode *vq;
682 	struct vnode *vx;
683 	long count;
684 
685 	/*
686 	 * If a vgone (or vclean) is already in progress,
687 	 * wait until it is done and return.
688 	 */
689 	if (vp->v_flag & VXLOCK) {
690 		vp->v_flag |= VXWANT;
691 		sleep((caddr_t)vp, PINOD);
692 		return;
693 	}
694 	/*
695 	 * Clean out the filesystem specific data.
696 	 */
697 	vclean(vp, DOCLOSE);
698 	/*
699 	 * Delete from old mount point vnode list, if on one.
700 	 */
701 	if (vp->v_mountb) {
702 		if (vq = vp->v_mountf)
703 			vq->v_mountb = vp->v_mountb;
704 		*vp->v_mountb = vq;
705 		vp->v_mountf = NULL;
706 		vp->v_mountb = NULL;
707 	}
708 	/*
709 	 * If special device, remove it from special device alias list.
710 	 */
711 	if (vp->v_type == VBLK || vp->v_type == VCHR) {
712 		if (*vp->v_hashchain == vp) {
713 			*vp->v_hashchain = vp->v_specnext;
714 		} else {
715 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
716 				if (vq->v_specnext != vp)
717 					continue;
718 				vq->v_specnext = vp->v_specnext;
719 				break;
720 			}
721 			if (vq == NULL)
722 				panic("missing bdev");
723 		}
724 		if (vp->v_flag & VALIASED) {
725 			count = 0;
726 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
727 				if (vq->v_rdev != vp->v_rdev ||
728 				    vq->v_type != vp->v_type)
729 					continue;
730 				count++;
731 				vx = vq;
732 			}
733 			if (count == 0)
734 				panic("missing alias");
735 			if (count == 1)
736 				vx->v_flag &= ~VALIASED;
737 			vp->v_flag &= ~VALIASED;
738 		}
739 		FREE(vp->v_specinfo, M_VNODE);
740 		vp->v_specinfo = NULL;
741 	}
742 	/*
743 	 * If it is on the freelist, move it to the head of the list.
744 	 */
745 	if (vp->v_freeb) {
746 		if (vq = vp->v_freef)
747 			vq->v_freeb = vp->v_freeb;
748 		else
749 			vfreet = vp->v_freeb;
750 		*vp->v_freeb = vq;
751 		vp->v_freef = vfreeh;
752 		vp->v_freeb = &vfreeh;
753 		vfreeh->v_freeb = &vp->v_freef;
754 		vfreeh = vp;
755 	}
756 	vp->v_type = VBAD;
757 }
758 
759 /*
760  * Lookup a vnode by device number.
761  */
762 vfinddev(dev, type, vpp)
763 	dev_t dev;
764 	enum vtype type;
765 	struct vnode **vpp;
766 {
767 	register struct vnode *vp;
768 
769 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
770 		if (dev != vp->v_rdev || type != vp->v_type)
771 			continue;
772 		*vpp = vp;
773 		return (0);
774 	}
775 	return (1);
776 }
777 
/*
 * Calculate the total number of references to a special device,
 * summing the use counts of every vnode aliased to the same
 * device.  Unreferenced aliases encountered along the way are
 * flushed out.
 */
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	int count;

	/* Not aliased: the vnode's own count is the answer. */
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
loop:
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 * vgone() may sleep and alter the chain, so
		 * restart the scan (and the sum) from the top.
		 */
		if (vq->v_usecount == 0) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
804 
805 /*
806  * Print out a description of a vnode.
807  */
808 static char *typename[] =
809    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
810 
811 vprint(label, vp)
812 	char *label;
813 	register struct vnode *vp;
814 {
815 	char buf[64];
816 
817 	if (label != NULL)
818 		printf("%s: ", label);
819 	printf("type %s, usecount %d, refcount %d,", typename[vp->v_type],
820 		vp->v_usecount, vp->v_holdcnt);
821 	buf[0] = '\0';
822 	if (vp->v_flag & VROOT)
823 		strcat(buf, "|VROOT");
824 	if (vp->v_flag & VTEXT)
825 		strcat(buf, "|VTEXT");
826 	if (vp->v_flag & VSYSTEM)
827 		strcat(buf, "|VSYSTEM");
828 	if (vp->v_flag & VXLOCK)
829 		strcat(buf, "|VXLOCK");
830 	if (vp->v_flag & VXWANT)
831 		strcat(buf, "|VXWANT");
832 	if (vp->v_flag & VBWAIT)
833 		strcat(buf, "|VBWAIT");
834 	if (vp->v_flag & VALIASED)
835 		strcat(buf, "|VALIASED");
836 	if (buf[0] != '\0')
837 		printf(" flags (%s)", &buf[1]);
838 	printf("\n\t");
839 	VOP_PRINT(vp);
840 }
841 
int kinfo_vdebug = 1;		/* patchable: log scan restarts */
int kinfo_vgetfailed;
/* Slack added to the size estimate for vnodes created mid-copy. */
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via kinfo).
 * Copyout address of vnode followed by vnode.
 *
 * With where == NULL only the required buffer size is returned
 * (in *aneeded).  Otherwise each mounted filesystem's vnodes are
 * copied out as (pointer, vnode) pairs until the buffer is full;
 * *aneeded reports the size needed for everything, *acopysize the
 * amount actually copied.
 */
/* ARGSUSED */
kinfo_vnode(op, where, acopysize, arg, aneeded)
	int op;
	char *where;
	int *acopysize, arg, *aneeded;
{
	register struct mount *mp = rootfs;
	struct mount *omp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere = where + *acopysize;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		*aneeded = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}

	/* Walk the circular mount list, busying each filesystem so its
	 * vnode list cannot be unmounted out from under the scan. */
	do {
		if (vfs_busy(mp)) {
			mp = mp->mnt_next;
			continue;
		}
		savebp = bp;
again:
		for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			/* Copy out the pair only while it fits; keep
			 * advancing bp regardless so *aneeded is right. */
			if ((bp + VPTRSZ + VNODESZ <= ewhere) &&
			    ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			     (error = copyout((caddr_t)vp, bp + VPTRSZ,
			      VNODESZ))))
				return (error);
			bp += VPTRSZ + VNODESZ;
		}
		omp = mp;
		mp = mp->mnt_next;
		vfs_unbusy(omp);
	} while (mp != rootfs);

	*aneeded = bp - where;
	if (bp > ewhere)
		*acopysize = ewhere - where;
	else
		*acopysize = bp - where;
	return (0);
}
907