xref: /original-bsd/sys/kern/vfs_subr.c (revision ba762ddc)
/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_subr.c	7.53 (Berkeley) 04/19/91
 */

/*
 * External virtual filesystem routines
 */

#include "param.h"
#include "proc.h"
#include "mount.h"
#include "time.h"
#include "vnode.h"
#include "specdev.h"
#include "namei.h"
#include "ucred.h"
#include "errno.h"
#include "malloc.h"

/*
 * Remove a mount point from the list of mounted filesystems.
 * Unmount of the root is illegal.
 */
void
vfs_remove(mp)
	register struct mount *mp;
{

	if (mp == rootfs)
		panic("vfs_remove: unmounting root");
	mp->mnt_prev->mnt_next = mp->mnt_next;
	mp->mnt_next->mnt_prev = mp->mnt_prev;
	mp->mnt_vnodecovered->v_mountedhere = (struct mount *)0;
	vfs_unlock(mp);
}

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
vfs_lock(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		sleep((caddr_t)mp, PVFS);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t)mp);
	}
}
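
/*
 * Editor's note: the sketch below is not part of the original file; it is
 * a hypothetical caller showing the intended pairing of vfs_lock() and
 * vfs_unlock() around a mount-table update.  vfs_lock() may sleep, so it
 * must not be called from interrupt level.
 */
#ifdef notdef
static void
example_mount_update(mp)
	register struct mount *mp;
{

	vfs_lock(mp);		/* sleeps until MNT_MLOCK is clear */
	/* ... manipulate the mount structures here ... */
	vfs_unlock(mp);		/* wakes anyone who set MNT_MWAIT */
}
#endif /* notdef */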

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
vfs_busy(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		sleep((caddr_t)&mp->mnt_flag, PVFS);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t)&mp->mnt_flag);
	}
}
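
/*
 * Editor's note: hypothetical sketch (not in the original source) showing
 * how a scan of a mount point is bracketed by vfs_busy()/vfs_unbusy() so
 * that an unmount cannot proceed underneath it; vfs_busy() returns nonzero
 * if an unmount is already in progress.
 */
#ifdef notdef
static void
example_scan_mount(mp)
	register struct mount *mp;
{

	if (vfs_busy(mp))	/* unmount already under way, give up */
		return;
	/* ... walk the vnodes hanging off mp->mnt_mounth ... */
	vfs_unbusy(mp);		/* wakes anyone who set MNT_MPWANT */
}
#endif /* notdef */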

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	mp = rootfs;
	do {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
		mp = mp->mnt_next;
	} while (mp != rootfs);
	return ((struct mount *)0);
}
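
/*
 * Editor's note: hypothetical use of getvfs() (not in the original file);
 * e.g. a handle- or fsid-based lookup first maps the fsid back to its
 * mount point, failing if the filesystem is no longer mounted.
 */
#ifdef notdef
static int
example_fsid_lookup(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	if ((mp = getvfs(fsid)) == NULL)
		return (ESTALE);	/* fsid no longer names a mount */
	/* ... hand mp to the filesystem-specific code ... */
	return (0);
}
#endif /* notdef */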

/*
 * Set vnode attributes to VNOVAL
 */
void vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid = vap->va_size =
		vap->va_size_rsv = vap->va_blocksize = vap->va_rdev =
		vap->va_bytes = vap->va_bytes_rsv =
		vap->va_atime.tv_sec = vap->va_atime.tv_usec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_usec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_usec =
		vap->va_flags = vap->va_gen = VNOVAL;
}
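
/*
 * Editor's note: hypothetical example (not in the original file) of the
 * conventional vattr_null() pattern: clear every attribute to VNOVAL and
 * fill in only the fields the operation is meant to change, assuming the
 * VOP_SETATTR(vp, vap, cred, p) form used in this era of the vnode
 * interface.
 */
#ifdef notdef
static int
example_chmod(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
	struct proc *p;
{
	struct vattr vattr;

	vattr_null(&vattr);		/* everything starts out as VNOVAL */
	vattr.va_mode = mode & 07777;	/* only the mode will be changed */
	return (VOP_SETATTR(vp, &vattr, cred, p));
}
#endif /* notdef */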

/*
 * Initialize a nameidata structure
 */
ndinit(ndp)
	register struct nameidata *ndp;
{

	bzero((caddr_t)ndp, sizeof(struct nameidata));
	ndp->ni_iov = &ndp->ni_nd.nd_iovec;
	ndp->ni_iovcnt = 1;
	ndp->ni_base = (caddr_t)&ndp->ni_dent;
	ndp->ni_rw = UIO_WRITE;
	ndp->ni_uioseg = UIO_SYSSPACE;
}

/*
 * Duplicate a nameidata structure
 */
nddup(ndp, newndp)
	register struct nameidata *ndp, *newndp;
{

	ndinit(newndp);
	newndp->ni_cred = ndp->ni_cred;
	crhold(newndp->ni_cred);
}

/*
 * Release a nameidata structure
 */
ndrele(ndp)
	register struct nameidata *ndp;
{

	crfree(ndp->ni_cred);
}

/*
 * Routines having to do with the management of the vnode table.
 */
struct vnode *vfreeh, **vfreet;
extern struct vnodeops dead_vnodeops, spec_vnodeops;
extern void vclean();
long numvnodes;
struct vattr va_null;

/*
 * Initialize the vnode structures and initialize each file system type.
 */
vfsinit()
{
	struct vfsops **vfsp;

	/*
	 * Initialize the vnode name cache
	 */
	nchinit();
	/*
	 * Initialize each file system type.
	 */
	vattr_null(&va_null);
	for (vfsp = &vfssw[0]; vfsp <= &vfssw[MOUNT_MAXTYPE]; vfsp++) {
		if (*vfsp == NULL)
			continue;
		(*(*vfsp)->vfs_init)();
	}
}

/*
 * Return the next vnode from the free list.
 */
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	struct vnodeops *vops;
	struct vnode **vpp;
{
	register struct vnode *vp, *vq;

	if (numvnodes < desiredvnodes) {
		vp = (struct vnode *)malloc((u_long)sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vfreeh) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't");
		if (vq = vp->v_freef)
			vq->v_freeb = &vfreeh;
		else
			vfreet = &vfreeh;
		vfreeh = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	VREF(vp);
	*vpp = vp;
	return (0);
}
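
/*
 * Editor's note: hypothetical sketch (not part of the original source) of
 * a filesystem obtaining a fresh vnode.  The ops vector named here is
 * invented for illustration; the vnode comes back with one reference held,
 * type VNON, and already on mp's vnode list, so the caller finishes the
 * setup.
 */
#ifdef notdef
static int
example_vget_new(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	extern struct vnodeops example_vnodeops;	/* hypothetical */
	struct vnode *vp;
	int error;

	if (error = getnewvnode(VT_NON, mp, &example_vnodeops, &vp))
		return (error);
	vp->v_type = VREG;	/* caller, not getnewvnode, sets the type */
	/* ... set up filesystem-private state hung off the vnode ... */
	*vpp = vp;
	return (0);
}
#endif /* notdef */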

/*
 * Move a vnode from one mount queue to another.
 */
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{
	struct vnode *vq;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	vp->v_mount = mp;
	if (mp == NULL) {
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
		return;
	}
	if (mp->mnt_mounth) {
		vp->v_mountf = mp->mnt_mounth;
		vp->v_mountb = &mp->mnt_mounth;
		mp->mnt_mounth->v_mountb = &vp->v_mountf;
		mp->mnt_mounth = vp;
	} else {
		mp->mnt_mounth = vp;
		vp->v_mountb = &mp->mnt_mounth;
		vp->v_mountf = NULL;
	}
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *)0, &spec_vnodeops, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
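
/*
 * Editor's note: hypothetical sketch (not in the original file) of the
 * usual bdevvp() call made while setting up the root filesystem: wrap the
 * root device in a VBLK vnode so it can be handed to the mount code.
 */
#ifdef notdef
static int
example_setup_rootvp(vpp)
	struct vnode **vpp;
{
	extern dev_t rootdev;		/* set by machine-dependent autoconf */

	if (bdevvp(rootdev, vpp))
		panic("example_setup_rootvp: can't alloc root device vnode");
	return (0);
}
#endif /* notdef */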

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if
 * the vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
vget(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return (1);
	}
	if (vp->v_usecount == 0) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
	}
	VREF(vp);
	VOP_LOCK(vp);
	return (0);
}

/*
 * Vnode reference, just increment the count
 */
void vref(vp)
	struct vnode *vp;
{

	vp->v_usecount++;
}

/*
 * vput(), just unlock and vrele()
 */
void vput(vp)
	register struct vnode *vp;
{
	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void vrele(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;		/* XXX */

	if (vp == NULL)
		panic("vrele: null vp");
	vp->v_usecount--;
	if (vp->v_usecount < 0)
		vprint("vrele: bad ref count", vp);
	if (vp->v_usecount > 0)
		return;
	if (vfreeh == NULLVP) {
		/*
		 * insert into empty list
		 */
		vfreeh = vp;
		vp->v_freeb = &vfreeh;
	} else {
		/*
		 * insert at tail of list
		 */
		*vfreet = vp;
		vp->v_freeb = vfreet;
	}
	vp->v_freef = NULL;
	vfreet = &vp->v_freef;
	VOP_INACTIVE(vp, p);
}
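
/*
 * Editor's note: hypothetical sketch (not part of the original source) of
 * the reference-count discipline these routines implement: vget() to pull
 * a vnode off the free list, reference it and lock it; vput() (the
 * VOP_UNLOCK-plus-vrele shorthand) to give it back.
 */
#ifdef notdef
static void
example_use_vnode(vp)
	register struct vnode *vp;
{

	if (vget(vp))		/* vnode went away (vgone) while we slept */
		return;
	/* ... operate on the locked, referenced vnode ... */
	vput(vp);		/* unlock and drop the reference */
}
#endif /* notdef */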

/*
 * Page or buffer structure gets a reference.
 */
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
int busyprt = 0;	/* patch to print out busy vnodes */

vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_mounth; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mountf;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = &spec_vnodeops;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
		if (busyprt)
			vprint("vflush: busy vnode", vp);
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
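
/*
 * Editor's note: hypothetical unmount-time fragment (not in the original
 * file) showing the customary vflush() call: keep one vnode (here the
 * filesystem's root vnode) out of the purge, and pass FORCECLOSE only for
 * forcible unmounts.
 */
#ifdef notdef
static int
example_unmount_flush(mp, rootvp, mntflags)
	struct mount *mp;
	struct vnode *rootvp;
	int mntflags;
{
	int flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	return (vflush(mp, rootvp, flags));	/* EBUSY if vnodes stay active */
}
#endif /* notdef */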

/*
 * Disassociate the underlying file system from a vnode.
 */
void vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	struct vnodeops *origops;
	int active;
	struct proc *p = curproc;	/* XXX */

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the buffer list is being cleaned out.
	 */
	VOP_LOCK(vp);
	if (flags & DOCLOSE)
		vinvalbuf(vp, 1);
	/*
	 * Prevent any further operations on the vnode from
	 * being passed through to the old file system.
	 */
	origops = vp->v_op;
	vp->v_op = &dead_vnodeops;
	vp->v_tag = VT_NON;
	/*
	 * If purging an active vnode, it must be unlocked, closed,
	 * and deactivated before being reclaimed.
	 */
	(*(origops->vn_unlock))(vp);
	if (active) {
		if (flags & DOCLOSE)
			(*(origops->vn_close))(vp, IO_NDELAY, NOCRED, p);
		(*(origops->vn_inactive))(vp, p);
	}
	/*
	 * Reclaim the vnode.
	 */
	if ((*(origops->vn_reclaim))(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);
	/*
	 * Done with purge, notify sleepers in vget of the grim news.
	 */
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			sleep((caddr_t)vp, PINOD);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;
	long count;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			count = 0;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				count++;
				vx = vq;
			}
			if (count == 0)
				panic("missing alias");
			if (count == 1)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist, move it to the head of the list.
	 */
	if (vp->v_freeb) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = vfreeh;
		vp->v_freeb = &vfreeh;
		vfreeh->v_freeb = &vp->v_freef;
		vfreeh = vp;
	}
	vp->v_type = VBAD;
}
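
/*
 * Editor's note: hypothetical revocation-style fragment (not part of the
 * original source): when a device vnode is to be made unusable, vgoneall()
 * clears out the named vnode and every alias of it, after which the
 * vnodes answer only through dead_vnodeops.
 */
#ifdef notdef
static void
example_revoke_device(vp)
	register struct vnode *vp;
{

	if (vp->v_flag & VALIASED)
		vgoneall(vp);		/* take out all aliases as well */
	else
		vgone(vp);
}
#endif /* notdef */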

/*
 * Lookup a vnode by device number.
 */
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (0);
	}
	return (1);
}

/*
 * Calculate the total number of references to a special device.
 */
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	int count;

	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
loop:
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
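
/*
 * Editor's note: hypothetical device-close check (not in the original
 * file): because several vnodes may alias one device, a driver's
 * last-close decision is made on vcount(), the total across all aliases,
 * rather than on a single vnode's v_usecount.
 */
#ifdef notdef
static int
example_is_last_close(vp)
	register struct vnode *vp;
{

	return (vcount(vp) <= 1);	/* no other alias still has it open */
}
#endif /* notdef */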

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, refcount %d,", typename[vp->v_type],
		vp->v_usecount, vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	printf("\n\t");
	VOP_PRINT(vp);
}

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via kinfo).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
kinfo_vnode(op, where, acopysize, arg, aneeded)
	int op;
	char *where;
	int *acopysize, arg, *aneeded;
{
	register struct mount *mp = rootfs;
	struct mount *omp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere = where + *acopysize;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		*aneeded = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}

	do {
		if (vfs_busy(mp)) {
			mp = mp->mnt_next;
			continue;
		}
		savebp = bp;
again:
		for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			if ((bp + VPTRSZ + VNODESZ <= ewhere) &&
			    ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			     (error = copyout((caddr_t)vp, bp + VPTRSZ,
			      VNODESZ))))
				return (error);
			bp += VPTRSZ + VNODESZ;
		}
		omp = mp;
		mp = mp->mnt_next;
		vfs_unbusy(omp);
	} while (mp != rootfs);

	*aneeded = bp - where;
	if (bp > ewhere)
		*acopysize = ewhere - where;
	else
		*acopysize = bp - where;
	return (0);
}
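
/*
 * Editor's note: hypothetical sketch (not part of the original source) of
 * the two-pass pattern a kinfo(2)-style caller uses with kinfo_vnode():
 * first a sizing call with a null buffer to learn how much space is
 * needed, then the real copyout pass into a buffer of that size.
 */
#ifdef notdef
static int
example_dump_vnodes(op, where, bufsize, needed)
	int op;
	char *where;
	int *bufsize, *needed;
{
	int error;

	/* pass 1: a null buffer just reports an estimate in *needed */
	if (error = kinfo_vnode(op, (char *)NULL, bufsize, 0, needed))
		return (error);
	/* ... caller arranges *needed bytes at where, then repeats ... */
	return (kinfo_vnode(op, where, bufsize, 0, needed));
}
#endif /* notdef */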