xref: /original-bsd/sys/kern/vfs_subr.c (revision 5092e0b1)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)vfs_subr.c	7.51 (Berkeley) 03/04/91
8  */
9 
10 /*
11  * External virtual filesystem routines
12  */
13 
14 #include "param.h"
15 #include "mount.h"
16 #include "time.h"
17 #include "vnode.h"
18 #include "specdev.h"
19 #include "namei.h"
20 #include "ucred.h"
21 #include "errno.h"
22 #include "malloc.h"
23 
24 /*
25  * Remove a mount point from the list of mounted filesystems.
26  * Unmount of the root is illegal.
27  */
void
vfs_remove(mp)
	register struct mount *mp;
{

	if (mp == rootfs)
		panic("vfs_remove: unmounting root");
	/* Unlink mp from the circular, doubly-linked mount list. */
	mp->mnt_prev->mnt_next = mp->mnt_next;
	mp->mnt_next->mnt_prev = mp->mnt_prev;
	/* The covered vnode no longer has anything mounted on it. */
	mp->mnt_vnodecovered->v_mountedhere = (struct mount *)0;
	/* Release MNT_MLOCK; vfs_unlock panics if it is not held. */
	vfs_unlock(mp);
}
40 
41 /*
42  * Lock a filesystem.
43  * Used to prevent access to it while mounting and unmounting.
44  */
45 vfs_lock(mp)
46 	register struct mount *mp;
47 {
48 
49 	while(mp->mnt_flag & MNT_MLOCK) {
50 		mp->mnt_flag |= MNT_MWAIT;
51 		sleep((caddr_t)mp, PVFS);
52 	}
53 	mp->mnt_flag |= MNT_MLOCK;
54 	return (0);
55 }
56 
57 /*
58  * Unlock a locked filesystem.
59  * Panic if filesystem is not locked.
60  */
61 void
62 vfs_unlock(mp)
63 	register struct mount *mp;
64 {
65 
66 	if ((mp->mnt_flag & MNT_MLOCK) == 0)
67 		panic("vfs_unlock: not locked");
68 	mp->mnt_flag &= ~MNT_MLOCK;
69 	if (mp->mnt_flag & MNT_MWAIT) {
70 		mp->mnt_flag &= ~MNT_MWAIT;
71 		wakeup((caddr_t)mp);
72 	}
73 }
74 
75 /*
76  * Mark a mount point as busy.
77  * Used to synchronize access and to delay unmounting.
78  */
vfs_busy(mp)
	register struct mount *mp;
{

	/* Wait until no one else has the mount point busy. */
	while(mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		sleep((caddr_t)&mp->mnt_flag, PVFS);
	}
	/*
	 * An unmount may have begun while we slept; report failure
	 * rather than marking a dying mount point busy.
	 */
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}
92 
93 /*
94  * Free a busy filesystem.
95  * Panic if filesystem is not busy.
96  */
97 vfs_unbusy(mp)
98 	register struct mount *mp;
99 {
100 
101 	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
102 		panic("vfs_unbusy: not busy");
103 	mp->mnt_flag &= ~MNT_MPBUSY;
104 	if (mp->mnt_flag & MNT_MPWANT) {
105 		mp->mnt_flag &= ~MNT_MPWANT;
106 		wakeup((caddr_t)&mp->mnt_flag);
107 	}
108 }
109 
110 /*
111  * Lookup a mount point by filesystem identifier.
112  */
113 struct mount *
114 getvfs(fsid)
115 	fsid_t *fsid;
116 {
117 	register struct mount *mp;
118 
119 	mp = rootfs;
120 	do {
121 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
122 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
123 			return (mp);
124 		}
125 		mp = mp->mnt_next;
126 	} while (mp != rootfs);
127 	return ((struct mount *)0);
128 }
129 
130 /*
131  * Set vnode attributes to VNOVAL
132  */
133 void vattr_null(vap)
134 	register struct vattr *vap;
135 {
136 
137 	vap->va_type = VNON;
138 	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
139 		vap->va_fsid = vap->va_fileid = vap->va_size =
140 		vap->va_size_rsv = vap->va_blocksize = vap->va_rdev =
141 		vap->va_bytes = vap->va_bytes_rsv =
142 		vap->va_atime.tv_sec = vap->va_atime.tv_usec =
143 		vap->va_mtime.tv_sec = vap->va_mtime.tv_usec =
144 		vap->va_ctime.tv_sec = vap->va_ctime.tv_usec =
145 		vap->va_flags = vap->va_gen = VNOVAL;
146 }
147 
148 /*
149  * Initialize a nameidata structure
150  */
ndinit(ndp)
	register struct nameidata *ndp;
{

	/* Start from a zeroed structure, then set up the embedded uio. */
	bzero((caddr_t)ndp, sizeof(struct nameidata));
	ndp->ni_iov = &ndp->ni_nd.nd_iovec;	/* single embedded iovec */
	ndp->ni_iovcnt = 1;
	ndp->ni_base = (caddr_t)&ndp->ni_dent;
	ndp->ni_rw = UIO_WRITE;
	ndp->ni_uioseg = UIO_SYSSPACE;		/* buffers live in kernel space */
}
162 
163 /*
164  * Duplicate a nameidata structure
165  */
nddup(ndp, newndp)
	register struct nameidata *ndp, *newndp;
{

	/* Initialize newndp, then share (and hold) ndp's credentials. */
	ndinit(newndp);
	newndp->ni_cred = ndp->ni_cred;
	crhold(newndp->ni_cred);	/* balanced by crfree in ndrele */
}
174 
175 /*
176  * Release a nameidata structure
177  */
ndrele(ndp)
	register struct nameidata *ndp;
{

	/* Drop the credential reference held by this nameidata. */
	crfree(ndp->ni_cred);
}
184 
185 /*
186  * Routines having to do with the management of the vnode table.
187  */
struct vnode *vfreeh, **vfreet;	/* free-list head / addr of tail link */
extern struct vnodeops dead_vnodeops, spec_vnodeops;
extern void vclean();
long numvnodes;			/* vnodes allocated so far (see getnewvnode) */
struct vattr va_null;		/* all fields VNOVAL; set up in vfsinit */
193 
194 /*
195  * Initialize the vnode structures and initialize each file system type.
196  */
vfsinit()
{
	struct vfsops **vfsp;

	/*
	 * Initialize the vnode name cache
	 */
	nchinit();
	/*
	 * Initialize each file system type.
	 */
	vattr_null(&va_null);
	for (vfsp = &vfssw[0]; vfsp <= &vfssw[MOUNT_MAXTYPE]; vfsp++) {
		if (*vfsp == NULL)	/* empty slot in the fs switch */
			continue;
		(*(*vfsp)->vfs_init)();
	}
}
215 
216 /*
217  * Return the next vnode from the free list.
218  */
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	struct vnodeops *vops;
	struct vnode **vpp;
{
	register struct vnode *vp, *vq;

	if (numvnodes < desiredvnodes) {
		/* Below the target table size: allocate a fresh vnode. */
		vp = (struct vnode *)malloc((u_long)sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		/* Otherwise recycle the vnode at the head of the free list. */
		if ((vp = vfreeh) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't");
		/* Unlink vp from the head, fixing the head/tail pointers. */
		if (vq = vp->v_freef)
			vq->v_freeb = &vfreeh;
		else
			vfreet = &vfreeh;
		vfreeh = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
		/* Strip any old filesystem state before reuse. */
		if (vp->v_type != VBAD)
			vgone(vp);
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);		/* drop stale name-cache entries */
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);		/* place on mp's vnode list */
	VREF(vp);
	*vpp = vp;
	return (0);
}
262 
263 /*
264  * Move a vnode from one mount queue to another.
265  */
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{
	struct vnode *vq;

	/*
	 * Delete from old mount point vnode list, if on one.
	 * (v_mountb points at the forward link of the previous node,
	 * so storing through it unlinks vp.)
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	vp->v_mount = mp;
	if (mp == NULL) {
		/* No mount point: leave vp on no list at all. */
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
		return;
	}
	if (mp->mnt_mounth) {
		/* Insert at the head, in front of the current first vnode. */
		vp->v_mountf = mp->mnt_mounth;
		vp->v_mountb = &mp->mnt_mounth;
		mp->mnt_mounth->v_mountb = &vp->v_mountf;
		mp->mnt_mounth = vp;
	} else {
		/* First vnode on this mount point's list. */
		mp->mnt_mounth = vp;
		vp->v_mountb = &mp->mnt_mounth;
		vp->v_mountf = NULL;
	}
}
300 
301 /*
302  * Create a vnode for a block device.
303  * Used for root filesystem, argdev, and swap areas.
304  * Also used for memory file system special devices.
305  */
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	/* NODEV yields success with *vpp left untouched. */
	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *)0, &spec_vnodeops, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	/* If an aliased vnode already exists, use it and drop ours. */
	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
330 
331 /*
332  * Check to see if the new vnode represents a special device
333  * for which we already have a vnode (either because of
334  * bdevvp() or because of a different vnode representing
335  * the same block device). If such an alias exists, deallocate
336  * the existing contents and return the aliased vnode. The
337  * caller is responsible for filling it with its new contents.
338  */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	/* Only block and character devices can have aliases. */
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;	/* chain may have changed; rescan */
		}
		/* vget fails if vp is mid-vgone; rescan in that case. */
		if (vget(vp))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		/*
		 * No usable alias: give nvp its own specinfo and link
		 * it onto the device hash chain.  If a live alias was
		 * found (vp != NULL), mark both vnodes VALIASED.
		 */
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	/*
	 * Found a VT_NON alias (e.g. created by bdevvp()): clean it
	 * out and hand it back carrying nvp's operations and tag.
	 */
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}
390 
391 /*
392  * Grab a particular vnode from the free list, increment its
393  * reference count and lock it. The vnode lock bit is set the
394  * vnode is being eliminated in vgone. The process is awakened
395  * when the transition is completed, and an error returned to
396  * indicate that the vnode is no longer usable (possibly having
397  * been changed to a new file system type).
398  */
vget(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	/*
	 * Vnode is locked for cleaning (vclean/vgone): wait for the
	 * transition and tell the caller the vnode is unusable.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return (1);
	}
	/* A use count of zero means vp is on the free list: unlink it. */
	if (vp->v_usecount == 0) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
	}
	VREF(vp);
	VOP_LOCK(vp);
	return (0);
}
422 
423 /*
424  * Vnode reference, just increment the count
425  */
void vref(vp)
	struct vnode *vp;
{

	/* Bare increment: no locking, no free-list manipulation. */
	vp->v_usecount++;
}
432 
433 /*
434  * vput(), just unlock and vrele()
435  */
void vput(vp)
	register struct vnode *vp;
{
	/* Unlock first, then drop the reference. */
	VOP_UNLOCK(vp);
	vrele(vp);
}
442 
443 /*
444  * Vnode release.
445  * If count drops to zero, call inactive routine and return to freelist.
446  */
void vrele(vp)
	register struct vnode *vp;
{

	if (vp == NULL)
		panic("vrele: null vp");
	vp->v_usecount--;
	if (vp->v_usecount < 0)
		vprint("vrele: bad ref count", vp);	/* diagnose, don't panic */
	if (vp->v_usecount > 0)
		return;
	/* Last reference gone: append vp at the tail of the free list. */
	if (vfreeh == NULLVP) {
		/*
		 * insert into empty list
		 */
		vfreeh = vp;
		vp->v_freeb = &vfreeh;
	} else {
		/*
		 * insert at tail of list
		 */
		*vfreet = vp;
		vp->v_freeb = vfreet;
	}
	vp->v_freef = NULL;
	vfreet = &vp->v_freef;
	/* Let the filesystem release its state before possible reuse. */
	VOP_INACTIVE(vp);
}
475 
476 /*
477  * Page or buffer structure gets a reference.
478  */
vhold(vp)
	register struct vnode *vp;
{

	/* Hold count is separate from v_usecount; see holdrele(). */
	vp->v_holdcnt++;
}
485 
486 /*
487  * Page or buffer structure frees a reference.
488  */
holdrele(vp)
	register struct vnode *vp;
{

	/* Releasing a hold that was never taken is a kernel bug. */
	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}
497 
498 /*
499  * Remove any vnodes in the vnode table belonging to mount point mp.
500  *
501  * If MNT_NOFORCE is specified, there should not be any active ones,
502  * return error if any are found (nb: this is a user error, not a
503  * system error). If MNT_FORCE is specified, detach any active vnodes
504  * that are found.
505  */
506 int busyprt = 0;	/* patch to print out busy vnodes */
507 
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_mounth; vp; vp = nvp) {
		/*
		 * vp was recycled onto another mount while we worked;
		 * the walk is invalid, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mountf;	/* fetch next before vp may be vgone'd */
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over any vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = &spec_vnodeops;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
		if (busyprt)
			vprint("vflush: busy vnode", vp);
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
563 
564 /*
565  * Disassociate the underlying file system from a vnode.
566  */
void vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	struct vnodeops *origops;
	int active;	/* nonzero if vp had references on entry */

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the buffer list is being cleaned out.
	 */
	VOP_LOCK(vp);
	if (flags & DOCLOSE)
		vinvalbuf(vp, 1);
	/*
	 * Prevent any further operations on the vnode from
	 * being passed through to the old file system.
	 * origops is saved so the remaining teardown calls below
	 * still go to the old filesystem's routines.
	 */
	origops = vp->v_op;
	vp->v_op = &dead_vnodeops;
	vp->v_tag = VT_NON;
	/*
	 * If purging an active vnode, it must be unlocked, closed,
	 * and deactivated before being reclaimed.
	 */
	(*(origops->vn_unlock))(vp);
	if (active) {
		if (flags & DOCLOSE)
			(*(origops->vn_close))(vp, 0, NOCRED);
		(*(origops->vn_inactive))(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if ((*(origops->vn_reclaim))(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);	/* drop the reference taken above */
	/*
	 * Done with purge, notify sleepers in vget of the grim news.
	 */
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}
632 
633 /*
634  * Eliminate all activity associated with the requested vnode
635  * and with all vnodes aliased to the requested vnode.
636  */
void vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			sleep((caddr_t)vp, PINOD);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		/*
		 * Loop until vgone on the last remaining alias clears
		 * VALIASED on vp (see vgone's alias accounting).
		 */
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}
675 
676 /*
677  * Eliminate all activity associated with a vnode
678  * in preparation for reuse.
679  */
680 void vgone(vp)
681 	register struct vnode *vp;
682 {
683 	register struct vnode *vq;
684 	struct vnode *vx;
685 	long count;
686 
687 	/*
688 	 * If a vgone (or vclean) is already in progress,
689 	 * wait until it is done and return.
690 	 */
691 	if (vp->v_flag & VXLOCK) {
692 		vp->v_flag |= VXWANT;
693 		sleep((caddr_t)vp, PINOD);
694 		return;
695 	}
696 	/*
697 	 * Clean out the filesystem specific data.
698 	 */
699 	vclean(vp, DOCLOSE);
700 	/*
701 	 * Delete from old mount point vnode list, if on one.
702 	 */
703 	if (vp->v_mountb) {
704 		if (vq = vp->v_mountf)
705 			vq->v_mountb = vp->v_mountb;
706 		*vp->v_mountb = vq;
707 		vp->v_mountf = NULL;
708 		vp->v_mountb = NULL;
709 	}
710 	/*
711 	 * If special device, remove it from special device alias list.
712 	 */
713 	if (vp->v_type == VBLK || vp->v_type == VCHR) {
714 		if (*vp->v_hashchain == vp) {
715 			*vp->v_hashchain = vp->v_specnext;
716 		} else {
717 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
718 				if (vq->v_specnext != vp)
719 					continue;
720 				vq->v_specnext = vp->v_specnext;
721 				break;
722 			}
723 			if (vq == NULL)
724 				panic("missing bdev");
725 		}
726 		if (vp->v_flag & VALIASED) {
727 			count = 0;
728 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
729 				if (vq->v_rdev != vp->v_rdev ||
730 				    vq->v_type != vp->v_type)
731 					continue;
732 				count++;
733 				vx = vq;
734 			}
735 			if (count == 0)
736 				panic("missing alias");
737 			if (count == 1)
738 				vx->v_flag &= ~VALIASED;
739 			vp->v_flag &= ~VALIASED;
740 		}
741 		FREE(vp->v_specinfo, M_VNODE);
742 		vp->v_specinfo = NULL;
743 	}
744 	/*
745 	 * If it is on the freelist, move it to the head of the list.
746 	 */
747 	if (vp->v_freeb) {
748 		if (vq = vp->v_freef)
749 			vq->v_freeb = vp->v_freeb;
750 		else
751 			vfreet = vp->v_freeb;
752 		*vp->v_freeb = vq;
753 		vp->v_freef = vfreeh;
754 		vp->v_freeb = &vfreeh;
755 		vfreeh->v_freeb = &vp->v_freef;
756 		vfreeh = vp;
757 	}
758 	vp->v_type = VBAD;
759 }
760 
761 /*
762  * Lookup a vnode by device number.
763  */
764 vfinddev(dev, type, vpp)
765 	dev_t dev;
766 	enum vtype type;
767 	struct vnode **vpp;
768 {
769 	register struct vnode *vp;
770 
771 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
772 		if (dev != vp->v_rdev || type != vp->v_type)
773 			continue;
774 		*vpp = vp;
775 		return (0);
776 	}
777 	return (1);
778 }
779 
780 /*
781  * Calculate the total number of references to a special device.
782  */
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	int count;

	/* An unaliased device is counted by its own use count alone. */
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
loop:
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0) {
			vgone(vq);
			goto loop;	/* chain changed; rescan from the top */
		}
		count += vq->v_usecount;
	}
	return (count);
}
806 
807 /*
808  * Print out a description of a vnode.
809  */
810 static char *typename[] =
811    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
812 
813 vprint(label, vp)
814 	char *label;
815 	register struct vnode *vp;
816 {
817 	char buf[64];
818 
819 	if (label != NULL)
820 		printf("%s: ", label);
821 	printf("type %s, usecount %d, refcount %d,", typename[vp->v_type],
822 		vp->v_usecount, vp->v_holdcnt);
823 	buf[0] = '\0';
824 	if (vp->v_flag & VROOT)
825 		strcat(buf, "|VROOT");
826 	if (vp->v_flag & VTEXT)
827 		strcat(buf, "|VTEXT");
828 	if (vp->v_flag & VSYSTEM)
829 		strcat(buf, "|VSYSTEM");
830 	if (vp->v_flag & VXLOCK)
831 		strcat(buf, "|VXLOCK");
832 	if (vp->v_flag & VXWANT)
833 		strcat(buf, "|VXWANT");
834 	if (vp->v_flag & VBWAIT)
835 		strcat(buf, "|VBWAIT");
836 	if (vp->v_flag & VALIASED)
837 		strcat(buf, "|VALIASED");
838 	if (buf[0] != '\0')
839 		printf(" flags (%s)", &buf[1]);
840 	printf("\n\t");
841 	VOP_PRINT(vp);
842 }
843 
int kinfo_vdebug = 1;		/* print a notice on vnode-list races */
int kinfo_vgetfailed;		/* NOTE(review): not referenced in this file */
#define KINFO_VNODESLOP	10	/* slack in the size estimate for new vnodes */
847 /*
848  * Dump vnode list (via kinfo).
849  * Copyout address of vnode followed by vnode.
850  */
851 /* ARGSUSED */
kinfo_vnode(op, where, acopysize, arg, aneeded)
	int op;
	char *where;
	int *acopysize, arg, *aneeded;
{
	register struct mount *mp = rootfs;
	struct mount *omp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere = where + *acopysize;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	/*
	 * A null buffer is a size query: report an estimate, padded
	 * by KINFO_VNODESLOP to allow for vnodes created meanwhile.
	 */
	if (where == NULL) {
		*aneeded = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}

	do {
		/* vfs_busy fails for mounts being unmounted: skip them. */
		if (vfs_busy(mp)) {
			mp = mp->mnt_next;
			continue;
		}
		savebp = bp;	/* remember start, to redo this fs on a race */
again:
		for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			/* Copy out the pointer, then the vnode, if room. */
			if ((bp + VPTRSZ + VNODESZ <= ewhere) &&
			    ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			     (error = copyout((caddr_t)vp, bp + VPTRSZ,
			      VNODESZ))))
				return (error);
			bp += VPTRSZ + VNODESZ;
		}
		omp = mp;
		mp = mp->mnt_next;
		vfs_unbusy(omp);
	} while (mp != rootfs);

	/* Report total size needed and how much was actually copied. */
	*aneeded = bp - where;
	if (bp > ewhere)
		*acopysize = ewhere - where;
	else
		*acopysize = bp - where;
	return (0);
}
909