xref: /original-bsd/sys/kern/vfs_subr.c (revision 6ab384a1)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)vfs_subr.c	7.48 (Berkeley) 08/24/90
8  */
9 
10 /*
11  * External virtual filesystem routines
12  */
13 
14 #include "param.h"
15 #include "mount.h"
16 #include "time.h"
17 #include "vnode.h"
18 #include "specdev.h"
19 #include "namei.h"
20 #include "ucred.h"
21 #include "errno.h"
22 #include "malloc.h"
23 
24 /*
25  * Remove a mount point from the list of mounted filesystems.
26  * Unmount of the root is illegal.
27  */
28 void
29 vfs_remove(mp)
30 	register struct mount *mp;
31 {
32 
33 	if (mp == rootfs)
34 		panic("vfs_remove: unmounting root");
35 	mp->mnt_prev->mnt_next = mp->mnt_next;
36 	mp->mnt_next->mnt_prev = mp->mnt_prev;
37 	mp->mnt_vnodecovered->v_mountedhere = (struct mount *)0;
38 	vfs_unlock(mp);
39 }
40 
41 /*
42  * Lock a filesystem.
43  * Used to prevent access to it while mounting and unmounting.
44  */
45 vfs_lock(mp)
46 	register struct mount *mp;
47 {
48 
49 	while(mp->mnt_flag & MNT_MLOCK) {
50 		mp->mnt_flag |= MNT_MWAIT;
51 		sleep((caddr_t)mp, PVFS);
52 	}
53 	mp->mnt_flag |= MNT_MLOCK;
54 	return (0);
55 }
56 
57 /*
58  * Unlock a locked filesystem.
59  * Panic if filesystem is not locked.
60  */
61 void
62 vfs_unlock(mp)
63 	register struct mount *mp;
64 {
65 
66 	if ((mp->mnt_flag & MNT_MLOCK) == 0)
67 		panic("vfs_unlock: not locked");
68 	mp->mnt_flag &= ~MNT_MLOCK;
69 	if (mp->mnt_flag & MNT_MWAIT) {
70 		mp->mnt_flag &= ~MNT_MWAIT;
71 		wakeup((caddr_t)mp);
72 	}
73 }
74 
75 /*
76  * Mark a mount point as busy.
77  * Used to synchronize access and to delay unmounting.
78  */
79 vfs_busy(mp)
80 	register struct mount *mp;
81 {
82 
83 	while(mp->mnt_flag & MNT_MPBUSY) {
84 		mp->mnt_flag |= MNT_MPWANT;
85 		sleep((caddr_t)&mp->mnt_flag, PVFS);
86 	}
87 	if (mp->mnt_flag & MNT_UNMOUNT)
88 		return (1);
89 	mp->mnt_flag |= MNT_MPBUSY;
90 	return (0);
91 }
92 
93 /*
94  * Free a busy filesystem.
95  * Panic if filesystem is not busy.
96  */
97 vfs_unbusy(mp)
98 	register struct mount *mp;
99 {
100 
101 	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
102 		panic("vfs_unbusy: not busy");
103 	mp->mnt_flag &= ~MNT_MPBUSY;
104 	if (mp->mnt_flag & MNT_MPWANT) {
105 		mp->mnt_flag &= ~MNT_MPWANT;
106 		wakeup((caddr_t)&mp->mnt_flag);
107 	}
108 }
109 
110 /*
111  * Lookup a mount point by filesystem identifier.
112  */
113 struct mount *
114 getvfs(fsid)
115 	fsid_t *fsid;
116 {
117 	register struct mount *mp;
118 
119 	mp = rootfs;
120 	do {
121 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
122 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
123 			return (mp);
124 		}
125 		mp = mp->mnt_next;
126 	} while (mp != rootfs);
127 	return ((struct mount *)0);
128 }
129 
/*
 * Set vnode attributes to VNOVAL
 *
 * Marks every attribute field "no value" so callers can tell which
 * fields were subsequently filled in.  NOTE: the value propagates
 * right-to-left through the chained assignment, being converted at
 * each field of differing width along the way, so the chain is kept
 * as a single statement rather than split up.
 */
void vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid = vap->va_size =
		vap->va_size_rsv = vap->va_blocksize = vap->va_rdev =
		vap->va_bytes = vap->va_bytes_rsv =
		vap->va_atime.tv_sec = vap->va_atime.tv_usec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_usec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_usec =
		vap->va_flags = vap->va_gen = VNOVAL;
}
147 
148 /*
149  * Initialize a nameidata structure
150  */
151 ndinit(ndp)
152 	register struct nameidata *ndp;
153 {
154 
155 	bzero((caddr_t)ndp, sizeof(struct nameidata));
156 	ndp->ni_iov = &ndp->ni_nd.nd_iovec;
157 	ndp->ni_iovcnt = 1;
158 	ndp->ni_base = (caddr_t)&ndp->ni_dent;
159 	ndp->ni_rw = UIO_WRITE;
160 	ndp->ni_uioseg = UIO_SYSSPACE;
161 }
162 
163 /*
164  * Duplicate a nameidata structure
165  */
166 nddup(ndp, newndp)
167 	register struct nameidata *ndp, *newndp;
168 {
169 
170 	ndinit(newndp);
171 	newndp->ni_cdir = ndp->ni_cdir;
172 	VREF(newndp->ni_cdir);
173 	newndp->ni_rdir = ndp->ni_rdir;
174 	if (newndp->ni_rdir)
175 		VREF(newndp->ni_rdir);
176 	newndp->ni_cred = ndp->ni_cred;
177 	crhold(newndp->ni_cred);
178 }
179 
180 /*
181  * Release a nameidata structure
182  */
183 ndrele(ndp)
184 	register struct nameidata *ndp;
185 {
186 
187 	vrele(ndp->ni_cdir);
188 	if (ndp->ni_rdir)
189 		vrele(ndp->ni_rdir);
190 	crfree(ndp->ni_cred);
191 }
192 
/*
 * Routines having to do with the management of the vnode table.
 */
struct vnode *vfreeh, **vfreet;		/* free list head; addr of tail link */
extern struct vnodeops dead_vnodeops, spec_vnodeops;
extern void vclean();
long numvnodes;				/* count of vnodes allocated so far */
struct vattr va_null;			/* attributes set to VNOVAL at vfsinit */
201 
202 /*
203  * Initialize the vnode structures and initialize each file system type.
204  */
205 vfsinit()
206 {
207 	struct vfsops **vfsp;
208 
209 	/*
210 	 * Initialize the vnode name cache
211 	 */
212 	nchinit();
213 	/*
214 	 * Initialize each file system type.
215 	 */
216 	vattr_null(&va_null);
217 	for (vfsp = &vfssw[0]; vfsp <= &vfssw[MOUNT_MAXTYPE]; vfsp++) {
218 		if (*vfsp == NULL)
219 			continue;
220 		(*(*vfsp)->vfs_init)();
221 	}
222 }
223 
/*
 * Return the next vnode from the free list.
 *
 * While the table is below desiredvnodes a fresh zeroed vnode is
 * allocated; otherwise the head of the free list is recycled.  On
 * success the vnode is tagged, referenced, placed on mp's vnode
 * list, stored through vpp, and 0 is returned; ENFILE is returned
 * when the table is full and the free list is empty.
 */
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	struct vnodeops *vops;
	struct vnode **vpp;
{
	register struct vnode *vp, *vq;

	if (numvnodes < desiredvnodes) {
		vp = (struct vnode *)malloc((u_long)sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vfreeh) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		/* Free-list vnodes must be unreferenced. */
		if (vp->v_usecount)
			panic("free vnode isn't");
		/* Unlink vp from the head of the free list. */
		if (vq = vp->v_freef)
			vq->v_freeb = &vfreeh;
		else
			vfreet = &vfreeh;	/* list is now empty */
		vfreeh = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
		/* Dissociate the previous contents before reuse. */
		if (vp->v_type != VBAD)
			vgone(vp);
		vp->v_flag = 0;
		vp->v_shlockc = 0;
		vp->v_exlockc = 0;
		vp->v_lastr = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);		/* drop stale name cache entries */
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);		/* hook onto the new mount's list */
	VREF(vp);
	*vpp = vp;
	return (0);
}
272 
/*
 * Move a vnode from one mount queue to another.
 * A null mp simply removes vp from its current queue.
 */
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{
	struct vnode *vq;

	/*
	 * Delete from old mount point vnode list, if on one.
	 * (v_mountb points at the forward link that addresses vp.)
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	vp->v_mount = mp;
	if (mp == NULL) {
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
		return;
	}
	if (mp->mnt_mounth) {
		/* Push onto the front of a non-empty list. */
		vp->v_mountf = mp->mnt_mounth;
		vp->v_mountb = &mp->mnt_mounth;
		mp->mnt_mounth->v_mountb = &vp->v_mountf;
		mp->mnt_mounth = vp;
	} else {
		/* First vnode on this mount point. */
		mp->mnt_mounth = vp;
		vp->v_mountb = &mp->mnt_mounth;
		vp->v_mountf = NULL;
	}
}
310 
311 /*
312  * Create a vnode for a block device.
313  * Used for root filesystem, argdev, and swap areas.
314  * Also used for memory file system special devices.
315  */
316 bdevvp(dev, vpp)
317 	dev_t dev;
318 	struct vnode **vpp;
319 {
320 	register struct vnode *vp;
321 	struct vnode *nvp;
322 	int error;
323 
324 	error = getnewvnode(VT_NON, (struct mount *)0, &spec_vnodeops, &nvp);
325 	if (error) {
326 		*vpp = 0;
327 		return (error);
328 	}
329 	vp = nvp;
330 	vp->v_type = VBLK;
331 	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
332 		vput(vp);
333 		vp = nvp;
334 	}
335 	*vpp = vp;
336 	return (0);
337 }
338 
/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 * Returns NULLVP when there is no usable alias (nvp itself was
 * entered on the device hash chain instead).
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	/* Only block and character devices can alias. */
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 * vgone may alter the chain, so rescan from the top.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		/* vget failure means the vnode was being cleaned; retry. */
		if (vget(vp))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		/*
		 * No reusable alias: give nvp its own specinfo and put
		 * it on the device hash chain.
		 */
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			/* Both vnodes now alias the same device. */
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	/*
	 * Reuse the existing anonymous (VT_NON) device vnode: clean it
	 * out and hand it the caller's ops, tag, and mount point.
	 */
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}
398 
/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set while
 * the vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
vget(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	/* Being cleaned out: wait for the transition, caller retries. */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return (1);
	}
	/* An unreferenced vnode sits on the free list; unlink it. */
	if (vp->v_usecount == 0) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;	/* vp was the tail */
		*vp->v_freeb = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
	}
	VREF(vp);
	VOP_LOCK(vp);
	return (0);
}
430 
431 /*
432  * Vnode reference, just increment the count
433  */
434 void vref(vp)
435 	struct vnode *vp;
436 {
437 
438 	vp->v_usecount++;
439 }
440 
/*
 * vput(): release the vnode's lock, then drop a reference.
 */
void vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}
450 
/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void vrele(vp)
	register struct vnode *vp;
{

	if (vp == NULL)
		panic("vrele: null vp");
	vp->v_usecount--;
	if (vp->v_usecount < 0)
		vprint("vrele: bad ref count", vp);	/* over-released */
	if (vp->v_usecount > 0)
		return;
	/* Last reference gone: append vp to the tail of the free list. */
	if (vfreeh == NULLVP) {
		/*
		 * insert into empty list
		 */
		vfreeh = vp;
		vp->v_freeb = &vfreeh;
	} else {
		/*
		 * insert at tail of list
		 */
		*vfreet = vp;
		vp->v_freeb = vfreet;
	}
	vp->v_freef = NULL;
	vfreet = &vp->v_freef;
	VOP_INACTIVE(vp);	/* let the file system deactivate it */
}
483 
484 /*
485  * Page or buffer structure gets a reference.
486  */
487 vhold(vp)
488 	register struct vnode *vp;
489 {
490 
491 	vp->v_holdcnt++;
492 }
493 
494 /*
495  * Page or buffer structure frees a reference.
496  */
497 holdrele(vp)
498 	register struct vnode *vp;
499 {
500 
501 	if (vp->v_holdcnt <= 0)
502 		panic("holdrele: holdcnt");
503 	vp->v_holdcnt--;
504 }
505 
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.  Returns 0, or EBUSY when active vnodes remain.
 */
int busyprt = 0;	/* patch to print out busy vnodes */

vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	/* Caller must have the mount point marked busy. */
	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_mounth; vp; vp = nvp) {
		/* vp was recycled off this mount; restart the scan. */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mountf;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over any vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = &spec_vnodeops;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
		if (busyprt)
			vprint("vflush: busy vnode", vp);
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
571 
/*
 * Disassociate the underlying file system from a vnode.
 * flags may include DOCLOSE to flush buffers and close the object.
 * On return the vnode's operations point at dead_vnodeops.
 */
void vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	struct vnodeops *origops;
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the buffer list is being cleaned out.
	 */
	VOP_LOCK(vp);
	if (flags & DOCLOSE)
		vinvalbuf(vp, 1);
	/*
	 * Prevent any further operations on the vnode from
	 * being passed through to the old file system.
	 */
	origops = vp->v_op;
	vp->v_op = &dead_vnodeops;
	vp->v_tag = VT_NON;
	/*
	 * If purging an active vnode, it must be unlocked, closed,
	 * and deactivated before being reclaimed.
	 * (origops is used because v_op now points at dead_vnodeops.)
	 */
	(*(origops->vn_unlock))(vp);
	if (active) {
		if (flags & DOCLOSE)
			(*(origops->vn_close))(vp, 0, NOCRED);
		(*(origops->vn_inactive))(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if ((*(origops->vn_reclaim))(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);		/* drop the reference taken above */
	/*
	 * Done with purge, notify sleepers in vget of the grim news.
	 */
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}
640 
/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			sleep((caddr_t)vp, PINOD);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		/*
		 * vgone clears VALIASED on the last remaining alias,
		 * so this loop terminates when only vp is left.
		 */
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}
683 
/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.  On return the vnode is marked VBAD.
 */
void vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;
	long count;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			/* Find vp's predecessor on the hash chain. */
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			/*
			 * Count the remaining aliases; if only one is
			 * left it is no longer aliased.
			 */
			count = 0;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				count++;
				vx = vq;
			}
			if (count == 0)
				panic("missing alias");
			if (count == 1)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist, move it to the head of the list
	 * so it is recycled (as a VBAD vnode) ahead of useful ones.
	 */
	if (vp->v_freeb) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = vfreeh;
		vp->v_freeb = &vfreeh;
		vfreeh->v_freeb = &vp->v_freef;
		vfreeh = vp;
	}
	vp->v_type = VBAD;
}
768 
769 /*
770  * Lookup a vnode by device number.
771  */
772 vfinddev(dev, type, vpp)
773 	dev_t dev;
774 	enum vtype type;
775 	struct vnode **vpp;
776 {
777 	register struct vnode *vp;
778 
779 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
780 		if (dev != vp->v_rdev || type != vp->v_type)
781 			continue;
782 		*vpp = vp;
783 		return (0);
784 	}
785 	return (1);
786 }
787 
/*
 * Calculate the total number of references to a special device,
 * summing the use counts of all its aliases.  Unreferenced aliases
 * found along the way are flushed, and the scan restarted since
 * vgone may modify the hash chain.
 */
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	int count;

	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
loop:
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
814 
815 /*
816  * Print out a description of a vnode.
817  */
818 static char *typename[] =
819    { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
820 
821 vprint(label, vp)
822 	char *label;
823 	register struct vnode *vp;
824 {
825 	char buf[64];
826 
827 	if (label != NULL)
828 		printf("%s: ", label);
829 	printf("type %s, usecount %d, refcount %d,", typename[vp->v_type],
830 		vp->v_usecount, vp->v_holdcnt);
831 	buf[0] = '\0';
832 	if (vp->v_flag & VROOT)
833 		strcat(buf, "|VROOT");
834 	if (vp->v_flag & VTEXT)
835 		strcat(buf, "|VTEXT");
836 	if (vp->v_flag & VSYSTEM)
837 		strcat(buf, "|VSYSTEM");
838 	if (vp->v_flag & VEXLOCK)
839 		strcat(buf, "|VEXLOCK");
840 	if (vp->v_flag & VSHLOCK)
841 		strcat(buf, "|VSHLOCK");
842 	if (vp->v_flag & VLWAIT)
843 		strcat(buf, "|VLWAIT");
844 	if (vp->v_flag & VXLOCK)
845 		strcat(buf, "|VXLOCK");
846 	if (vp->v_flag & VXWANT)
847 		strcat(buf, "|VXWANT");
848 	if (vp->v_flag & VBWAIT)
849 		strcat(buf, "|VBWAIT");
850 	if (vp->v_flag & VALIASED)
851 		strcat(buf, "|VALIASED");
852 	if (buf[0] != '\0')
853 		printf(" flags (%s)", &buf[1]);
854 	printf("\n\t");
855 	VOP_PRINT(vp);
856 }
857 
int kinfo_vdebug = 1;		/* nonzero: report recycle races to console */
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10	/* extra slots allowed for in size estimate */
/*
 * Dump vnode list (via kinfo).
 * Copyout address of vnode followed by vnode.
 * With a null destination, only the estimated space requirement
 * is returned through aneeded.
 */
/* ARGSUSED */
kinfo_vnode(op, where, acopysize, arg, aneeded)
	int op;
	char *where;
	int *acopysize, arg, *aneeded;
{
	register struct mount *mp = rootfs;
	struct mount *omp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere = where + *acopysize;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		/* Sizing request only; pad for vnodes created meanwhile. */
		*aneeded = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}

	do {
		/* Skip mount points we cannot mark busy. */
		if (vfs_busy(mp)) {
			mp = mp->mnt_next;
			continue;
		}
		savebp = bp;
again:
		for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;	/* redo this mount's vnodes */
				goto again;
			}
			/*
			 * Copy out pointer + vnode while room remains;
			 * keep advancing bp regardless so the total
			 * space needed is reported at the end.
			 */
			if ((bp + VPTRSZ + VNODESZ <= ewhere) &&
			    ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
			     (error = copyout((caddr_t)vp, bp + VPTRSZ,
			      VNODESZ))))
				return (error);
			bp += VPTRSZ + VNODESZ;
		}
		omp = mp;
		mp = mp->mnt_next;
		vfs_unbusy(omp);
	} while (mp != rootfs);

	*aneeded = bp - where;
	if (bp > ewhere)
		*acopysize = ewhere - where;	/* output was truncated */
	else
		*acopysize = bp - where;
	return (0);
}
923