xref: /original-bsd/sys/kern/vfs_subr.c (revision 188f7363)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms are permitted
6  * provided that the above copyright notice and this paragraph are
7  * duplicated in all such forms and that any documentation,
8  * advertising materials, and other materials related to such
9  * distribution and use acknowledge that the software was developed
10  * by the University of California, Berkeley.  The name of the
11  * University may not be used to endorse or promote products derived
12  * from this software without specific prior written permission.
13  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16  *
17  *	@(#)vfs_subr.c	7.40 (Berkeley) 05/02/90
18  */
19 
20 /*
21  * External virtual filesystem routines
22  */
23 
24 #include "param.h"
25 #include "mount.h"
26 #include "time.h"
27 #include "vnode.h"
28 #include "specdev.h"
29 #include "namei.h"
30 #include "ucred.h"
31 #include "errno.h"
32 #include "malloc.h"
33 
34 /*
35  * Remove a mount point from the list of mounted filesystems.
36  * Unmount of the root is illegal.
37  */
void
vfs_remove(mp)
	register struct mount *mp;
{

	if (mp == rootfs)
		panic("vfs_remove: unmounting root");
	/* Unlink mp from the circular doubly-linked list of mounts. */
	mp->m_prev->m_next = mp->m_next;
	mp->m_next->m_prev = mp->m_prev;
	/* The covered vnode no longer has a filesystem mounted on it. */
	mp->m_vnodecovered->v_mountedhere = (struct mount *)0;
	/* Drop the mount lock held by the caller (see vfs_lock()). */
	vfs_unlock(mp);
}
50 
51 /*
52  * Lock a filesystem.
53  * Used to prevent access to it while mounting and unmounting.
54  */
55 vfs_lock(mp)
56 	register struct mount *mp;
57 {
58 
59 	while(mp->m_flag & M_MLOCK) {
60 		mp->m_flag |= M_MWAIT;
61 		sleep((caddr_t)mp, PVFS);
62 	}
63 	mp->m_flag |= M_MLOCK;
64 	return (0);
65 }
66 
67 /*
68  * Unlock a locked filesystem.
69  * Panic if filesystem is not locked.
70  */
71 void
72 vfs_unlock(mp)
73 	register struct mount *mp;
74 {
75 
76 	if ((mp->m_flag & M_MLOCK) == 0)
77 		panic("vfs_unlock: not locked");
78 	mp->m_flag &= ~M_MLOCK;
79 	if (mp->m_flag & M_MWAIT) {
80 		mp->m_flag &= ~M_MWAIT;
81 		wakeup((caddr_t)mp);
82 	}
83 }
84 
85 /*
86  * Mark a mount point as busy.
87  * Used to synchronize access and to delay unmounting.
88  */
89 vfs_busy(mp)
90 	register struct mount *mp;
91 {
92 
93 	if (mp->m_flag & M_UNMOUNT)
94 		return (1);
95 	while(mp->m_flag & M_MPBUSY) {
96 		mp->m_flag |= M_MPWANT;
97 		sleep((caddr_t)&mp->m_flag, PVFS);
98 	}
99 	mp->m_flag |= M_MPBUSY;
100 	return (0);
101 }
102 
103 /*
104  * Free a busy filesystem.
105  * Panic if filesystem is not busy.
106  */
107 void
108 vfs_unbusy(mp)
109 	register struct mount *mp;
110 {
111 
112 	if ((mp->m_flag & M_MPBUSY) == 0)
113 		panic("vfs_unbusy: not busy");
114 	mp->m_flag &= ~M_MPBUSY;
115 	if (mp->m_flag & M_MPWANT) {
116 		mp->m_flag &= ~M_MPWANT;
117 		wakeup((caddr_t)&mp->m_flag);
118 	}
119 }
120 
121 /*
122  * Lookup a mount point by filesystem identifier.
123  */
124 struct mount *
125 getvfs(fsid)
126 	fsid_t *fsid;
127 {
128 	register struct mount *mp;
129 
130 	mp = rootfs;
131 	do {
132 		if (mp->m_stat.f_fsid.val[0] == fsid->val[0] &&
133 		    mp->m_stat.f_fsid.val[1] == fsid->val[1]) {
134 			return (mp);
135 		}
136 		mp = mp->m_next;
137 	} while (mp != rootfs);
138 	return ((struct mount *)0);
139 }
140 
141 /*
142  * Set vnode attributes to VNOVAL
143  */
void vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	/*
	 * Mark every remaining attribute "no value" with a single chained
	 * assignment.  NOTE: the value propagates leftward through each
	 * field's type conversion, so the chain must not be split into
	 * separate assignments without checking the field types.
	 */
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid = vap->va_size =
		vap->va_size_rsv = vap->va_blocksize = vap->va_rdev =
		vap->va_bytes = vap->va_bytes_rsv =
		vap->va_atime.tv_sec = vap->va_atime.tv_usec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_usec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_usec =
		vap->va_flags = vap->va_gen = VNOVAL;
}
158 
159 /*
160  * Initialize a nameidata structure
161  */
162 ndinit(ndp)
163 	register struct nameidata *ndp;
164 {
165 
166 	bzero((caddr_t)ndp, sizeof(struct nameidata));
167 	ndp->ni_iov = &ndp->ni_nd.nd_iovec;
168 	ndp->ni_iovcnt = 1;
169 	ndp->ni_base = (caddr_t)&ndp->ni_dent;
170 	ndp->ni_rw = UIO_WRITE;
171 	ndp->ni_uioseg = UIO_SYSSPACE;
172 }
173 
174 /*
175  * Duplicate a nameidata structure
176  */
nddup(ndp, newndp)
	register struct nameidata *ndp, *newndp;
{

	/*
	 * Initialize newndp from scratch, then share ndp's current and
	 * root directories and its credentials, taking a reference on
	 * each shared object.
	 */
	ndinit(newndp);
	newndp->ni_cdir = ndp->ni_cdir;
	VREF(newndp->ni_cdir);
	/* The root directory may be unset; only reference it when present. */
	newndp->ni_rdir = ndp->ni_rdir;
	if (newndp->ni_rdir)
		VREF(newndp->ni_rdir);
	newndp->ni_cred = ndp->ni_cred;
	crhold(newndp->ni_cred);
}
190 
191 /*
192  * Release a nameidata structure
193  */
ndrele(ndp)
	register struct nameidata *ndp;
{

	/* Drop the references taken in nddup(). */
	vrele(ndp->ni_cdir);
	if (ndp->ni_rdir)
		vrele(ndp->ni_rdir);
	crfree(ndp->ni_cred);
}
203 
204 /*
205  * Routines having to do with the management of the vnode table.
206  */
struct vnode *vfreeh, **vfreet;	/* free-list head and tail-link slot */
extern struct vnodeops dead_vnodeops, spec_vnodeops;
extern void vclean();
long numvnodes;			/* number of vnodes allocated so far */
211 
212 /*
213  * Initialize the vnode structures and initialize each file system type.
214  */
215 vfsinit()
216 {
217 	struct vfsops **vfsp;
218 
219 	/*
220 	 * Initialize the vnode name cache
221 	 */
222 	nchinit();
223 	/*
224 	 * Initialize each file system type.
225 	 */
226 	for (vfsp = &vfssw[0]; vfsp <= &vfssw[MOUNT_MAXTYPE]; vfsp++) {
227 		if (*vfsp == NULL)
228 			continue;
229 		(*(*vfsp)->vfs_init)();
230 	}
231 }
232 
233 /*
234  * Return the next vnode from the free list.
235  */
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	struct vnodeops *vops;
	struct vnode **vpp;
{
	register struct vnode *vp, *vq;

	/*
	 * Allocate fresh vnodes until the table reaches desiredvnodes;
	 * after that, recycle the head of the free list.
	 */
	if (numvnodes < desiredvnodes) {
		vp = (struct vnode *)malloc(sizeof *vp, M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vfreeh) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't");
		/* Unlink vp from the head of the free list. */
		if (vq = vp->v_freef)
			vq->v_freeb = &vfreeh;
		else
			vfreet = &vfreeh;
		vfreeh = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
		/* Detach the old filesystem's state unless already gone. */
		if (vp->v_type != VBAD)
			vgone(vp);
		vp->v_flag = 0;
		vp->v_shlockc = 0;
		vp->v_exlockc = 0;
		vp->v_lastr = 0;
		vp->v_socket = 0;
	}
	/* (Re)initialize for the new owner; return a referenced vnode. */
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	VREF(vp);
	*vpp = vp;
	return (0);
}
280 
281 /*
282  * Move a vnode from one mount queue to another.
283  */
284 insmntque(vp, mp)
285 	register struct vnode *vp;
286 	register struct mount *mp;
287 {
288 	struct vnode *vq;
289 
290 	/*
291 	 * Delete from old mount point vnode list, if on one.
292 	 */
293 	if (vp->v_mountb) {
294 		if (vq = vp->v_mountf)
295 			vq->v_mountb = vp->v_mountb;
296 		*vp->v_mountb = vq;
297 	}
298 	/*
299 	 * Insert into list of vnodes for the new mount point, if available.
300 	 */
301 	vp->v_mount = mp;
302 	if (mp == NULL) {
303 		vp->v_mountf = NULL;
304 		vp->v_mountb = NULL;
305 		return;
306 	}
307 	if (mp->m_mounth) {
308 		vp->v_mountf = mp->m_mounth;
309 		vp->v_mountb = &mp->m_mounth;
310 		mp->m_mounth->v_mountb = &vp->v_mountf;
311 		mp->m_mounth = vp;
312 	} else {
313 		mp->m_mounth = vp;
314 		vp->v_mountb = &mp->m_mounth;
315 		vp->v_mountf = NULL;
316 	}
317 }
318 
319 /*
320  * Create a vnode for a block device.
321  * Used for root filesystem, argdev, and swap areas.
322  * Also used for memory file system special devices.
323  */
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	/* Get an anonymous vnode wired to the special-device operations. */
	error = getnewvnode(VT_NON, (struct mount *)0, &spec_vnodeops, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	/* If a vnode for this device already exists, use it instead. */
	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
346 
347 /*
348  * Check to see if the new vnode represents a special device
349  * for which we already have a vnode (either because of
350  * bdevvp() or because of a different vnode representing
351  * the same block device). If such an alias exists, deallocate
352  * the existing contents and return the aliased vnode. The
353  * caller is responsible for filling it with its new contents.
354  */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	/* Only block and character devices can be aliased. */
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return ((struct vnode *)0);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		/* vget fails if vp is mid-recycle; rescan the chain. */
		if (vget(vp))
			goto loop;
		break;
	}
	/*
	 * No reusable alias: give nvp its own specinfo, enter it on the
	 * device hash chain, and mark both vnodes VALIASED if an active
	 * alias exists.  A null return tells the caller to keep nvp.
	 */
	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return ((struct vnode *)0);
	}
	/*
	 * Found an unclaimed (VT_NON) alias, e.g. one made by bdevvp():
	 * strip its old state and hand it back for the caller to fill.
	 */
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}
405 
406 /*
407  * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
410  * when the transition is completed, and an error returned to
411  * indicate that the vnode is no longer usable (possibly having
412  * been changed to a new file system type).
413  */
vget(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	/*
	 * The vnode is being cleaned out (vclean/vgone): wait for the
	 * transition to finish and return failure, since the vnode may
	 * no longer represent the same object.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return (1);
	}
	/* Unreferenced vnodes sit on the free list; unlink before use. */
	if (vp->v_usecount == 0) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = NULL;
		vp->v_freeb = NULL;
	}
	VREF(vp);
	VOP_LOCK(vp);
	return (0);
}
437 
438 /*
439  * Vnode reference, just increment the count
440  */
441 void vref(vp)
442 	struct vnode *vp;
443 {
444 
445 	vp->v_usecount++;
446 }
447 
448 /*
449  * vput(), just unlock and vrele()
450  */
void vput(vp)
	register struct vnode *vp;
{
	/* Unlock first, then drop the reference via vrele(). */
	VOP_UNLOCK(vp);
	vrele(vp);
}
457 
458 /*
459  * Vnode release.
460  * If count drops to zero, call inactive routine and return to freelist.
461  */
void vrele(vp)
	register struct vnode *vp;
{

	if (vp == NULL)
		panic("vrele: null vp");
	vp->v_usecount--;
	/* A negative count means an unbalanced vrele; report it. */
	if (vp->v_usecount < 0)
		vprint("vrele: bad ref count", vp);
	if (vp->v_usecount > 0)
		return;
	/* Last reference gone: append the vnode to the free list. */
	if (vfreeh == (struct vnode *)0) {
		/*
		 * insert into empty list
		 */
		vfreeh = vp;
		vp->v_freeb = &vfreeh;
	} else {
		/*
		 * insert at tail of list
		 */
		*vfreet = vp;
		vp->v_freeb = vfreet;
	}
	vp->v_freef = NULL;
	vfreet = &vp->v_freef;
	/* Tell the filesystem the vnode is now inactive. */
	VOP_INACTIVE(vp);
}
490 
491 /*
492  * Page or buffer structure gets a reference.
493  */
494 vhold(vp)
495 	register struct vnode *vp;
496 {
497 
498 	vp->v_holdcnt++;
499 }
500 
501 /*
502  * Page or buffer structure frees a reference.
503  */
504 holdrele(vp)
505 	register struct vnode *vp;
506 {
507 
508 	if (vp->v_holdcnt <= 0)
509 		panic("holdrele: holdcnt");
510 	vp->v_holdcnt--;
511 }
512 
513 /*
514  * Remove any vnodes in the vnode table belonging to mount point mp.
515  *
516  * If MNT_NOFORCE is specified, there should not be any active ones,
517  * return error if any are found (nb: this is a user error, not a
518  * system error). If MNT_FORCE is specified, detach any active vnodes
519  * that are found.
520  */
int busyprt = 0;	/* patch to print out busy vnodes */

vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	/* The caller must have marked the mount busy (vfs_busy()). */
	if ((mp->m_flag & M_MPBUSY) == 0)
		panic("vflush: not busy");
	for (vp = mp->m_mounth; vp; vp = nvp) {
		/* Fetch the successor first: vgone unlinks vp below. */
		nvp = vp->v_mountf;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over any vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = &spec_vnodeops;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
		if (busyprt)
			vprint("vflush: busy vnode", vp);
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
575 
576 /*
577  * Disassociate the underlying file system from a vnode.
578  */
void vclean(vp, flags)
	register struct vnode *vp;
	long flags;
{
	struct vnodeops *origops;
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the buffer list is being cleaned out.
	 */
	VOP_LOCK(vp);
	/* DOCLOSE also means: throw away the vnode's buffers. */
	if (flags & DOCLOSE)
		vinvalbuf(vp, 1);
	/*
	 * Prevent any further operations on the vnode from
	 * being passed through to the old file system.
	 */
	origops = vp->v_op;
	vp->v_op = &dead_vnodeops;
	vp->v_tag = VT_NON;
	/*
	 * If purging an active vnode, it must be unlocked, closed,
	 * and deactivated before being reclaimed.
	 */
	(*(origops->vn_unlock))(vp);
	if (active) {
		if (flags & DOCLOSE)
			(*(origops->vn_close))(vp, 0, NOCRED);
		(*(origops->vn_inactive))(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if ((*(origops->vn_reclaim))(vp))
		panic("vclean: cannot reclaim");
	/* Drop the extra reference taken above for active vnodes. */
	if (active)
		vrele(vp);
	/*
	 * Done with purge, notify sleepers in vget of the grim news.
	 */
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}
644 
645 /*
 * Eliminate all activity associated with the requested vnode
647  * and with all vnodes aliased to the requested vnode.
648  */
void vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			sleep((caddr_t)vp, PINOD);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		/* vgone clears VALIASED when the last alias is gone. */
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}
687 
688 /*
689  * Eliminate all activity associated with a vnode
690  * in preparation for reuse.
691  */
void vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;
	long count;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mountb) {
		if (vq = vp->v_mountf)
			vq->v_mountb = vp->v_mountb;
		*vp->v_mountb = vq;
		vp->v_mountf = NULL;
		vp->v_mountb = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		/*
		 * Count the remaining aliases; if only one is left it
		 * is no longer aliased, and vp itself never is again.
		 */
		if (vp->v_flag & VALIASED) {
			count = 0;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				count++;
				vx = vq;
			}
			if (count == 0)
				panic("missing alias");
			if (count == 1)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist, move it to the head of the list.
	 */
	if (vp->v_freeb) {
		if (vq = vp->v_freef)
			vq->v_freeb = vp->v_freeb;
		else
			vfreet = vp->v_freeb;
		*vp->v_freeb = vq;
		vp->v_freef = vfreeh;
		vp->v_freeb = &vfreeh;
		vfreeh->v_freeb = &vp->v_freef;
		vfreeh = vp;
	}
	/* VBAD marks the vnode as fully dissociated (see getnewvnode). */
	vp->v_type = VBAD;
}
772 
773 /*
774  * Lookup a vnode by device number.
775  */
776 vfinddev(dev, type, vpp)
777 	dev_t dev;
778 	enum vtype type;
779 	struct vnode **vpp;
780 {
781 	register struct vnode *vp;
782 
783 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
784 		if (dev != vp->v_rdev || type != vp->v_type)
785 			continue;
786 		*vpp = vp;
787 		return (0);
788 	}
789 	return (1);
790 }
791 
792 /*
793  * Calculate the total number of references to a special device.
794  */
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	int count;

	/* Unaliased devices: the vnode's own count is the total. */
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
loop:
	/* Sum usecounts over every alias on the hash chain. */
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
818 
819 /*
820  * Print out a description of a vnode.
821  */
/* Printable names for the enum vtype values, indexed by v_type. */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
824 
825 vprint(label, vp)
826 	char *label;
827 	register struct vnode *vp;
828 {
829 	char buf[64];
830 
831 	if (label != NULL)
832 		printf("%s: ", label);
833 	printf("type %s, usecount %d, refcount %d,", typename[vp->v_type],
834 		vp->v_usecount, vp->v_holdcnt);
835 	buf[0] = '\0';
836 	if (vp->v_flag & VROOT)
837 		strcat(buf, "|VROOT");
838 	if (vp->v_flag & VTEXT)
839 		strcat(buf, "|VTEXT");
840 	if (vp->v_flag & VSYSTEM)
841 		strcat(buf, "|VSYSTEM");
842 	if (vp->v_flag & VEXLOCK)
843 		strcat(buf, "|VEXLOCK");
844 	if (vp->v_flag & VSHLOCK)
845 		strcat(buf, "|VSHLOCK");
846 	if (vp->v_flag & VLWAIT)
847 		strcat(buf, "|VLWAIT");
848 	if (vp->v_flag & VXLOCK)
849 		strcat(buf, "|VXLOCK");
850 	if (vp->v_flag & VXWANT)
851 		strcat(buf, "|VXWANT");
852 	if (vp->v_flag & VBWAIT)
853 		strcat(buf, "|VBWAIT");
854 	if (vp->v_flag & VALIASED)
855 		strcat(buf, "|VALIASED");
856 	if (buf[0] != '\0')
857 		printf(" flags (%s)", &buf[1]);
858 	printf("\n\t");
859 	VOP_PRINT(vp);
860 }
861 
int kinfo_vdebug = 1;		/* log vget/recycle races in kinfo_vnode() */
int kinfo_vgetfailed;		/* count of vget failures during a dump */
#define KINFO_VNODESLOP	10
865 /*
866  * Dump vnode list (via kinfo).
867  * Copyout address of vnode followed by vnode.
868  */
869 kinfo_vnode(op, where, acopysize, arg, aneeded)
870 	char *where;
871 	int *acopysize, *aneeded;
872 {
873 	register struct mount *mp = rootfs;
874 	register struct vnode *nextvp;
875 	struct mount *omp;
876 	struct vnode *vp;
877 	register needed = 0;
878 	register char *bp = where, *savebp;
879 	char *ewhere = where + *acopysize;
880 	int error;
881 
882 #define VPTRSZ	sizeof (struct vnode *)
883 #define VNODESZ	sizeof (struct vnode)
884 	if (where == NULL) {
885 		*aneeded = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
886 		return (0);
887 	}
888 
889 #define RETRY	bp = savebp ; goto again
890 	do {
891 		if (vfs_busy(mp)) {
892 			mp = mp->m_next;
893 			continue;
894 		}
895 		/*
896 		 * A vget can fail if the vnode is being
897 		 * recycled.  In this (rare) case, we have to start
898 		 * over with this filesystem.  Also, have to
899 		 * check that nextvp is still associated
900 		 * with this filesystem.  RACE: could have been
901 		 * recycled onto same filesystem.
902 		 */
903 		savebp = bp;
904 again:
905 		nextvp = mp->m_mounth;
906 		while (vp = nextvp) {
907 			if (vget(vp)) {
908 				if (kinfo_vdebug)
909 					printf("kinfo: vget failed\n");
910 				kinfo_vgetfailed++;
911 				RETRY;
912 			}
913 			if (vp->v_mount != mp) {
914 				if (kinfo_vdebug)
915 					printf("kinfo: vp changed\n");
916 				vput(vp);
917 				RETRY;
918 			}
919 			if ((bp + VPTRSZ + VNODESZ <= ewhere) &&
920 			    ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
921 			     (error = copyout((caddr_t)vp, bp + VPTRSZ,
922 			      VNODESZ)))) {
923 				vput(vp);
924 				return (error);
925 			}
926 			bp += VPTRSZ + VNODESZ;
927 			nextvp = vp->v_mountf;
928 			vput(vp);
929 		}
930 		omp = mp;
931 		mp = mp->m_next;
932 		vfs_unbusy(omp);
933 	} while (mp != rootfs);
934 
935 	*aneeded = bp - where;
936 	if (bp > ewhere)
937 		*acopysize = ewhere - where;
938 	else
939 		*acopysize = bp - where;
940 	return (0);
941 }
942