/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ufs_vfsops.c	7.56 (Berkeley) 06/28/91
 */

#include "param.h"
#include "systm.h"
#include "namei.h"
#include "proc.h"
#include "kernel.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "buf.h"
#include "file.h"
#include "disklabel.h"
#include "ioctl.h"
#include "errno.h"
#include "malloc.h"

#include "quota.h"
#include "fs.h"
#include "ufsmount.h"
#include "inode.h"

struct vfsops ufs_vfsops = {
	ufs_mount,
	ufs_start,
	ufs_unmount,
	ufs_root,
	ufs_quotactl,
	ufs_statfs,
	ufs_sync,
	ufs_fhtovp,
	ufs_vptofh,
	ufs_init
};
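/*
 * The entries above line up one-for-one with the vfs_* function pointers
 * of struct vfsops (see mount.h).  The VFS layer dispatches all of the
 * filesystem-independent operations through mp->mnt_op, which is pointed
 * at this table by the mount system call and by ufs_mountroot() below.
 */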

/*
 * Flag to allow forcible unmounting.
 */
int doforce = 1;

/*
 * Called by vfs_mountroot when ufs is going to be mounted as root.
 *
 * Name is updated by mount(8) after booting.
 */
#define ROOTNAME	"root_device"

ufs_mountroot()
{
	register struct mount *mp;
	extern struct vnode *rootvp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	register struct fs *fs;
	u_int size;
	int error;

	mp = (struct mount *)malloc((u_long)sizeof(struct mount),
		M_MOUNT, M_WAITOK);
	mp->mnt_op = &ufs_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_exroot = 0;
	mp->mnt_mounth = NULLVP;
	error = mountfs(rootvp, mp, p);
	if (error) {
		free((caddr_t)mp, M_MOUNT);
		return (error);
	}
	if (error = vfs_lock(mp)) {
		(void)ufs_unmount(mp, 0, p);
		free((caddr_t)mp, M_MOUNT);
		return (error);
	}
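	/*
	 * Link the root mount by hand: it is the only entry on the
	 * (circular) mount list and it covers no vnode, since nothing
	 * is mounted beneath it.
	 */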
	rootfs = mp;
	mp->mnt_next = mp;
	mp->mnt_prev = mp;
	mp->mnt_vnodecovered = NULLVP;
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt));
	fs->fs_fsmnt[0] = '/';
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
	vfs_unlock(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
ufs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register struct fs *fs;
	u_int size;
	int error;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);
	/*
	 * Process export requests.
	 */
	if ((args.exflags & MNT_EXPORTED) || (mp->mnt_flag & MNT_EXPORTED)) {
		if (args.exflags & MNT_EXPORTED)
			mp->mnt_flag |= MNT_EXPORTED;
		else
			mp->mnt_flag &= ~MNT_EXPORTED;
		if (args.exflags & MNT_EXRDONLY)
			mp->mnt_flag |= MNT_EXRDONLY;
		else
			mp->mnt_flag &= ~MNT_EXRDONLY;
		mp->mnt_exroot = args.exroot;
	}
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->fs_ronly = 0;
		if (args.fspec == 0)
			return (0);
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	ndp->ni_nameiop = LOOKUP | FOLLOW;
	ndp->ni_segflg = UIO_USERSPACE;
	ndp->ni_dirp = args.fspec;
	if (error = namei(ndp, p))
		return (error);
	devvp = ndp->ni_vp;
	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = mountfs(devvp, mp, p);
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
	return (0);
}
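/*
 * For reference: a user process reaches ufs_mount() through the mount(2)
 * system call.  A minimal sketch, assuming the 4.3BSD-Reno interface in
 * which mount(2) takes a MOUNT_xxx type code and a struct ufs_args with
 * the fspec/exflags/exroot members used above (the device and mount point
 * names below are examples only):
 *
 *	struct ufs_args args;
 *
 *	args.fspec = "/dev/sd0a";
 *	args.exflags = 0;
 *	args.exroot = 0;
 *	if (mount(MOUNT_UFS, "/mnt", MNT_RDONLY, (caddr_t)&args) < 0)
 *		perror("mount");
 *
 * The data argument is copied in by the copyin() above, and path ends up
 * in fs_fsmnt and f_mntonname.
 */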

/*
 * Common code for mount and mountroot
 */
mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	register struct ufsmount *ump = (struct ufsmount *)0;
	struct buf *bp = NULL;
	register struct fs *fs;
	dev_t dev = devvp->v_rdev;
	struct partinfo dpart;
	caddr_t base, space;
	int havepart = 0, blks;
	int error, i, size;
	int needclose = 0;
	int ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	extern struct vnode *rootvp;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vinvalbuf(devvp, 1);
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p))
		return (error);
	needclose = 1;
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		havepart = 1;
		size = dpart.disklab->d_secsize;
	}
	if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp))
		goto out;
	fs = bp->b_un.b_fs;
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	ump->um_fs = (struct fs *)malloc((u_long)fs->fs_sbsize, M_SUPERBLK,
	    M_WAITOK);
	bcopy((caddr_t)bp->b_un.b_addr, (caddr_t)ump->um_fs,
	   (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;
	fs->fs_ronly = ronly;
	if (ronly == 0)
		fs->fs_fmod = 1;
	if (havepart) {
		dpart.part->p_fstype = FS_BSDFFS;
		dpart.part->p_fsize = fs->fs_fsize;
		dpart.part->p_frag = fs->fs_frag;
		dpart.part->p_cpg = fs->fs_cpg;
	}
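	/*
	 * Read in the cylinder group summary information.  It starts at
	 * fs_csaddr and is fs_cssize bytes long; it is brought in one
	 * filesystem block at a time and the pieces are hung off fs_csp[].
	 */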
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	base = space = (caddr_t)malloc((u_long)fs->fs_cssize, M_SUPERBLK,
	    M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
			NOCRED, &bp);
		if (error) {
			free((caddr_t)base, M_SUPERBLK);
			goto out;
		}
		bcopy((caddr_t)bp->b_un.b_addr, space, (u_int)size);
		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_UFS;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;

	/* Sanity checks for old file systems.			   XXX */
	fs->fs_npsect = MAX(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = MAX(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	return (0);
out:
	if (bp)
		brelse(bp);
	if (needclose)
		(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free((caddr_t)ump->um_fs, M_SUPERBLK);
		free((caddr_t)ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Make a filesystem operational.
 * Nothing to do at the moment.
 */
/* ARGSUSED */
ufs_start(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{

	return (0);
}

/*
 * unmount system call
 */
ufs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int i, error, ronly, flags = 0;

	if (mntflags & MNT_FORCE) {
		if (!doforce || mp == rootfs)
			return (EINVAL);
		flags |= FORCECLOSE;
	}
	mntflushbuf(mp, 0);
	if (mntinvalbuf(mp))
		return (EBUSY);
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	if (error = vflush(mp, NULLVP, flags))
		return (error);
	fs = ump->um_fs;
	ronly = fs->fs_ronly;
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp, ronly ? FREAD : FREAD|FWRITE,
		NOCRED, p);
	vrele(ump->um_devvp);
	free((caddr_t)fs->fs_csp[0], M_SUPERBLK);
	free((caddr_t)fs, M_SUPERBLK);
	free((caddr_t)ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
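		/*
		 * The device may be known by more than one vnode (an alias);
		 * walk the alias chain and refuse the mount if any vnode for
		 * the same device is already mounted on.
		 */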
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Return root of a filesystem
 */
ufs_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	register struct inode *ip;
	struct inode *nip;
	struct vnode tvp;
	int error;

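	/*
	 * Fake up a vnode/inode pair on the stack just to carry the mount
	 * point and device into iget(); iget() hands back the real root
	 * inode, locked, in nip.
	 */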
	tvp.v_mount = mp;
	ip = VTOI(&tvp);
	ip->i_vnode = &tvp;
	ip->i_dev = VFSTOUFS(mp)->um_dev;
	error = iget(ip, (ino_t)ROOTINO, &nip);
	if (error)
		return (error);
	*vpp = ITOV(nip);
	return (0);
}

/*
 * Do operations associated with quotas
 */
ufs_quotactl(mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	int cmd, type, error;

#ifndef QUOTA
	return (EOPNOTSUPP);
#else
	if (uid == -1)
		uid = p->p_cred->p_ruid;
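	/*
	 * The command word is built with the QCMD() macro from quota.h:
	 * the Q_xxx operation is in the bits above SUBCMDSHIFT and the
	 * quota type (USRQUOTA or GRPQUOTA) is in the low SUBCMDMASK bits,
	 * e.g. QCMD(Q_GETQUOTA, USRQUOTA).
	 */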
	cmd = cmds >> SUBCMDSHIFT;

	switch (cmd) {
	case Q_GETQUOTA:
	case Q_SYNC:
		if (uid == p->p_cred->p_ruid)
			break;
		/* fall through */
	default:
		if (error = suser(p->p_ucred, &p->p_acflag))
			return (error);
	}

	type = cmds & SUBCMDMASK;
	if ((u_int)type >= MAXQUOTAS)
		return (EINVAL);

	switch (cmd) {

	case Q_QUOTAON:
		return (quotaon(p, mp, type, arg));

	case Q_QUOTAOFF:
		if (vfs_busy(mp))
			return (0);
		error = quotaoff(p, mp, type);
		vfs_unbusy(mp);
		return (error);

	case Q_SETQUOTA:
		return (setquota(mp, uid, type, arg));

	case Q_SETUSE:
		return (setuse(mp, uid, type, arg));

	case Q_GETQUOTA:
		return (getquota(mp, uid, type, arg));

	case Q_SYNC:
		if (vfs_busy(mp))
			return (0);
		error = qsync(mp);
		vfs_unbusy(mp);
		return (error);

	default:
		return (EINVAL);
	}
	/* NOTREACHED */
#endif
}

/*
 * Get file system statistics.
 */
ufs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ufs_statfs");
	sbp->f_type = MOUNT_UFS;
	sbp->f_fsize = fs->fs_fsize;
	sbp->f_bsize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
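	/*
	 * f_bavail is the space available to ordinary users: the data
	 * blocks not held back by the minfree reserve, less the blocks
	 * already allocated.  It can go negative once the filesystem
	 * has been filled into the reserve.
	 */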
	sbp->f_bavail = (fs->fs_dsize * (100 - fs->fs_minfree) / 100) -
		(fs->fs_dsize - sbp->f_bfree);
	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

int	syncprt = 0;

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
ufs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	register struct vnode *vp;
	register struct inode *ip;
	register struct ufsmount *ump = VFSTOUFS(mp);
	register struct fs *fs;
	int error, allerror = 0;

	if (syncprt)
		bufstats();
	fs = ump->um_fs;
	/*
	 * Write back modified superblock.
	 * Consistency check that the superblock
	 * is still in the buffer cache.
	 */
	if (fs->fs_fmod != 0) {
		if (fs->fs_ronly != 0) {		/* XXX */
			printf("fs = %s\n", fs->fs_fsmnt);
			panic("update: rofs mod");
		}
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = sbupdate(ump, waitfor);
	}
	/*
	 * Write back each (modified) inode.
	 */
loop:
	for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		if (VOP_ISLOCKED(vp))
			continue;
		ip = VTOI(vp);
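		/*
		 * Skip vnodes that are clean: none of IMOD, IACC, IUPD,
		 * or ICHG set on the inode and no dirty buffers queued
		 * on the vnode.
		 */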
		if ((ip->i_flag & (IMOD|IACC|IUPD|ICHG)) == 0 &&
		    vp->v_dirtyblkhd == NULL)
			continue;
		if (vget(vp))
			goto loop;
		if (vp->v_dirtyblkhd)
			vflushbuf(vp, 0);
		if ((ip->i_flag & (IMOD|IACC|IUPD|ICHG)) &&
		    (error = iupdat(ip, &time, &time, 0)))
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale file system control information to be flushed.
	 */
	vflushbuf(ump->um_devvp, waitfor == MNT_WAIT ? B_SYNC : 0);
#ifdef QUOTA
	qsync(mp);
#endif
	return (allerror);
}

/*
 * Write a superblock and associated information back to disk.
 */
sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error = 0;

	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize);
	bcopy((caddr_t)fs, bp->b_un.b_addr, (u_int)fs->fs_sbsize);
	/* Restore compatibility to old file systems.		   XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		bp->b_un.b_fs->fs_nrpos = -1;			/* XXX */
	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
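	/*
	 * Write back the cylinder group summary information, one
	 * filesystem block at a time; this mirrors the read loop
	 * in mountfs().
	 */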
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i), size);
		bcopy(space, bp->b_un.b_addr, (u_int)size);
		space += size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	return (error);
}

/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * above.
 */
bufstats()
{
	int s, i, j, count;
	register struct buf *bp, *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (bp = bfreelist, i = 0; bp < &bfreelist[BQUEUES]; bp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (dp = bp->av_forw; dp != bp; dp = dp->av_forw) {
			counts[dp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is in range
 * - call iget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the generation number matches
 */
ufs_fhtovp(mp, fhp, vpp)
	register struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	register struct ufid *ufhp;
	register struct fs *fs;
	register struct inode *ip;
	struct inode *nip;
	struct vnode tvp;
	int error;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg) {
		*vpp = NULLVP;
		return (EINVAL);
	}
	tvp.v_mount = mp;
	ip = VTOI(&tvp);
	ip->i_vnode = &tvp;
	ip->i_dev = VFSTOUFS(mp)->um_dev;
	if (error = iget(ip, ufhp->ufid_ino, &nip)) {
		*vpp = NULLVP;
		return (error);
	}
	ip = nip;
	if (ip->i_mode == 0) {
		iput(ip);
		*vpp = NULLVP;
		return (EINVAL);
	}
	if (ip->i_gen != ufhp->ufid_gen) {
		iput(ip);
		*vpp = NULLVP;
		return (EINVAL);
	}
	*vpp = ITOV(ip);
	return (0);
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
ufs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip = VTOI(vp);
	register struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}