xref: /original-bsd/sys/ufs/lfs/lfs_vfsops.c (revision 97ed2031)
/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_vfsops.c	8.8 (Berkeley) 06/15/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/socket.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

int lfs_mountfs __P((struct vnode *, struct mount *, struct proc *));

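/*
 * LFS vfs operations vector.  Several entries (start, root, quotactl) are
 * shared with the generic UFS code; the rest are LFS-specific.
 */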
struct vfsops lfs_vfsops = {
	lfs_mount,
	ufs_start,
	lfs_unmount,
	ufs_root,
	ufs_quotactl,
	lfs_statfs,
	lfs_sync,
	lfs_vget,
	lfs_fhtovp,
	lfs_vptofh,
	lfs_init,
};

int
lfs_mountroot()
{
	panic("lfs_mountroot");		/* XXX -- implement */
}

/*
 * VFS Operations.
 *
 * mount system call
 */
lfs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register struct lfs *fs;				/* LFS */
	u_int size;
	int error;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);

	/* Until LFS can do NFS right.		XXX */
	if (args.export.ex_flags & MNT_EXPORTED)
		return (EINVAL);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
#ifdef NOTLFS							/* LFS */
		fs = ump->um_fs;
		if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->fs_ronly = 0;
#else
		fs = ump->um_lfs;
		if (fs->lfs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->lfs_ronly = 0;
#endif
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if (error = namei(ndp))
		return (error);
	devvp = ndp->ni_vp;
	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = lfs_mountfs(devvp, mp, p);		/* LFS */
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;					/* LFS */
#ifdef NOTLFS							/* LFS */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
#else
	(void)copyinstr(path, fs->lfs_fsmnt, sizeof(fs->lfs_fsmnt) - 1, &size);
	bzero(fs->lfs_fsmnt + size, sizeof(fs->lfs_fsmnt) - size);
	bcopy((caddr_t)fs->lfs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) lfs_statfs(mp, &mp->mnt_stat, p);
#endif
	return (0);
}

/*
 * Common code for mount and mountroot
 * LFS specific
 */
int
lfs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	extern struct vnode *rootvp;
	register struct lfs *fs;
	register struct ufsmount *ump;
	struct vnode *vp;
	struct buf *bp;
	struct partinfo dpart;
	dev_t dev;
	int error, i, ronly, size;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = vfs_mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if (error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0))
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))
		return (error);

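	/*
	 * Determine the device's sector size; if the disklabel cannot be
	 * read, fall back to DEV_BSIZE.
	 */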
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		size = dpart.disklab->d_secsize;
#ifdef NEVER_USED
		dpart.part->p_fstype = FS_LFS;
		dpart.part->p_fsize = fs->lfs_fsize;	/* frag size */
		dpart.part->p_frag = fs->lfs_frag;	/* frags per block */
		dpart.part->p_cpg = fs->lfs_segshift;	/* segment shift */
#endif
	}

	/* Don't free random space on error. */
	bp = NULL;
	ump = NULL;

	/* Read in the superblock. */
	if (error = bread(devvp, LFS_LABELPAD / size, LFS_SBPAD, NOCRED, &bp))
		goto out;
	fs = (struct lfs *)bp->b_data;

	/* Check the basics. */
	if (fs->lfs_magic != LFS_MAGIC || fs->lfs_bsize > MAXBSIZE ||
	    fs->lfs_bsize < sizeof(struct lfs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}

	/* Allocate the mount structure, copy the superblock into it. */
	ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	fs = ump->um_lfs = malloc(sizeof(struct lfs), M_UFSMNT, M_WAITOK);
	bcopy(bp->b_data, fs, sizeof(struct lfs));
	if (sizeof(struct lfs) < LFS_SBPAD)			/* XXX why? */
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;

	/* Set up the I/O information */
	fs->lfs_iocount = 0;

	/* Set up the ifile and lock flags */
	fs->lfs_doifile = 0;
	fs->lfs_writer = 0;
	fs->lfs_dirops = 0;
	fs->lfs_seglock = 0;

	/* Set the file system readonly/modify bits. */
	fs->lfs_ronly = ronly;
	if (ronly == 0)
		fs->lfs_fmod = 1;

	/* Initialize the mount structure. */
	dev = devvp->v_rdev;
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_LFS;
	mp->mnt_maxsymlinklen = fs->lfs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
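	/*
	 * Block pointers in LFS inodes are already disk addresses, so no
	 * fsbtodb shift is needed; um_seqinc is the number of DEV_BSIZE
	 * sectors per file system block.
	 */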
	ump->um_bptrtodb = 0;
	ump->um_seqinc = 1 << fs->lfs_fsbtodb;
	ump->um_nindir = fs->lfs_nindir;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;

	/*
	 * We use the ifile vnode for almost every operation.  Instead of
	 * retrieving it from the hash table on each use, retrieve it once
	 * here, artificially increment its reference count, and keep a
	 * pointer to it in the incore copy of the superblock.
	 */
	if (error = VFS_VGET(mp, LFS_IFILE_INUM, &vp))
		goto out;
	fs->lfs_ivnode = vp;
	VREF(vp);
	vput(vp);

	return (0);
out:
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free(ump->um_lfs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * unmount system call
 */
lfs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	extern int doforce;
	register struct ufsmount *ump;
	register struct lfs *fs;
	int i, error, flags, ronly;

	flags = 0;
	if (mntflags & MNT_FORCE) {
		if (!doforce || (mp->mnt_flag & MNT_ROOTFS))
			return (EINVAL);
		flags |= FORCECLOSE;
	}

	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		if (error = vflush(mp, fs->lfs_ivnode, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	if (error = vflush(mp, fs->lfs_ivnode, flags))
		return (error);
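	/* Mark the file system clean, then take a final checkpoint. */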
	fs->lfs_clean = 1;
	if (error = VFS_SYNC(mp, 1, p->p_ucred, p))
		return (error);
	if (fs->lfs_ivnode->v_dirtyblkhd.lh_first)
		panic("lfs_unmount: still dirty blocks on ifile vnode\n");
	vrele(fs->lfs_ivnode);
	vgone(fs->lfs_ivnode);

	ronly = !fs->lfs_ronly;
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp,
	    ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	vrele(ump->um_devvp);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Get file system statistics.
 */
lfs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct lfs *fs;
	register struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;
	if (fs->lfs_magic != LFS_MAGIC)
		panic("lfs_statfs: magic");
	sbp->f_type = MOUNT_LFS;
	sbp->f_bsize = fs->lfs_bsize;
	sbp->f_iosize = fs->lfs_bsize;
	sbp->f_blocks = dbtofsb(fs, fs->lfs_dsize);
	sbp->f_bfree = dbtofsb(fs, fs->lfs_bfree);
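	/*
	 * Available space is the data space outside the minfree reserve,
	 * less the space already in use.
	 */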
	sbp->f_bavail = (fs->lfs_dsize * (100 - fs->lfs_minfree) / 100) -
		(fs->lfs_dsize - fs->lfs_bfree);
	sbp->f_bavail = dbtofsb(fs, sbp->f_bavail);
	sbp->f_files = fs->lfs_nfiles;
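	/*
	 * LFS allocates inodes dynamically, so free inodes are limited
	 * only by the free space remaining.
	 */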
	sbp->f_ffree = sbp->f_bfree * INOPB(fs);
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
lfs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	int error;

	/* All syncs must be checkpoints until roll-forward is implemented. */
	error = lfs_segwrite(mp, SEGM_CKP | (waitfor ? SEGM_SYNC : 0));
#ifdef QUOTA
	qsync(mp);
#endif
	return (error);
}

/*
 * Look up an LFS dinode number to find its incore vnode.  If not already
 * in core, read it in from the specified device.  Return the inode locked.
 * Detection and handling of mount points must be done by the calling routine.
 */
int
lfs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	register struct lfs *fs;
	register struct inode *ip;
	struct buf *bp;
	struct ifile *ifp;
	struct vnode *vp;
	struct ufsmount *ump;
	daddr_t daddr;
	dev_t dev;
	int error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Translate the inode number to a disk address. */
	fs = ump->um_lfs;
	if (ino == LFS_IFILE_INUM)
		daddr = fs->lfs_idaddr;
	else {
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = ifp->if_daddr;
		brelse(bp);
		if (daddr == LFS_UNUSED_DADDR)
			return (ENOENT);
	}

	/* Allocate new vnode/inode. */
	if (error = lfs_vcreate(mp, ino, &vp)) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = ump->um_lfs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (error =
	    bread(ump->um_devvp, daddr, (int)fs->lfs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
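	/* Locate this inode's dinode within the inode block just read. */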
	ip->i_din = *lfs_ifind(fs, ino, (struct dinode *)bp->b_data);
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.  In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	if (error = ufs_vinit(mp, lfs_specop_p, LFS_FIFOOPS, &vp)) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call lfs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 *
 * XXX
 * Use the ifile to see if the inode is allocated instead of reading it
 * off disk.  What is the relationship between my generation number and
 * the NFS generation number?
 */
int
lfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
	register struct mount *mp;
	struct fid *fhp;
	struct mbuf *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{
	register struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	if (ufhp->ufid_ino < ROOTINO)
		return (ESTALE);
	return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
lfs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}
549