xref: /original-bsd/sys/ufs/lfs/lfs_vfsops.c (revision deff14a8)
1 /*
2  * Copyright (c) 1989, 1991, 1993, 1994
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)lfs_vfsops.c	8.9 (Berkeley) 07/14/94
8  */
9 
10 #include <sys/param.h>
11 #include <sys/systm.h>
12 #include <sys/namei.h>
13 #include <sys/proc.h>
14 #include <sys/kernel.h>
15 #include <sys/vnode.h>
16 #include <sys/mount.h>
17 #include <sys/buf.h>
18 #include <sys/mbuf.h>
19 #include <sys/file.h>
20 #include <sys/disklabel.h>
21 #include <sys/ioctl.h>
22 #include <sys/errno.h>
23 #include <sys/malloc.h>
24 #include <sys/socket.h>
25 
26 #include <miscfs/specfs/specdev.h>
27 
28 #include <ufs/ufs/quota.h>
29 #include <ufs/ufs/inode.h>
30 #include <ufs/ufs/ufsmount.h>
31 #include <ufs/ufs/ufs_extern.h>
32 
33 #include <ufs/lfs/lfs.h>
34 #include <ufs/lfs/lfs_extern.h>
35 
36 int lfs_mountfs __P((struct vnode *, struct mount *, struct proc *));
37 
/*
 * VFS operations vector for LFS.  This is a positional initializer, so
 * the entry order must match the member order of struct vfsops:
 * mount, start, unmount, root, quotactl, statfs, sync, vget, fhtovp,
 * vptofh, init.  Generic UFS handlers are reused where LFS needs no
 * special behavior (start, root, quotactl).
 */
struct vfsops lfs_vfsops = {
	lfs_mount,
	ufs_start,
	lfs_unmount,
	ufs_root,
	ufs_quotactl,
	lfs_statfs,
	lfs_sync,
	lfs_vget,
	lfs_fhtovp,
	lfs_vptofh,
	lfs_init,
};
51 
/*
 * Mount LFS as the root file system.  Unimplemented: any attempt to
 * boot with an LFS root panics the system.
 */
int
lfs_mountroot()
{
	panic("lfs_mountroot");		/* XXX -- implement */
}
57 
58 /*
59  * VFS Operations.
60  *
61  * mount system call
62  */
63 lfs_mount(mp, path, data, ndp, p)
64 	register struct mount *mp;
65 	char *path;
66 	caddr_t data;
67 	struct nameidata *ndp;
68 	struct proc *p;
69 {
70 	struct vnode *devvp;
71 	struct ufs_args args;
72 	struct ufsmount *ump;
73 	register struct lfs *fs;				/* LFS */
74 	u_int size;
75 	int error;
76 	mode_t accessmode;
77 
78 	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
79 		return (error);
80 
81 	/* Until LFS can do NFS right.		XXX */
82 	if (args.export.ex_flags & MNT_EXPORTED)
83 		return (EINVAL);
84 
85 	/*
86 	 * If updating, check whether changing from read-only to
87 	 * read/write; if there is no device name, that's all we do.
88 	 */
89 	if (mp->mnt_flag & MNT_UPDATE) {
90 		ump = VFSTOUFS(mp);
91 		if (fs->lfs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
92 			/*
93 			 * If upgrade to read-write by non-root, then verify
94 			 * that user has necessary permissions on the device.
95 			 */
96 			if (p->p_ucred->cr_uid != 0) {
97 				VOP_LOCK(ump->um_devvp);
98 				if (error = VOP_ACCESS(ump->um_devvp,
99 				    VREAD | VWRITE, p->p_ucred, p)) {
100 					VOP_UNLOCK(ump->um_devvp);
101 					return (error);
102 				}
103 				VOP_UNLOCK(ump->um_devvp);
104 			}
105 			fs->lfs_ronly = 0;
106 		}
107 		if (args.fspec == 0) {
108 			/*
109 			 * Process export requests.
110 			 */
111 			return (vfs_export(mp, &ump->um_export, &args.export));
112 		}
113 	}
114 	/*
115 	 * Not an update, or updating the name: look up the name
116 	 * and verify that it refers to a sensible block device.
117 	 */
118 	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
119 	if (error = namei(ndp))
120 		return (error);
121 	devvp = ndp->ni_vp;
122 	if (devvp->v_type != VBLK) {
123 		vrele(devvp);
124 		return (ENOTBLK);
125 	}
126 	if (major(devvp->v_rdev) >= nblkdev) {
127 		vrele(devvp);
128 		return (ENXIO);
129 	}
130 	/*
131 	 * If mount by non-root, then verify that user has necessary
132 	 * permissions on the device.
133 	 */
134 	if (p->p_ucred->cr_uid != 0) {
135 		accessmode = VREAD;
136 		if ((mp->mnt_flag & MNT_RDONLY) == 0)
137 			accessmode |= VWRITE;
138 		VOP_LOCK(devvp);
139 		if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) {
140 			vput(devvp);
141 			return (error);
142 		}
143 		VOP_UNLOCK(devvp);
144 	}
145 	if ((mp->mnt_flag & MNT_UPDATE) == 0)
146 		error = lfs_mountfs(devvp, mp, p);		/* LFS */
147 	else {
148 		if (devvp != ump->um_devvp)
149 			error = EINVAL;	/* needs translation */
150 		else
151 			vrele(devvp);
152 	}
153 	if (error) {
154 		vrele(devvp);
155 		return (error);
156 	}
157 	ump = VFSTOUFS(mp);
158 	fs = ump->um_lfs;					/* LFS */
159 #ifdef NOTLFS							/* LFS */
160 	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
161 	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
162 	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
163 	    MNAMELEN);
164 	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
165 	    &size);
166 	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
167 	(void) ufs_statfs(mp, &mp->mnt_stat, p);
168 #else
169 	(void)copyinstr(path, fs->lfs_fsmnt, sizeof(fs->lfs_fsmnt) - 1, &size);
170 	bzero(fs->lfs_fsmnt + size, sizeof(fs->lfs_fsmnt) - size);
171 	bcopy((caddr_t)fs->lfs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
172 	    MNAMELEN);
173 	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
174 	    &size);
175 	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
176 	(void) lfs_statfs(mp, &mp->mnt_stat, p);
177 #endif
178 	return (0);
179 }
180 
181 /*
182  * Common code for mount and mountroot
183  * LFS specific
184  */
int
lfs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	extern struct vnode *rootvp;
	register struct lfs *fs;
	register struct ufsmount *ump;
	struct vnode *vp;
	struct buf *bp;
	struct partinfo dpart;
	dev_t dev;
	int error, i, ronly, size;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = vfs_mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if (error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0))
		return (error);

	/* Open the device; mode must match the VOP_CLOSE calls below. */
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))
		return (error);

	/*
	 * Determine the device sector size from the disk label; fall back
	 * to DEV_BSIZE when the driver cannot report partition info.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		size = dpart.disklab->d_secsize;
		/*
		 * NOTE(review): this compiled-out code reads fs before it is
		 * assigned; it would need to move after the bread() below if
		 * ever enabled.
		 */
#ifdef NEVER_USED
		dpart.part->p_fstype = FS_LFS;
		dpart.part->p_fsize = fs->lfs_fsize;	/* frag size */
		dpart.part->p_frag = fs->lfs_frag;	/* frags per block */
		dpart.part->p_cpg = fs->lfs_segshift;	/* segment shift */
#endif
	}

	/* Don't free random space on error. */
	bp = NULL;
	ump = NULL;

	/* Read in the superblock. */
	if (error = bread(devvp, LFS_LABELPAD / size, LFS_SBPAD, NOCRED, &bp))
		goto out;
	fs = (struct lfs *)bp->b_data;

	/* Check the basics. */
	if (fs->lfs_magic != LFS_MAGIC || fs->lfs_bsize > MAXBSIZE ||
	    fs->lfs_bsize < sizeof(struct lfs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}

	/* Allocate the mount structure, copy the superblock into it. */
	ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	fs = ump->um_lfs = malloc(sizeof(struct lfs), M_UFSMNT, M_WAITOK);
	bcopy(bp->b_data, fs, sizeof(struct lfs));
	if (sizeof(struct lfs) < LFS_SBPAD)			/* XXX why? */
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;

	/* Set up the I/O information */
	fs->lfs_iocount = 0;

	/* Set up the ifile and lock aflags */
	fs->lfs_doifile = 0;
	fs->lfs_writer = 0;
	fs->lfs_dirops = 0;
	fs->lfs_seglock = 0;

	/* Set the file system readonly/modify bits. */
	fs->lfs_ronly = ronly;
	if (ronly == 0)
		fs->lfs_fmod = 1;

	/* Initialize the mount structure. */
	dev = devvp->v_rdev;
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_LFS;
	mp->mnt_maxsymlinklen = fs->lfs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_bptrtodb = 0;
	ump->um_seqinc = 1 << fs->lfs_fsbtodb;
	ump->um_nindir = fs->lfs_nindir;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;

	/*
	 * We use the ifile vnode for almost every operation.  Instead of
	 * retrieving it from the hash table each time we retrieve it here,
	 * artificially increment the reference count and keep a pointer
	 * to it in the incore copy of the superblock.
	 */
	if (error = VFS_VGET(mp, LFS_IFILE_INUM, &vp))
		goto out;
	fs->lfs_ivnode = vp;
	VREF(vp);
	vput(vp);

	return (0);
out:
	/*
	 * Common error exit: release the superblock buffer (if still held),
	 * close the device, and free any partially built mount structure.
	 * NOTE(review): if VFS_VGET above fails, SI_MOUNTEDON has already
	 * been set on devvp and is not cleared here -- confirm whether the
	 * caller's vrele path compensates.
	 */
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free(ump->um_lfs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}
309 
310 /*
311  * unmount system call
312  */
313 lfs_unmount(mp, mntflags, p)
314 	struct mount *mp;
315 	int mntflags;
316 	struct proc *p;
317 {
318 	extern int doforce;
319 	register struct ufsmount *ump;
320 	register struct lfs *fs;
321 	int i, error, flags, ronly;
322 
323 	flags = 0;
324 	if (mntflags & MNT_FORCE) {
325 		if (!doforce || (mp->mnt_flag & MNT_ROOTFS))
326 			return (EINVAL);
327 		flags |= FORCECLOSE;
328 	}
329 
330 	ump = VFSTOUFS(mp);
331 	fs = ump->um_lfs;
332 #ifdef QUOTA
333 	if (mp->mnt_flag & MNT_QUOTA) {
334 		if (error = vflush(mp, fs->lfs_ivnode, SKIPSYSTEM|flags))
335 			return (error);
336 		for (i = 0; i < MAXQUOTAS; i++) {
337 			if (ump->um_quotas[i] == NULLVP)
338 				continue;
339 			quotaoff(p, mp, i);
340 		}
341 		/*
342 		 * Here we fall through to vflush again to ensure
343 		 * that we have gotten rid of all the system vnodes.
344 		 */
345 	}
346 #endif
347 	if (error = vflush(mp, fs->lfs_ivnode, flags))
348 		return (error);
349 	fs->lfs_clean = 1;
350 	if (error = VFS_SYNC(mp, 1, p->p_ucred, p))
351 		return (error);
352 	if (fs->lfs_ivnode->v_dirtyblkhd.lh_first)
353 		panic("lfs_unmount: still dirty blocks on ifile vnode\n");
354 	vrele(fs->lfs_ivnode);
355 	vgone(fs->lfs_ivnode);
356 
357 	ronly = !fs->lfs_ronly;
358 	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
359 	error = VOP_CLOSE(ump->um_devvp,
360 	    ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
361 	vrele(ump->um_devvp);
362 	free(fs, M_UFSMNT);
363 	free(ump, M_UFSMNT);
364 	mp->mnt_data = (qaddr_t)0;
365 	mp->mnt_flag &= ~MNT_LOCAL;
366 	return (error);
367 }
368 
369 /*
370  * Get file system statistics.
371  */
372 lfs_statfs(mp, sbp, p)
373 	struct mount *mp;
374 	register struct statfs *sbp;
375 	struct proc *p;
376 {
377 	register struct lfs *fs;
378 	register struct ufsmount *ump;
379 
380 	ump = VFSTOUFS(mp);
381 	fs = ump->um_lfs;
382 	if (fs->lfs_magic != LFS_MAGIC)
383 		panic("lfs_statfs: magic");
384 	sbp->f_type = MOUNT_LFS;
385 	sbp->f_bsize = fs->lfs_bsize;
386 	sbp->f_iosize = fs->lfs_bsize;
387 	sbp->f_blocks = dbtofsb(fs,fs->lfs_dsize);
388 	sbp->f_bfree = dbtofsb(fs, fs->lfs_bfree);
389 	sbp->f_bavail = (fs->lfs_dsize * (100 - fs->lfs_minfree) / 100) -
390 		(fs->lfs_dsize - fs->lfs_bfree);
391 	sbp->f_bavail = dbtofsb(fs, sbp->f_bavail);
392 	sbp->f_files = fs->lfs_nfiles;
393 	sbp->f_ffree = sbp->f_bfree * INOPB(fs);
394 	if (sbp != &mp->mnt_stat) {
395 		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
396 			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
397 		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
398 			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
399 	}
400 	return (0);
401 }
402 
403 /*
404  * Go through the disk queues to initiate sandbagged IO;
405  * go through the inodes to write those that have been modified;
406  * initiate the writing of the super block if it has been modified.
407  *
408  * Note: we are always called with the filesystem marked `MPBUSY'.
409  */
410 lfs_sync(mp, waitfor, cred, p)
411 	struct mount *mp;
412 	int waitfor;
413 	struct ucred *cred;
414 	struct proc *p;
415 {
416 	int error;
417 
418 	/* All syncs must be checkpoints until roll-forward is implemented. */
419 	error = lfs_segwrite(mp, SEGM_CKP | (waitfor ? SEGM_SYNC : 0));
420 #ifdef QUOTA
421 	qsync(mp);
422 #endif
423 	return (error);
424 }
425 
426 /*
427  * Look up an LFS dinode number to find its incore vnode.  If not already
428  * in core, read it in from the specified device.  Return the inode locked.
429  * Detection and handling of mount points must be done by the calling routine.
430  */
int
lfs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	register struct lfs *fs;
	register struct inode *ip;
	struct buf *bp;
	struct ifile *ifp;
	struct vnode *vp;
	struct ufsmount *ump;
	daddr_t daddr;
	dev_t dev;
	int error;

	/* Fast path: the inode may already be in the hash table. */
	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Translate the inode number to a disk address. */
	fs = ump->um_lfs;
	if (ino == LFS_IFILE_INUM)
		daddr = fs->lfs_idaddr;
	else {
		/* Look the inode up in the ifile to find its disk address. */
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = ifp->if_daddr;
		brelse(bp);
		if (daddr == LFS_UNUSED_DADDR)
			return (ENOENT);
	}

	/* Allocate new vnode/inode. */
	if (error = lfs_vcreate(mp, ino, &vp)) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = ump->um_lfs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (error =
	    bread(ump->um_devvp, daddr, (int)fs->lfs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	/* The block holds several dinodes; find ours and copy it in. */
	ip->i_din = *lfs_ifind(fs, ino, (struct dinode *)bp->b_data);
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.  In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	if (error = ufs_vinit(mp, lfs_specop_p, LFS_FIFOOPS, &vp)) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	*vpp = vp;
	return (0);
}
521 
522 /*
523  * File handle to vnode
524  *
525  * Have to be really careful about stale file handles:
526  * - check that the inode number is valid
527  * - call lfs_vget() to get the locked inode
528  * - check for an unallocated inode (i_mode == 0)
529  * - check that the given client host has export rights and return
530  *   those rights via. exflagsp and credanonp
531  *
532  * XXX
533  * use ifile to see if inode is allocated instead of reading off disk
534  * what is the relationship between my generational number and the NFS
535  * generational number.
536  */
537 int
538 lfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
539 	register struct mount *mp;
540 	struct fid *fhp;
541 	struct mbuf *nam;
542 	struct vnode **vpp;
543 	int *exflagsp;
544 	struct ucred **credanonp;
545 {
546 	register struct ufid *ufhp;
547 
548 	ufhp = (struct ufid *)fhp;
549 	if (ufhp->ufid_ino < ROOTINO)
550 		return (ESTALE);
551 	return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
552 }
553 
554 /*
555  * Vnode pointer to File handle
556  */
557 /* ARGSUSED */
558 lfs_vptofh(vp, fhp)
559 	struct vnode *vp;
560 	struct fid *fhp;
561 {
562 	register struct inode *ip;
563 	register struct ufid *ufhp;
564 
565 	ip = VTOI(vp);
566 	ufhp = (struct ufid *)fhp;
567 	ufhp->ufid_len = sizeof(struct ufid);
568 	ufhp->ufid_ino = ip->i_number;
569 	ufhp->ufid_gen = ip->i_gen;
570 	return (0);
571 }
572