xref: /original-bsd/sys/ufs/lfs/lfs_vfsops.c (revision 3b3772fe)
/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_vfsops.c	7.87 (Berkeley) 10/08/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/socket.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

int lfs_mountfs __P((struct vnode *, struct mount *, struct proc *));

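/*
 * Filesystem operations vector for LFS.  The initializers are positional,
 * so they must stay in the slot order declared by struct vfsops; the
 * operations with no LFS-specific behavior (start, quotactl) are shared
 * with the common UFS code.
 */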
struct vfsops lfs_vfsops = {
	lfs_mount,
	ufs_start,
	lfs_unmount,
	lfs_root,
	ufs_quotactl,
	lfs_statfs,
	lfs_sync,
	lfs_vget,
	lfs_fhtovp,
	lfs_vptofh,
	lfs_init,
};

int
lfs_mountroot()
{
	panic("lfs_mountroot");		/* XXX -- implement */
}

/*
 * VFS Operations.
 *
 * mount system call
 */
lfs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register struct lfs *fs;				/* LFS */
	u_int size;
	int error;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);

	/* Until LFS can do NFS right.		XXX */
	if (args.exflags & MNT_EXPORTED)
		return (EINVAL);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
#ifdef NOTLFS							/* LFS */
		fs = ump->um_fs;
		if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->fs_ronly = 0;
#else
		fs = ump->um_lfs;
		if (fs->lfs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->lfs_ronly = 0;
#endif
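		/*
		 * The NOTLFS blocks here and in the statfs path below carry
		 * the corresponding FFS code; NOTLFS does not appear to be
		 * defined for LFS builds, so the #else branches are what is
		 * actually compiled.
		 */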
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			if (args.exflags & MNT_EXPORTED) {
				if (error = ufs_hang_addrlist(mp, &args))
					return (error);
				mp->mnt_flag |= MNT_EXPORTED;
			}
			if (args.exflags & MNT_DELEXPORT) {
				ufs_free_addrlist(ump);
				mp->mnt_flag &=
				    ~(MNT_EXPORTED | MNT_DEFEXPORTED);
			}
			return (0);
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if (error = namei(ndp))
		return (error);
	devvp = ndp->ni_vp;
	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = lfs_mountfs(devvp, mp, p);		/* LFS */
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;					/* LFS */
#ifdef NOTLFS							/* LFS */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
#else
	(void)copyinstr(path, fs->lfs_fsmnt, sizeof(fs->lfs_fsmnt) - 1, &size);
	bzero(fs->lfs_fsmnt + size, sizeof(fs->lfs_fsmnt) - size);
	bcopy((caddr_t)fs->lfs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) lfs_statfs(mp, &mp->mnt_stat, p);
#endif
	return (0);
}

/*
 * Common code for mount and mountroot
 * LFS specific
 */
int
lfs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	extern struct vnode *rootvp;
	register struct lfs *fs;
	register struct ufsmount *ump;
	struct vnode *vp;
	struct buf *bp;
	struct partinfo dpart;
	dev_t dev;
	int error, i, ronly, size;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = ufs_mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if (error = vinvalbuf(devvp, 1, p->p_ucred, p))
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p))
		return (error);

	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		size = dpart.disklab->d_secsize;
#ifdef NEVER_USED
		dpart.part->p_fstype = FS_LFS;
		dpart.part->p_fsize = fs->lfs_fsize;	/* frag size */
		dpart.part->p_frag = fs->lfs_frag;	/* frags per block */
		dpart.part->p_cpg = fs->lfs_segshift;	/* segment shift */
#endif
	}
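	/*
	 * The sector size determined above (or the DEV_BSIZE fallback) is
	 * what turns the LFS_LABELPAD byte offset into the block address
	 * handed to bread() when the superblock is read in below.
	 */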

	/* Don't free random space on error. */
	bp = NULL;
	ump = NULL;

	/* Read in the superblock. */
	if (error = bread(devvp, LFS_LABELPAD / size, LFS_SBPAD, NOCRED, &bp))
		goto out;
	fs = bp->b_un.b_lfs;

	/* Check the basics. */
	if (fs->lfs_magic != LFS_MAGIC || fs->lfs_bsize > MAXBSIZE ||
	    fs->lfs_bsize < sizeof(struct lfs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
#ifdef DEBUG
	lfs_dump_super(fs);
#endif

	/* Allocate the mount structure, copy the superblock into it. */
	ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	fs = ump->um_lfs = malloc(sizeof(struct lfs), M_UFSMNT, M_WAITOK);
	bcopy(bp->b_un.b_addr, fs, sizeof(struct lfs));
	if (sizeof(struct lfs) < LFS_SBPAD)			/* XXX why? */
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;

	/* Set up the I/O information */
	fs->lfs_iocount = 0;

	/* Set up the ifile and lock flags */
	fs->lfs_doifile = 0;
	fs->lfs_writer = 0;
	fs->lfs_dirops = 0;
	fs->lfs_seglock = 0;

	/* Set the file system readonly/modify bits. */
	fs->lfs_ronly = ronly;
	if (ronly == 0)
		fs->lfs_fmod = 1;

	/* Initialize the mount structure. */
	dev = devvp->v_rdev;
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_LFS;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
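	/*
	 * LFS block pointers already hold disk addresses, so no conversion
	 * shift is needed (um_bptrtodb is 0); um_seqinc is the number of
	 * disk blocks in a file system block (1 << lfs_fsbtodb).
	 */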
	ump->um_bptrtodb = 0;
	ump->um_seqinc = 1 << fs->lfs_fsbtodb;
	ump->um_nindir = fs->lfs_nindir;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;

	/*
	 * We use the ifile vnode for almost every operation.  Instead of
	 * retrieving it from the hash table each time, we retrieve it here,
	 * artificially increment the reference count and keep a pointer
	 * to it in the incore copy of the superblock.
	 */
	if (error = VFS_VGET(mp, LFS_IFILE_INUM, &vp))
		goto out;
	fs->lfs_ivnode = vp;
	VREF(vp);
	vput(vp);
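	/*
	 * VREF followed by vput leaves the ifile with the extra reference
	 * described above but drops the lock VFS_VGET returned it with.
	 */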

	return (0);
out:
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free(ump->um_lfs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * unmount system call
 */
lfs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	extern int doforce;
	register struct ufsmount *ump;
	register struct lfs *fs;
	int i, error, flags, ronly;

	flags = 0;
	if (mntflags & MNT_FORCE) {
		if (!doforce || mp == rootfs)
			return (EINVAL);
		flags |= FORCECLOSE;
	}

	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		if (error = vflush(mp, fs->lfs_ivnode, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	if (error = vflush(mp, fs->lfs_ivnode, flags))
		return (error);
	fs->lfs_clean = 1;
	if (error = VFS_SYNC(mp, 1, p->p_ucred, p))
		return (error);
	if (fs->lfs_ivnode->v_dirtyblkhd.le_next)
		panic("lfs_unmount: still dirty blocks on ifile vnode\n");
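	/*
	 * Release the ifile reference taken in lfs_mountfs and get rid of
	 * the vnode itself.
	 */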
	vrele(fs->lfs_ivnode);
	vgone(fs->lfs_ivnode);

	ronly = !fs->lfs_ronly;
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp,
	    ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	vrele(ump->um_devvp);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Return root of a filesystem
 */
int
lfs_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	struct vnode *nvp;
	int error;

	if (error = VFS_VGET(mp, (ino_t)ROOTINO, &nvp))
		return (error);
	*vpp = nvp;
	return (0);
}

/*
 * Get file system statistics.
 */
lfs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct lfs *fs;
	register struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;
	if (fs->lfs_magic != LFS_MAGIC)
		panic("lfs_statfs: magic");
	sbp->f_type = MOUNT_LFS;
	sbp->f_bsize = fs->lfs_bsize;
	sbp->f_iosize = fs->lfs_bsize;
	sbp->f_blocks = dbtofsb(fs, fs->lfs_dsize);
	sbp->f_bfree = dbtofsb(fs, fs->lfs_bfree);
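	/*
	 * Blocks available to ordinary users: the portion of lfs_dsize left
	 * over after the minfree reserve, less the blocks already in use
	 * (lfs_dsize - lfs_bfree), converted from disk to fs blocks below.
	 */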
	sbp->f_bavail = (fs->lfs_dsize * (100 - fs->lfs_minfree) / 100) -
		(fs->lfs_dsize - fs->lfs_bfree);
	sbp->f_bavail = dbtofsb(fs, sbp->f_bavail);
	sbp->f_files = fs->lfs_nfiles;
	sbp->f_ffree = sbp->f_bfree * INOPB(fs);
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
lfs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	extern int syncprt;
	int error;

	/* All syncs must be checkpoints until roll-forward is implemented. */
	error = lfs_segwrite(mp, 1);
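	/*
	 * The non-zero second argument presumably asks lfs_segwrite for a
	 * checkpoint; waitfor, cred and syncprt are not otherwise used here.
	 */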
#ifdef QUOTA
	qsync(mp);
#endif
	return (error);
}

/*
 * Look up an LFS dinode number to find its incore vnode.  If not already
 * in core, read it in from the specified device.  Return the inode locked.
 * Detection and handling of mount points must be done by the calling routine.
 */
int
lfs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	register struct lfs *fs;
	register struct inode *ip;
	struct buf *bp;
	struct ifile *ifp;
	struct vnode *vp;
	struct ufsmount *ump;
	daddr_t daddr;
	dev_t dev;
	int error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Translate the inode number to a disk address. */
	fs = ump->um_lfs;
	if (ino == LFS_IFILE_INUM)
		daddr = fs->lfs_idaddr;
	else {
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = ifp->if_daddr;
		brelse(bp);
		if (daddr == LFS_UNUSED_DADDR)
			return (ENOENT);
	}
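	/*
	 * At this point daddr is the disk address of the block holding the
	 * on-disk inode: the ifile's address comes straight from the
	 * superblock, everything else from its ifile entry.
	 */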

	/* Allocate new vnode/inode. */
	if (error = lfs_vcreate(mp, ino, &vp)) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = ump->um_lfs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (error =
	    bread(ump->um_devvp, daddr, (int)fs->lfs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it
		 * would be misleading to leave it on its hash chain.
		 * Iput() will return it to the free list.
		 */
		ufs_ihashrem(ip);

		/* Unlock and discard unneeded inode. */
		ufs_iput(ip);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
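	/*
	 * An inode block can hold more than one dinode; lfs_ifind locates
	 * the one matching this inode number.
	 */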
	ip->i_din = *lfs_ifind(fs, ino, bp->b_un.b_dino);
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.  In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	if (error = ufs_vinit(mp, lfs_specop_p, LFS_FIFOOPS, &vp)) {
		ufs_iput(ip);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call lfs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 *
 * XXX
 * use ifile to see if inode is allocated instead of reading off disk
 * what is the relationship between my generational number and the NFS
 * generational number.
 */
int
lfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
	register struct mount *mp;
	struct fid *fhp;
	struct mbuf *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{
	register struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	if (ufhp->ufid_ino < ROOTINO)
		return (ESTALE);
	return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
lfs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}