/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_vfsops.c	7.61 (Berkeley) 10/09/91
 */

#ifdef LOGFS
#include "param.h"
#include "systm.h"
#include "namei.h"
#include "proc.h"
#include "kernel.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "buf.h"
#include "file.h"
#include "disklabel.h"
#include "ioctl.h"
#include "errno.h"
#include "malloc.h"

#include "../ufs/quota.h"
#include "../ufs/inode.h"
#include "../ufs/ufsmount.h"
#include "lfs.h"
#include "lfs_extern.h"

static int	lfs_mountfs
		    __P((struct vnode *, struct mount *, struct proc *));

static int	lfs_umountdebug __P((struct mount *));
static int	lfs_vinvalbuf __P((register struct vnode *));

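/*
 * Filesystem operations vector.  Most entries are LFS-specific; ufs_start,
 * ufs_quotactl and ufs_vptofh are shared with the UFS code.
 */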
struct vfsops lfs_vfsops = {
	lfs_mount,
	ufs_start,
	lfs_unmount,
	lfs_root,
	ufs_quotactl,
	lfs_statfs,
	lfs_sync,
	lfs_fhtovp,
	ufs_vptofh,
	lfs_init
};

/*
 * Flag to allow forcible unmounting.
 */
extern int doforce;						/* LFS */

lfs_mountroot()
{
	/* LFS IMPLEMENT -- lfs_mountroot */
	panic("lfs_mountroot");
}

/*
 * VFS Operations.
 *
 * mount system call
 */
lfs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register LFS *fs;					/* LFS */
	u_int size;
	int error;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);
	/*
	 * Process export requests.
	 */
	if ((args.exflags & MNT_EXPORTED) || (mp->mnt_flag & MNT_EXPORTED)) {
		if (args.exflags & MNT_EXPORTED)
			mp->mnt_flag |= MNT_EXPORTED;
		else
			mp->mnt_flag &= ~MNT_EXPORTED;
		if (args.exflags & MNT_EXRDONLY)
			mp->mnt_flag |= MNT_EXRDONLY;
		else
			mp->mnt_flag &= ~MNT_EXRDONLY;
		mp->mnt_exroot = args.exroot;
	}
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
#ifdef NOTLFS							/* LFS */
		fs = ump->um_fs;
		if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->fs_ronly = 0;
#else
		fs = ump->um_lfs;
		if (fs->lfs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->lfs_ronly = 0;
#endif
		if (args.fspec == 0)
			return (0);
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	ndp->ni_nameiop = LOOKUP | FOLLOW;
	ndp->ni_segflg = UIO_USERSPACE;
	ndp->ni_dirp = args.fspec;
	if (error = namei(ndp, p))
		return (error);
	devvp = ndp->ni_vp;
	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
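	/*
	 * For a new mount, read the superblock and set up the filesystem;
	 * for an update, just verify that the device has not changed.
	 */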
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = lfs_mountfs(devvp, mp, p);		/* LFS */
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;					/* LFS */
#ifdef NOTLFS							/* LFS */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
#else
	(void)copyinstr(path, fs->lfs_fsmnt, sizeof(fs->lfs_fsmnt) - 1, &size);
	bzero(fs->lfs_fsmnt + size, sizeof(fs->lfs_fsmnt) - size);
	bcopy((caddr_t)fs->lfs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) lfs_statfs(mp, &mp->mnt_stat, p);
#endif
	return (0);
}

/*
 * Common code for mount and mountroot
 * LFS specific
 */
static int
lfs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	extern struct vnode *rootvp;
	register LFS *fs;
	register struct ufsmount *ump;
	struct inode *ip;
	struct vnode *vp;
	struct buf *bp;
	struct partinfo dpart;
	daddr_t seg_addr;
	dev_t dev;
	int error, i, ronly, size;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vinvalbuf(devvp, 1);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p))
		return (error);

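	/*
	 * Get the sector size from the disk label; fall back to DEV_BSIZE
	 * if the driver does not support DIOCGPART.
	 */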
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		size = dpart.disklab->d_secsize;
#ifdef NEVER_USED
		dpart.part->p_fstype = FS_LFS;
		dpart.part->p_fsize = fs->lfs_fsize;	/* frag size */
		dpart.part->p_frag = fs->lfs_frag;	/* frags per block */
		dpart.part->p_cpg = fs->lfs_segshift;	/* segment shift */
#endif
	}

	/* Don't free random space on error. */
	bp = NULL;
	ump = NULL;

	/* Read in the superblock. */
	if (error = bread(devvp, LFS_LABELPAD / size, LFS_SBPAD, NOCRED, &bp))
		goto out;
	fs = bp->b_un.b_lfs;

	/* Check the basics. */
	if (fs->lfs_magic != LFS_MAGIC || fs->lfs_bsize > MAXBSIZE ||
	    fs->lfs_bsize < sizeof(LFS)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
#ifdef DEBUG
	dump_super(fs);
#endif

	/* Allocate the mount structure, copy the superblock into it. */
	ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	ump->um_lfs = malloc(sizeof(LFS), M_SUPERBLK, M_WAITOK);
	bcopy(bp->b_un.b_addr, ump->um_lfs, sizeof(LFS));
	if (sizeof(LFS) < LFS_SBPAD)			/* XXX why? */
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;

	/*
	 * Switch to the in-core copy of the superblock; the buffer copy was
	 * just released and must no longer be referenced.
	 */
	fs = ump->um_lfs;

	/* Set up the I/O information. */
	fs->lfs_iocount = 0;
	fs->lfs_seglist = NULL;

	/* Set the file system readonly/modify bits. */
	fs->lfs_ronly = ronly;
	if (ronly == 0)
		fs->lfs_fmod = 1;

	/* Initialize the mount structure. */
	dev = devvp->v_rdev;
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_LFS;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;

	/* Read the ifile disk inode and store it in a vnode. */
	error = bread(devvp, fs->lfs_idaddr, fs->lfs_bsize, NOCRED, &bp);
	if (error)
		goto out;
	error = lfs_vcreate(mp, LFS_IFILE_INUM, &vp);
	if (error)
		goto out;
	ip = VTOI(vp);

	/* The ifile vnode is stored in the superblock. */
	fs->lfs_ivnode = vp;

	/* Copy the on-disk inode into place. */
	ip->i_din = *lfs_ifind(fs, LFS_IFILE_INUM, bp->b_un.b_dino);
	brelse(bp);

	/* Initialize the associated vnode. */
	vp->v_type = IFTOVT(ip->i_mode);

	/*
	 * Read in the segusage table.
	 *
	 * Since we always explicitly write the segusage table at a checkpoint,
	 * we're assuming that it is contiguous on disk.
	 */
	seg_addr = ip->i_din.di_db[0];
	size = fs->lfs_segtabsz << fs->lfs_bshift;
	fs->lfs_segtab = malloc(size, M_SUPERBLK, M_WAITOK);
	error = bread(devvp, seg_addr, size, NOCRED, &bp);
	if (error) {
		free(fs->lfs_segtab, M_SUPERBLK);
		goto out;
	}
	bcopy((caddr_t)bp->b_un.b_addr, fs->lfs_segtab, size);
	brelse(bp);
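	/*
	 * The mount has succeeded: mark the device vnode as mounted on and
	 * add a reference to it through the ifile's i_devvp pointer.
	 */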
	devvp->v_specflags |= SI_MOUNTEDON;
	VREF(ip->i_devvp);
	return (0);
out:
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free((caddr_t)ump->um_lfs, M_SUPERBLK);
		free((caddr_t)ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * unmount system call
 */
lfs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register LFS *fs;					/* LFS */
	int i, error, ronly, flags = 0;
	int ndirty;						/* LFS */

printf("lfs_unmount\n");
	if (mntflags & MNT_FORCE) {
		if (!doforce || mp == rootfs)
			return (EINVAL);
		flags |= FORCECLOSE;
	}
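	/* Checkpoint the filesystem so all dirty data reaches the disk. */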
	if (error = lfs_segwrite(mp, 1))
		return (error);

ndirty = lfs_umountdebug(mp);
printf("lfs_umountdebug: returned %d dirty\n", ndirty);
return (0);
	if (mntinvalbuf(mp))
		return (EBUSY);
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	if (error = vflush(mp, NULLVP, flags))
		return (error);
#ifdef NOTLFS							/* LFS */
	fs = ump->um_fs;
	ronly = !fs->fs_ronly;
#else
	fs = ump->um_lfs;
	ronly = !fs->lfs_ronly;
#endif
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp, ronly ? FREAD : FREAD|FWRITE,
		NOCRED, p);
	vrele(ump->um_devvp);
#ifdef NOTLFS							/* LFS */
	free((caddr_t)fs->fs_csp[0], M_SUPERBLK);
#else
	free(fs->lfs_segtab, M_SUPERBLK);
	iput(VTOI(fs->lfs_ivnode));
#endif
	free((caddr_t)fs, M_SUPERBLK);
	free((caddr_t)ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Return root of a filesystem
 */
lfs_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	register struct inode *ip;
	struct inode *nip;
	struct vnode tvp;
	int error;

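	/*
	 * Fake up a vnode/inode pair on the stack so that lfs_iget() has
	 * the device and mount point it needs to fetch the root inode.
	 */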
	tvp.v_mount = mp;
	ip = VTOI(&tvp);
	ip->i_vnode = &tvp;
	ip->i_dev = VFSTOUFS(mp)->um_dev;
	error = lfs_iget(ip, (ino_t)ROOTINO, &nip);		/* LFS */
	if (error)
		return (error);
	*vpp = ITOV(nip);
	return (0);
}

/*
 * Get file system statistics.
 */
lfs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register LFS *fs;
	register struct ufsmount *ump;

	ump = VFSTOUFS(mp);
#ifdef NOTLFS							/* LFS */
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ufs_statfs");
	sbp->f_type = MOUNT_UFS;
	sbp->f_fsize = fs->fs_fsize;
	sbp->f_bsize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
	sbp->f_bavail = (fs->fs_dsize * (100 - fs->fs_minfree) / 100) -
		(fs->fs_dsize - sbp->f_bfree);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
#else
	fs = ump->um_lfs;
	if (fs->lfs_magic != LFS_MAGIC)
		panic("lfs_statfs: magic");
	sbp->f_type = MOUNT_LFS;
	sbp->f_fsize = fs->lfs_bsize;
	sbp->f_bsize = fs->lfs_bsize;
	sbp->f_blocks = fs->lfs_dsize;
	sbp->f_bfree = fs->lfs_bfree;
	sbp->f_bavail = (fs->lfs_dsize * (100 - fs->lfs_minfree) / 100) -
		(fs->lfs_dsize - sbp->f_bfree);
	sbp->f_files = fs->lfs_nfiles;
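	/*
	 * Inodes in LFS are allocated dynamically, so the free inode count
	 * here is just an estimate derived from the free block count.
	 */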
	sbp->f_ffree = fs->lfs_bfree * INOPB(fs);
#endif
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

extern int	syncprt;					/* LFS */

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
lfs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	static int sync_lock, sync_want;
	int error;

printf("lfs_sync\n");

	/*
	 * Meta data blocks are only marked dirty, not busy, so LFS syncs
	 * must be single threaded.
	 */
	while (sync_lock) {
		sync_want = 1;
		if (error = tsleep(&sync_lock, PLOCK | PCATCH, "lfs sync", 0))
			return (error);
	}
	sync_lock = 1;

	if (syncprt)
		bufstats();

	/* All syncs must be checkpoints until roll-forward is implemented. */
	error = lfs_segwrite(mp, 1);
#ifdef QUOTA
	qsync(mp);
#endif
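	/* Release the sync lock and wake up anyone waiting for it. */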
	sync_lock = 0;
	if (sync_want) {
		sync_want = 0;
		wakeup(&sync_lock);
	}
	return (error);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is in range
 * - call iget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the generation number matches
 */
lfs_fhtovp(mp, fhp, vpp)
	register struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	register struct ufid *ufhp;
	register LFS *fs;					/* LFS */
	register struct inode *ip;
	IFILE *ifp;
	struct buf *bp;
	struct inode *nip;
	struct vnode tvp;
	int error;

	ufhp = (struct ufid *)fhp;
#ifdef NOTLFS							/* LFS */
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg) {
		*vpp = NULLVP;
		return (EINVAL);
	}
#else
	fs = VFSTOUFS(mp)->um_lfs;
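	/*
	 * Unlike FFS, LFS has no fixed maximum inode number, so only the
	 * lower bound is checked here; stale handles are caught by the
	 * i_mode and generation checks below.
	 */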
	if (ufhp->ufid_ino < ROOTINO) {
		*vpp = NULLVP;
		return (EINVAL);
	}
#endif
	tvp.v_mount = mp;
	ip = VTOI(&tvp);
	ip->i_vnode = &tvp;
	ip->i_dev = VFSTOUFS(mp)->um_dev;
	if (error = lfs_iget(ip, ufhp->ufid_ino, &nip)) {	/* LFS */
		*vpp = NULLVP;
		return (error);
	}
	ip = nip;
	if (ip->i_mode == 0) {
		iput(ip);
		*vpp = NULLVP;
		return (EINVAL);
	}
	if (ip->i_gen != ufhp->ufid_gen) {
		iput(ip);
		*vpp = NULLVP;
		return (EINVAL);
	}
	*vpp = ITOV(ip);
	return (0);
}

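/*
 * Debugging code: walk all the vnodes on this mount and count the dirty
 * buffers that would keep it from being unmounted cleanly.
 */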
static int
lfs_umountdebug(mp)
	struct mount *mp;
{
	struct vnode *vp;
	int dirty;

	dirty = 0;
	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("umountdebug: not busy");
loop:
	for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		dirty += lfs_vinvalbuf(vp);
		vput(vp);
		if (vp->v_mount != mp)
			goto loop;
	}
	return (dirty);
}
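
/*
 * Debugging version of vinvalbuf: instead of sleeping on busy buffers or
 * writing delayed-write buffers, just report them and count the dirty ones.
 */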
static int
lfs_vinvalbuf(vp)
	register struct vnode *vp;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
printf("lfs_vinvalbuf: ino %d, lblkno %d, blkno %lx, flags %lx\n",
VTOI(vp)->i_number, bp->b_lblkno, bp->b_blkno, bp->b_flags);
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
printf("lfs_vinvalbuf: buffer busy, would normally sleep\n");
/*
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO + 1);
*/
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (bp->b_flags & B_DELWRI) {
				dirty++;			/* XXX */
printf("lfs_vinvalbuf: buffer dirty (DELWRI), would normally write\n");
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("lfs_vinvalbuf: flush failed");
	return (dirty);
}
#endif /* LOGFS */