xref: /original-bsd/sys/ufs/lfs/lfs_bio.c (revision 753853ba)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_bio.c	7.6 (Berkeley) 02/04/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	locked_queue_count;		/* XXX Count of locked-down buffers. */

int
lfs_bwrite(bp)
	register BUF *bp;
{
	int s;
#ifdef VERBOSE
printf("lfs_bwrite\n");
#endif
	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 */
	if (!(bp->b_flags & B_LOCKED))
		++locked_queue_count;
	bp->b_flags |= B_DELWRI | B_LOCKED;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
	s = splbio();
	reassignbuf(bp, bp->b_vp);
	splx(s);
	brelse(bp);
	return (0);
}

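/*
 * Illustration only, not part of the original source: a minimal sketch of
 * the delayed-write path described in the comment above.  The vp, lbn, and
 * fs variables are hypothetical stand-ins for a caller's state, and the
 * getblk() call is the usual buffer lookup (exact arguments vary by
 * release).  The caller dirties a block and hands it to lfs_bwrite(), which
 * queues it (B_DELWRI|B_LOCKED) instead of starting I/O; the data reaches
 * disk later, when lfs_segwrite() writes out a segment.
 */
#if 0
	BUF *bp;

	bp = getblk(vp, lbn, fs->lfs_bsize);	/* find or create the buffer */
	/* ... modify the buffer contents ... */
	lfs_bwrite(bp);		/* delay the write; bumps locked_queue_count */
#endif
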
/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note, we have one static count of locked
 * buffers, so we can't have more than a single file system.  To make this
 * work for multiple file systems, put the count into the mount structure.
 */
void
lfs_flush()
{
	register struct mount *mp;
	struct mount *omp;

	/* 256 buffers is 1M of data in a 4K-block file system. */
	if (locked_queue_count < 256)
		return;
	mp = rootfs;
	do {
		/*
		 * The lock check below is to avoid races with mount
		 * and unmount.
		 */
		if (mp->mnt_stat.f_type == MOUNT_LFS &&
		    (mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_MPBUSY)) == 0 &&
		    !vfs_busy(mp)) {
			/*
			 * We set the queue to 0 here because we are about to
			 * write all the dirty buffers we have.  If more come
			 * in while we're writing the segment, they may not
			 * get written, so we want the count to reflect these
			 * new writes after the segwrite completes.
			 */
			locked_queue_count = 0;
			lfs_segwrite(mp, 0);
			omp = mp;
			mp = mp->mnt_next;
			vfs_unbusy(omp);
		} else
			mp = mp->mnt_next;
	} while (mp != rootfs);
}
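
/*
 * Illustration only, not part of the original source: one possible shape of
 * the per-file-system count suggested by the XXX comment above lfs_flush().
 * The lfs_lockcount field and the LFS_LOCKED_MAX threshold are hypothetical;
 * neither exists in this tree.  The sketch assumes the VFSTOUFS()/um_lfs
 * accessors LFS uses elsewhere to reach its struct lfs from a mount point.
 */
#if 0
	struct lfs *fs;

	/* Inside the loop over mounted file systems, replacing both the
	 * global threshold test and the global reset: */
	fs = VFSTOUFS(mp)->um_lfs;
	if (fs->lfs_lockcount >= LFS_LOCKED_MAX) {
		fs->lfs_lockcount = 0;
		lfs_segwrite(mp, 0);
	}
#endif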