xref: /original-bsd/sys/ufs/lfs/lfs_bio.c (revision daedb501)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_bio.c	7.16 (Berkeley) 08/26/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
int	locked_queue_count;		/* XXX Count of locked-down buffers. */
int	lfs_writing;			/* Set if already kicked off a writer
					   because of buffer space */
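/*
 * Flushing starts once more than about a quarter of the system's buffers
 * are locked down (WRITE_THRESHHOLD); writers are put to sleep in
 * lfs_check() once more than about half of them are (WAIT_THRESHHOLD).
 */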
#define WRITE_THRESHHOLD	((nbuf >> 2) - 10)
#define WAIT_THRESHHOLD		((nbuf >> 1) - 10)

int
lfs_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	register struct buf *bp = ap->a_bp;
	struct lfs *fs;
	struct inode *ip;
	int s;

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary; otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
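		/*
		 * If there is no room on disk for even one more block, toss
		 * the buffer, post a wakeup on the cleaner wakeup address,
		 * and fail with ENOSPC.  The ifile is exempt from this check.
		 */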
		if (!LFS_FITS(fs, fsbtodb(fs, 1)) && !IS_IFILE(bp)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			wakeup(&lfs_allclean_wakeup);
			return (ENOSPC);
		}
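		/*
		 * Mark the inode modified, counting it in lfs_uinodes the
		 * first time, and debit one block from lfs_avail.
		 */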
		ip = VTOI(bp->b_vp);
		if (!(ip->i_flag & IMOD))
			++fs->lfs_uinodes;
		ip->i_flag |= IMOD | ICHG | IUPD;
		fs->lfs_avail -= fsbtodb(fs, 1);
		++locked_queue_count;
		bp->b_flags |= B_DELWRI | B_LOCKED;
		bp->b_flags &= ~(B_READ | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}
	brelse(bp);
	return (0);
}

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note that we have a single, static count
 * of locked buffers, so this only works for one file system.  To make it
 * work for multiple file systems, put the count into the mount structure.
 */
void
lfs_flush()
{
	register struct mount *mp;

	if (lfs_writing)
		return;
	lfs_writing = 1;
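	/*
	 * Walk the (circular) mount list, writing a segment for each LFS
	 * that it is currently safe to write.
	 */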
	mp = rootfs;
	do {
		/* The lock check below is to avoid races with unmount. */
		if (mp->mnt_stat.f_type == MOUNT_LFS &&
		    (mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_UNMOUNT)) == 0 &&
		    !((((struct ufsmount *)mp->mnt_data))->ufsmount_u.lfs)->lfs_dirops) {
			/*
			 * We set the queue to 0 here because we are about to
			 * write all the dirty buffers we have.  If more come
			 * in while we're writing the segment, they may not
			 * get written, so we want the count to reflect these
			 * new writes after the segwrite completes.
			 */
			locked_queue_count = 0;
			lfs_segwrite(mp, 0);
		}
		mp = mp->mnt_next;
	} while (mp != rootfs);
	lfs_writing = 0;
}

int
lfs_check(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	extern int lfs_allclean_wakeup;
	int error = 0;

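	/*
	 * If the block is already in the cache, no new buffer will be
	 * allocated for it, so there is no need to flush or to make the
	 * writer wait.
	 */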
	if (incore(vp, blkno))
		return (0);
	if (locked_queue_count > WRITE_THRESHHOLD)
		lfs_flush();
	if (locked_queue_count > WAIT_THRESHHOLD)
		error = tsleep(&lfs_allclean_wakeup, PCATCH | PUSER,
		    "buffers", 0);
	return (error);
}