/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_bio.c	7.31 (Berkeley) 07/12/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/trace.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <machine/endian.h>
#include <vm/vm.h>
#include <nfs/nfsnode.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsv2.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>

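/*
 * Buffer cache interface for NFS.  nfs_bioread() satisfies reads of
 * regular files, symbolic links and directories from cached blocks,
 * and nfs_write() stages writes through the buffer cache, recording
 * the dirty byte range of each block so that only modified data needs
 * to be pushed back to the server.  The NQNFS checks sprinkled through
 * both routines keep the cache consistent when the mount uses leases.
 */
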
/* True and false, how exciting */
#define	TRUE	1
#define	FALSE	0

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
nfs_bioread(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	register struct nfsnode *np = VTONFS(vp);
	register int biosize;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp;
	daddr_t lbn, bn, rablock[NFS_MAXRAHEAD];
	int rasize[NFS_MAXRAHEAD], nra, diff, error = 0;
	int n, on;

#ifdef lint
	ioflag = ioflag;
#endif /* lint */
#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0 && vp->v_type != VDIR)
		return (EINVAL);
	nmp = VFSTONFS(vp->v_mount);
	biosize = nmp->nm_rsize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc, or you have written to the file, you may have lost
	 * data cache consistency with the server, so flush all of the file's
	 * data out of the cache.  Then force a getattr rpc to ensure that
	 * you have up to date attributes.
	 * The mount flag NFSMNT_MYWRITE says "Assume that my writes are
	 * the ones changing the modify time."
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need
	 * current attributes, this could be forced by setting n_attrstamp
	 * to 0 before the VOP_GETATTR() call.
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
		if (np->n_flag & NMODIFIED) {
			np->n_flag &= ~NMODIFIED;
			if ((nmp->nm_flag & NFSMNT_MYWRITE) == 0 ||
			     vp->v_type != VREG)
				vinvalbuf(vp, TRUE, cred, uio->uio_procp);
			np->n_attrstamp = 0;
			np->n_direofoffset = 0;
			if (error = VOP_GETATTR(vp, &vattr, cred, uio->uio_procp))
				return (error);
			np->n_mtime = vattr.va_mtime.ts_sec;
		} else {
			if (error = VOP_GETATTR(vp, &vattr, cred, uio->uio_procp))
				return (error);
			if (np->n_mtime != vattr.va_mtime.ts_sec) {
				np->n_direofoffset = 0;
				vinvalbuf(vp, TRUE, cred, uio->uio_procp);
				np->n_mtime = vattr.va_mtime.ts_sec;
			}
		}
	}
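	/*
	 * Main read loop: each pass transfers at most one buffer cache
	 * block (or one directory block, or the whole symlink) and
	 * continues until the uio is drained, EOF is hit or an error
	 * occurs.
	 */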
	do {

	    /*
	     * Get a valid lease. If cached data is stale, flush it.
	     */
	    if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		NQNFS_CKINVALID(vp, np, NQL_READ)) {
		do {
			error = nqnfs_getlease(vp, NQL_READ, cred, uio->uio_procp);
		} while (error == NQNFS_EXPIRED);
		if (error)
			return (error);
		if (np->n_lrev != np->n_brev ||
		    ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			if (vp->v_type == VDIR) {
				np->n_direofoffset = 0;
				cache_purge(vp);
			}
			np->n_flag &= ~NMODIFIED;
			vinvalbuf(vp, TRUE, cred, uio->uio_procp);
			np->n_brev = np->n_lrev;
		}
	    }
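	    /*
	     * Non-cacheable node: bypass the buffer cache and do the
	     * read, readlink or readdir rpc directly.
	     */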
	    if (np->n_flag & NQNFSNONCACHE) {
		switch (vp->v_type) {
		case VREG:
			error = nfs_readrpc(vp, uio, cred);
			break;
		case VLNK:
			error = nfs_readlinkrpc(vp, uio, cred);
			break;
		case VDIR:
			error = nfs_readdirrpc(vp, uio, cred);
			break;
		}
		return (error);
	    }
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
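		/*
		 * Map the uio offset to a logical block number (lbn) and an
		 * offset within that block (on), then clip the transfer
		 * count n to the end of the block, the remaining request
		 * and the end of the file.
		 */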
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		diff = np->n_size - uio->uio_offset;
		if (diff <= 0)
			return (error);
		if (diff < n)
			n = diff;
		bn = lbn*(biosize/DEV_BSIZE);
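		/*
		 * Build the list of read-ahead blocks (at most nm_readahead
		 * of them) that still start before the end of the file;
		 * breadn() below starts asynchronous reads for any of them
		 * not already cached, but only when the access pattern
		 * looks sequential (lbn >= v_lastr).
		 */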
		for (nra = 0; nra < nmp->nm_readahead &&
			(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rablock[nra] = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
			rasize[nra] = biosize;
		}
again:
		if (nra > 0 && lbn >= vp->v_lastr)
			error = breadn(vp, bn, biosize, rablock, rasize, nra,
				cred, &bp);
		else
			error = bread(vp, bn, biosize, cred, &bp);
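		/*
		 * b_validoff/b_validend bound the portion of the block known
		 * to hold valid data.  If the buffer's valid region does not
		 * cover the bytes wanted here, toss the buffer (pushing it
		 * first if it holds delayed-write data) and read it again;
		 * otherwise mark everything just read as valid.
		 */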
		if (bp->b_validend > 0) {
			if (on < bp->b_validoff || (on+n) > bp->b_validend) {
				bp->b_flags |= B_INVAL;
				if (bp->b_dirtyend > 0) {
					if ((bp->b_flags & B_DELWRI) == 0)
						panic("nfsbioread");
					(void) bwrite(bp);
				} else
					brelse(bp);
				goto again;
			}
		} else {
			bp->b_validoff = 0;
			bp->b_validend = biosize - bp->b_resid;
		}
		vp->v_lastr = lbn;
		if (bp->b_resid) {
			diff = (on >= (biosize-bp->b_resid)) ? 0 :
				(biosize-bp->b_resid-on);
			n = min(n, diff);
		}
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		on = 0;
		error = bread(vp, (daddr_t)0, NFS_MAXPATHLEN, cred, &bp);
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		on = 0;
		error = bread(vp, uio->uio_offset, NFS_DIRBLKSIZ, cred, &bp);
		n = min(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid);
		break;
	    }
	    if (error) {
		brelse(bp);
		return (error);
	    }

	    /*
	     * For nqnfs:
	     * Must check for a valid lease, since it may have expired while
	     * in bread(). If expired, get a lease.
	     * If data is stale, flush and try again.
	     * nb: If a read rpc is done by bread() or breada() and there is
	     *     no valid lease, a get_lease request will be piggybacked.
	     */
	    if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, NQL_READ)) {
			do {
				error = nqnfs_getlease(vp, NQL_READ, cred, uio->uio_procp);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if ((np->n_flag & NQNFSNONCACHE) ||
			    np->n_lrev != np->n_brev ||
			    ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
				if (vp->v_type == VDIR) {
					np->n_direofoffset = 0;
					cache_purge(vp);
				}
				brelse(bp);
				np->n_flag &= ~NMODIFIED;
				vinvalbuf(vp, TRUE, cred, uio->uio_procp);
				np->n_brev = np->n_lrev;
				continue;
			}
		} else if ((np->n_flag & NQNFSNONCACHE) ||
		    ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			np->n_direofoffset = 0;
			brelse(bp);
			np->n_flag &= ~NMODIFIED;
			vinvalbuf(vp, TRUE, cred, uio->uio_procp);
			np->n_brev = np->n_lrev;
			continue;
		}
	    }
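	    /*
	     * Copy the requested bytes out of the cache block into the
	     * caller's buffer.
	     */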
	    if (n > 0)
		error = uiomove(bp->b_un.b_addr + on, (int)n, uio);
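	    /*
	     * Per-type bookkeeping: a regular file block that has been read
	     * through to its end (or to EOF) is marked B_AGE so it is
	     * recycled sooner; a symlink is consumed in a single pass; for a
	     * directory the uio offset is advanced to b_blkno, where the
	     * underlying readdir code leaves the offset of the next block.
	     */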
	    switch (vp->v_type) {
	    case VREG:
		if (n+on == biosize || uio->uio_offset == np->n_size)
			bp->b_flags |= B_AGE;
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		uio->uio_offset = bp->b_blkno;
		break;
	    }
	    brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
nfs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register int biosize;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	register struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	register struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp;
	daddr_t lbn, bn;
	int n, on, error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
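	/*
	 * For append or synchronous writes, flush any cached data first.
	 * For append, also refetch the attributes so the write starts at
	 * the server's current idea of the end of file.
	 */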
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_flag &= ~NMODIFIED;
			vinvalbuf(vp, TRUE, cred, p);
		}
		if (ioflag & IO_APPEND) {
			np->n_attrstamp = 0;
			if (error = VOP_GETATTR(vp, &vattr, cred, p))
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	nmp = VFSTONFS(vp->v_mount);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}
	/*
	 * I use nm_rsize, not nm_wsize, so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpcs.
	 */
	biosize = nmp->nm_rsize;
	np->n_flag |= NMODIFIED;
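	/*
	 * Main write loop: each pass fills at most one buffer cache block.
	 * The dirty byte range of the block is tracked in b_dirtyoff and
	 * b_dirtyend so that the eventual write rpc need only cover the
	 * bytes actually modified.
	 */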
	do {

		/*
		 * Check for a valid write lease.
		 * If non-cacheable, just do the rpc.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
			do {
				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				vinvalbuf(vp, TRUE, cred, p);
				np->n_brev = np->n_lrev;
			}
		}
		if (np->n_flag & NQNFSNONCACHE)
			return (nfs_writerpc(vp, uio, cred));
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		}
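		/*
		 * Convert the logical block number to its DEV_BSIZE-based
		 * block number, get the cache block (getblk() always
		 * returns a buffer, sleeping if necessary) and make sure it
		 * carries a write credential for later delayed writes.
		 */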
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = getblk(vp, bn, biosize);
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update b_dirtyoff and b_dirtyend;
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (error = bwrite(bp))
				return (error);
			goto again;
		}

		/*
		 * Check for a valid write lease and get one as required,
		 * in case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
			do {
				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				vinvalbuf(vp, TRUE, cred, p);
				np->n_brev = np->n_lrev;
			}
		}
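		/*
		 * Copy the caller's data into the cache block at offset on,
		 * then fold the new bytes into the block's dirty and valid
		 * ranges below.
		 */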
		if (error = uiomove(bp->b_un.b_addr + on, n, uio)) {
			brelse(bp);
			return (error);
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on+n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on+n;
		}
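		/*
		 * Grow the valid range to cover the newly dirtied bytes.
		 * If the old valid range and the new dirty range are
		 * disjoint, the gap between them would hold garbage, so the
		 * valid range is reset to just the dirty range.
		 */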
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		/*
		 * If the lease is non-cacheable or this is an IO_SYNC write,
		 * do bwrite().
		 */
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			bp->b_proc = p;
			bwrite(bp);
		} else if ((n+on) == biosize &&
			 (nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			bp->b_flags |= B_AGE;
			bp->b_proc = (struct proc *)0;
			bawrite(bp);
		} else {
			bp->b_proc = (struct proc *)0;
			bdwrite(bp);
		}
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
	return (error);
}