xref: /original-bsd/sys/nfs/nfs_bio.c (revision 4ba124f7)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Rick Macklem at The University of Guelph.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)nfs_bio.c	8.6 (Berkeley) 06/08/94
11  */
12 
13 #include <sys/param.h>
14 #include <sys/systm.h>
15 #include <sys/resourcevar.h>
16 #include <sys/proc.h>
17 #include <sys/buf.h>
18 #include <sys/vnode.h>
19 #include <sys/trace.h>
20 #include <sys/mount.h>
21 #include <sys/kernel.h>
22 
23 #include <vm/vm.h>
24 
25 #include <nfs/nfsnode.h>
26 #include <nfs/rpcv2.h>
27 #include <nfs/nfsv2.h>
28 #include <nfs/nfs.h>
29 #include <nfs/nfsmount.h>
30 #include <nfs/nqnfs.h>
31 
32 struct buf *incore(), *nfs_getcacheblk();
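/*
 * Editorial note (inferred from nfs_asyncio() below): nfs_iodwant[] holds an
 * entry for each nfsiod daemon that is idle and waiting for work, and
 * nfs_numasync counts the async daemons currently running.
 */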
33 extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
34 extern int nfs_numasync;
35 
36 /*
37  * Vnode op for read using bio
38  * Any similarity to readip() is purely coincidental
39  */
40 nfs_bioread(vp, uio, ioflag, cred)
41 	register struct vnode *vp;
42 	register struct uio *uio;
43 	int ioflag;
44 	struct ucred *cred;
45 {
46 	register struct nfsnode *np = VTONFS(vp);
47 	register int biosize, diff;
48 	struct buf *bp, *rabp;
49 	struct vattr vattr;
50 	struct proc *p;
51 	struct nfsmount *nmp;
52 	daddr_t lbn, bn, rabn;
53 	caddr_t baddr;
54 	int got_buf, nra, error = 0, n, on, not_readin;
55 
56 #ifdef lint
57 	ioflag = ioflag;
58 #endif /* lint */
59 #ifdef DIAGNOSTIC
60 	if (uio->uio_rw != UIO_READ)
61 		panic("nfs_read mode");
62 #endif
63 	if (uio->uio_resid == 0)
64 		return (0);
65 	if (uio->uio_offset < 0 && vp->v_type != VDIR)
66 		return (EINVAL);
67 	nmp = VFSTONFS(vp->v_mount);
68 	biosize = nmp->nm_rsize;
69 	p = uio->uio_procp;
70 	/*
71 	 * For nfs, cache consistency can only be maintained approximately.
72 	 * Although RFC1094 does not specify the criteria, the following is
73 	 * believed to be compatible with the reference port.
74 	 * For nqnfs, full cache consistency is maintained within the loop.
75 	 * For nfs:
76 	 * If the file's modify time on the server has changed since the
77 	 * last read rpc or you have written to the file,
78 	 * you may have lost data cache consistency with the
79 	 * server, so flush all of the file's data out of the cache.
80 	 * Then force a getattr rpc to ensure that you have up to date
81 	 * attributes.
82 	 * The mount flag NFSMNT_MYWRITE says "Assume that my writes are
83  * the ones changing the modify time."
84 	 * NB: This implies that cache data can be read when up to
85 	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
86 	 * attributes this could be forced by setting n_attrstamp to 0 before
87 	 * the VOP_GETATTR() call.
88 	 */
89 	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
90 		if (np->n_flag & NMODIFIED) {
91 			if ((nmp->nm_flag & NFSMNT_MYWRITE) == 0 ||
92 			     vp->v_type != VREG) {
93 				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
94 					return (error);
95 			}
96 			np->n_attrstamp = 0;
97 			np->n_direofoffset = 0;
98 			if (error = VOP_GETATTR(vp, &vattr, cred, p))
99 				return (error);
100 			np->n_mtime = vattr.va_mtime.ts_sec;
101 		} else {
102 			if (error = VOP_GETATTR(vp, &vattr, cred, p))
103 				return (error);
104 			if (np->n_mtime != vattr.va_mtime.ts_sec) {
105 				np->n_direofoffset = 0;
106 				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
107 					return (error);
108 				np->n_mtime = vattr.va_mtime.ts_sec;
109 			}
110 		}
111 	}
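	/*
	 * Main transfer loop: each pass revalidates the lease (for nqnfs),
	 * finds or reads in the buffer covering the current offset, and
	 * copies at most one block's worth of data into the uio.
	 */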
112 	do {
113 
114 	    /*
115 	     * Get a valid lease. If cached data is stale, flush it.
116 	     */
117 	    if (nmp->nm_flag & NFSMNT_NQNFS) {
118 		if (NQNFS_CKINVALID(vp, np, NQL_READ)) {
119 		    do {
120 			error = nqnfs_getlease(vp, NQL_READ, cred, p);
121 		    } while (error == NQNFS_EXPIRED);
122 		    if (error)
123 			return (error);
124 		    if (np->n_lrev != np->n_brev ||
125 			(np->n_flag & NQNFSNONCACHE) ||
126 			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
127 			if (vp->v_type == VDIR) {
128 			    np->n_direofoffset = 0;
129 			    cache_purge(vp);
130 			}
131 			if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
132 			    return (error);
133 			np->n_brev = np->n_lrev;
134 		    }
135 		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
136 		    np->n_direofoffset = 0;
137 		    cache_purge(vp);
138 		    if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
139 			return (error);
140 		}
141 	    }
142 	    if (np->n_flag & NQNFSNONCACHE) {
143 		switch (vp->v_type) {
144 		case VREG:
145 			error = nfs_readrpc(vp, uio, cred);
146 			break;
147 		case VLNK:
148 			error = nfs_readlinkrpc(vp, uio, cred);
149 			break;
150 		case VDIR:
151 			error = nfs_readdirrpc(vp, uio, cred);
152 			break;
153 		};
154 		return (error);
155 	    }
156 	    baddr = (caddr_t)0;
157 	    switch (vp->v_type) {
158 	    case VREG:
159 		nfsstats.biocache_reads++;
160 		lbn = uio->uio_offset / biosize;
161 		on = uio->uio_offset & (biosize-1);
162 		bn = lbn * (biosize / DEV_BSIZE);
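		/*
		 * Illustrative values (assumed, not from the code): with
		 * biosize 8192 and uio_offset 20480, lbn = 2, on = 4096 and,
		 * assuming DEV_BSIZE is 512, bn = 2 * 16 = 32.
		 */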
163 		not_readin = 1;
164 
165 		/*
166 		 * Start the read ahead(s), as required.
167 		 */
168 		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
169 		    lbn == vp->v_lastr + 1) {
170 		    for (nra = 0; nra < nmp->nm_readahead &&
171 			(lbn + 1 + nra) * biosize < np->n_size; nra++) {
172 			rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
173 			if (!incore(vp, rabn)) {
174 			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
175 			    if (!rabp)
176 				return (EINTR);
177 			    if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
178 				rabp->b_flags |= (B_READ | B_ASYNC);
179 				if (nfs_asyncio(rabp, cred)) {
180 				    rabp->b_flags |= B_INVAL;
181 				    brelse(rabp);
182 				}
183 			    }
184 			}
185 		    }
186 		}
187 
188 		/*
189 		 * If the block is in the cache and has the required data
190 		 * in a valid region, just copy it out.
191 		 * Otherwise, get the block and write back/read in,
192 		 * as required.
193 		 */
194 		if ((bp = incore(vp, bn)) &&
195 		    (bp->b_flags & (B_BUSY | B_WRITEINPROG)) ==
196 		    (B_BUSY | B_WRITEINPROG))
197 			got_buf = 0;
198 		else {
199 again:
200 			bp = nfs_getcacheblk(vp, bn, biosize, p);
201 			if (!bp)
202 				return (EINTR);
203 			got_buf = 1;
204 			if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
205 				bp->b_flags |= B_READ;
206 				not_readin = 0;
207 				if (error = nfs_doio(bp, cred, p)) {
208 				    brelse(bp);
209 				    return (error);
210 				}
211 			}
212 		}
213 		n = min((unsigned)(biosize - on), uio->uio_resid);
214 		diff = np->n_size - uio->uio_offset;
215 		if (diff < n)
216 			n = diff;
217 		if (not_readin && n > 0) {
218 			if (on < bp->b_validoff || (on + n) > bp->b_validend) {
219 				if (!got_buf) {
220 				    bp = nfs_getcacheblk(vp, bn, biosize, p);
221 				    if (!bp)
222 					return (EINTR);
223 				    got_buf = 1;
224 				}
225 				bp->b_flags |= B_INVAL;
226 				if (bp->b_dirtyend > 0) {
227 				    if ((bp->b_flags & B_DELWRI) == 0)
228 					panic("nfsbioread");
229 				    if (VOP_BWRITE(bp) == EINTR)
230 					return (EINTR);
231 				} else
232 				    brelse(bp);
233 				goto again;
234 			}
235 		}
236 		vp->v_lastr = lbn;
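		/* Clamp the transfer to the portion of the buffer known valid. */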
237 		diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
238 		if (diff < n)
239 			n = diff;
240 		break;
241 	    case VLNK:
242 		nfsstats.biocache_readlinks++;
243 		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
244 		if (!bp)
245 			return (EINTR);
246 		if ((bp->b_flags & B_DONE) == 0) {
247 			bp->b_flags |= B_READ;
248 			if (error = nfs_doio(bp, cred, p)) {
249 				brelse(bp);
250 				return (error);
251 			}
252 		}
253 		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
254 		got_buf = 1;
255 		on = 0;
256 		break;
257 	    case VDIR:
258 		nfsstats.biocache_readdirs++;
259 		bn = (daddr_t)uio->uio_offset;
260 		bp = nfs_getcacheblk(vp, bn, NFS_DIRBLKSIZ, p);
261 		if (!bp)
262 			return (EINTR);
263 		if ((bp->b_flags & B_DONE) == 0) {
264 			bp->b_flags |= B_READ;
265 			if (error = nfs_doio(bp, cred, p)) {
266 				brelse(bp);
267 				return (error);
268 			}
269 		}
270 
271 		/*
272 		 * If not eof and read aheads are enabled, start one.
273 		 * (You need the current block first, so that you have the
274 		 *  directory offset cookie of the next block.
275 		 */
276 		rabn = bp->b_blkno;
277 		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
278 		    rabn != 0 && rabn != np->n_direofoffset &&
279 		    !incore(vp, rabn)) {
280 			rabp = nfs_getcacheblk(vp, rabn, NFS_DIRBLKSIZ, p);
281 			if (rabp) {
282 			    if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
283 				rabp->b_flags |= (B_READ | B_ASYNC);
284 				if (nfs_asyncio(rabp, cred)) {
285 				    rabp->b_flags |= B_INVAL;
286 				    brelse(rabp);
287 				}
288 			    }
289 			}
290 		}
291 		on = 0;
292 		n = min(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid);
293 		got_buf = 1;
294 		break;
295 	    };
296 
297 	    if (n > 0) {
298 		if (!baddr)
299 			baddr = bp->b_data;
300 		error = uiomove(baddr + on, (int)n, uio);
301 	    }
302 	    switch (vp->v_type) {
303 	    case VREG:
304 		if (n + on == biosize || uio->uio_offset == np->n_size)
305 			bp->b_flags |= B_AGE;
306 		break;
307 	    case VLNK:
308 		n = 0;
309 		break;
310 	    case VDIR:
311 		uio->uio_offset = bp->b_blkno;
312 		break;
313 	    };
314 	    if (got_buf)
315 		brelse(bp);
316 	} while (error == 0 && uio->uio_resid > 0 && n > 0);
317 	return (error);
318 }
319 
320 /*
321  * Vnode op for write using bio
322  */
323 nfs_write(ap)
324 	struct vop_write_args /* {
325 		struct vnode *a_vp;
326 		struct uio *a_uio;
327 		int  a_ioflag;
328 		struct ucred *a_cred;
329 	} */ *ap;
330 {
331 	register int biosize;
332 	register struct uio *uio = ap->a_uio;
333 	struct proc *p = uio->uio_procp;
334 	register struct vnode *vp = ap->a_vp;
335 	struct nfsnode *np = VTONFS(vp);
336 	register struct ucred *cred = ap->a_cred;
337 	int ioflag = ap->a_ioflag;
338 	struct buf *bp;
339 	struct vattr vattr;
340 	struct nfsmount *nmp;
341 	daddr_t lbn, bn;
342 	int n, on, error = 0;
343 
344 #ifdef DIAGNOSTIC
345 	if (uio->uio_rw != UIO_WRITE)
346 		panic("nfs_write mode");
347 	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
348 		panic("nfs_write proc");
349 #endif
350 	if (vp->v_type != VREG)
351 		return (EIO);
352 	if (np->n_flag & NWRITEERR) {
353 		np->n_flag &= ~NWRITEERR;
354 		return (np->n_error);
355 	}
356 	if (ioflag & (IO_APPEND | IO_SYNC)) {
357 		if (np->n_flag & NMODIFIED) {
358 			np->n_attrstamp = 0;
359 			if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
360 				return (error);
361 		}
362 		if (ioflag & IO_APPEND) {
363 			np->n_attrstamp = 0;
364 			if (error = VOP_GETATTR(vp, &vattr, cred, p))
365 				return (error);
366 			uio->uio_offset = np->n_size;
367 		}
368 	}
369 	nmp = VFSTONFS(vp->v_mount);
370 	if (uio->uio_offset < 0)
371 		return (EINVAL);
372 	if (uio->uio_resid == 0)
373 		return (0);
374 	/*
375 	 * Maybe this should be above the vnode op call, but so long as
376 	 * file servers have no limits, I don't think it matters.
377 	 */
378 	if (p && uio->uio_offset + uio->uio_resid >
379 	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
380 		psignal(p, SIGXFSZ);
381 		return (EFBIG);
382 	}
383 	/*
384 	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
385 	 * will be the same size within a filesystem. nfs_writerpc will
386 	 * still use nm_wsize when sizing the rpc's.
387 	 */
388 	biosize = nmp->nm_rsize;
389 	do {
390 
391 		/*
392 		 * XXX make sure we aren't cached in the VM page cache
393 		 */
394 		(void)vnode_pager_uncache(vp);
395 
396 		/*
397 		 * Check for a valid write lease.
398 		 * If non-cachable, just do the rpc
399 		 */
400 		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
401 		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
402 			do {
403 				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
404 			} while (error == NQNFS_EXPIRED);
405 			if (error)
406 				return (error);
407 			if (np->n_lrev != np->n_brev ||
408 			    (np->n_flag & NQNFSNONCACHE)) {
409 				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
410 					return (error);
411 				np->n_brev = np->n_lrev;
412 			}
413 		}
414 		if (np->n_flag & NQNFSNONCACHE)
415 			return (nfs_writerpc(vp, uio, cred, ioflag));
416 		nfsstats.biocache_writes++;
417 		lbn = uio->uio_offset / biosize;
418 		on = uio->uio_offset & (biosize-1);
419 		n = min((unsigned)(biosize - on), uio->uio_resid);
420 		bn = lbn * (biosize / DEV_BSIZE);
421 again:
422 		bp = nfs_getcacheblk(vp, bn, biosize, p);
423 		if (!bp)
424 			return (EINTR);
425 		if (bp->b_wcred == NOCRED) {
426 			crhold(cred);
427 			bp->b_wcred = cred;
428 		}
429 		np->n_flag |= NMODIFIED;
430 		if (uio->uio_offset + n > np->n_size) {
431 			np->n_size = uio->uio_offset + n;
432 			vnode_pager_setsize(vp, (u_long)np->n_size);
433 		}
434 
435 		/*
436 		 * If the new write will leave a contiguous dirty
437 		 * area, just update the b_dirtyoff and b_dirtyend,
438 		 * otherwise force a write rpc of the old dirty area.
439 		 */
440 		if (bp->b_dirtyend > 0 &&
441 		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
442 			bp->b_proc = p;
443 			if (VOP_BWRITE(bp) == EINTR)
444 				return (EINTR);
445 			goto again;
446 		}
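		/*
		 * Note that b_dirtyoff/b_dirtyend describe a single contiguous
		 * range, which is why a write that would leave a gap forces
		 * the old dirty area out above before dirtying the new range.
		 */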
447 
448 		/*
449 	 * Check for a valid write lease and get one as required,
450 	 * in case getblk() and/or bwrite() delayed us.
451 		 */
452 		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
453 		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
454 			do {
455 				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
456 			} while (error == NQNFS_EXPIRED);
457 			if (error) {
458 				brelse(bp);
459 				return (error);
460 			}
461 			if (np->n_lrev != np->n_brev ||
462 			    (np->n_flag & NQNFSNONCACHE)) {
463 				brelse(bp);
464 				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
465 					return (error);
466 				np->n_brev = np->n_lrev;
467 				goto again;
468 			}
469 		}
470 		if (error = uiomove((char *)bp->b_data + on, n, uio)) {
471 			bp->b_flags |= B_ERROR;
472 			brelse(bp);
473 			return (error);
474 		}
475 		if (bp->b_dirtyend > 0) {
476 			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
477 			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
478 		} else {
479 			bp->b_dirtyoff = on;
480 			bp->b_dirtyend = on + n;
481 		}
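		/*
		 * Merge the newly dirtied range into the valid region.  If the
		 * old valid region and the new dirty range are disjoint, the
		 * valid region collapses to just the dirty range, since the
		 * bytes in between were never read from the server.
		 */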
482 #ifndef notdef
483 		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
484 		    bp->b_validoff > bp->b_dirtyend) {
485 			bp->b_validoff = bp->b_dirtyoff;
486 			bp->b_validend = bp->b_dirtyend;
487 		} else {
488 			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
489 			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
490 		}
491 #else
492 		bp->b_validoff = bp->b_dirtyoff;
493 		bp->b_validend = bp->b_dirtyend;
494 #endif
495 		if (ioflag & IO_APPEND)
496 			bp->b_flags |= B_APPENDWRITE;
497 
498 		/*
499 		 * If the lease is non-cachable or IO_SYNC do bwrite().
500 		 */
501 		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
502 			bp->b_proc = p;
503 			if (error = VOP_BWRITE(bp))
504 				return (error);
505 		} else if ((n + on) == biosize &&
506 			(nmp->nm_flag & NFSMNT_NQNFS) == 0) {
507 			bp->b_proc = (struct proc *)0;
508 			bawrite(bp);
509 		} else
510 			bdwrite(bp);
511 	} while (uio->uio_resid > 0 && n > 0);
512 	return (0);
513 }
514 
515 /*
516  * Get an nfs cache block.
517  * Allocate a new one if the block isn't currently in the cache
518  * and return the block marked busy. If the calling process is
519  * interrupted by a signal for an interruptible mount point, return
520  * NULL.
521  */
522 struct buf *
523 nfs_getcacheblk(vp, bn, size, p)
524 	struct vnode *vp;
525 	daddr_t bn;
526 	int size;
527 	struct proc *p;
528 {
529 	register struct buf *bp;
530 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
531 
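	/*
	 * On interruptible mounts getblk() sleeps with PCATCH; a NULL return
	 * means a signal arrived, so check whether it should abort the
	 * operation and otherwise retry with a 2 second timeout.
	 */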
532 	if (nmp->nm_flag & NFSMNT_INT) {
533 		bp = getblk(vp, bn, size, PCATCH, 0);
534 		while (bp == (struct buf *)0) {
535 			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
536 				return ((struct buf *)0);
537 			bp = getblk(vp, bn, size, 0, 2 * hz);
538 		}
539 	} else
540 		bp = getblk(vp, bn, size, 0, 0);
541 	return (bp);
542 }
543 
544 /*
545  * Flush and invalidate all dirty buffers. If another process is already
546  * doing the flush, just wait for completion.
547  */
548 nfs_vinvalbuf(vp, flags, cred, p, intrflg)
549 	struct vnode *vp;
550 	int flags;
551 	struct ucred *cred;
552 	struct proc *p;
553 	int intrflg;
554 {
555 	register struct nfsnode *np = VTONFS(vp);
556 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
557 	int error = 0, slpflag, slptimeo;
558 
559 	if ((nmp->nm_flag & NFSMNT_INT) == 0)
560 		intrflg = 0;
561 	if (intrflg) {
562 		slpflag = PCATCH;
563 		slptimeo = 2 * hz;
564 	} else {
565 		slpflag = 0;
566 		slptimeo = 0;
567 	}
568 	/*
569 	 * First wait for any other process doing a flush to complete.
570 	 */
571 	while (np->n_flag & NFLUSHINPROG) {
572 		np->n_flag |= NFLUSHWANT;
573 		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
574 			slptimeo);
575 		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
576 			return (EINTR);
577 	}
578 
579 	/*
580 	 * Now, flush as required.
581 	 */
582 	np->n_flag |= NFLUSHINPROG;
583 	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
584 	while (error) {
585 		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
586 			np->n_flag &= ~NFLUSHINPROG;
587 			if (np->n_flag & NFLUSHWANT) {
588 				np->n_flag &= ~NFLUSHWANT;
589 				wakeup((caddr_t)&np->n_flag);
590 			}
591 			return (EINTR);
592 		}
593 		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
594 	}
595 	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
596 	if (np->n_flag & NFLUSHWANT) {
597 		np->n_flag &= ~NFLUSHWANT;
598 		wakeup((caddr_t)&np->n_flag);
599 	}
600 	return (0);
601 }
602 
603 /*
604  * Initiate asynchronous I/O. Return an error if no nfsiods are available.
605  * This is mainly to avoid queueing async I/O requests when the nfsiods
606  * are all hung on a dead server.
607  */
608 nfs_asyncio(bp, cred)
609 	register struct buf *bp;
610 	struct ucred *cred;
611 {
612 	register int i;
613 
614 	if (nfs_numasync == 0)
615 		return (EIO);
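	/*
	 * Scan for an idle nfsiod: a non-zero nfs_iodwant[] entry is a daemon
	 * sleeping on that slot.  Give the buffer the appropriate credential,
	 * queue it on nfs_bufq and wake the daemon.
	 */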
616 	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
617 	    if (nfs_iodwant[i]) {
618 		if (bp->b_flags & B_READ) {
619 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
620 				crhold(cred);
621 				bp->b_rcred = cred;
622 			}
623 		} else {
624 			if (bp->b_wcred == NOCRED && cred != NOCRED) {
625 				crhold(cred);
626 				bp->b_wcred = cred;
627 			}
628 		}
629 
630 		TAILQ_INSERT_TAIL(&nfs_bufq, bp, b_freelist);
631 		nfs_iodwant[i] = (struct proc *)0;
632 		wakeup((caddr_t)&nfs_iodwant[i]);
633 		return (0);
634 	    }
635 	return (EIO);
636 }
637 
638 /*
639  * Do an I/O operation to/from a cache block. This may be called
640  * synchronously or from an nfsiod.
641  */
642 int
643 nfs_doio(bp, cr, p)
644 	register struct buf *bp;
645 	struct ucred *cr;
646 	struct proc *p;
647 {
648 	register struct uio *uiop;
649 	register struct vnode *vp;
650 	struct nfsnode *np;
651 	struct nfsmount *nmp;
652 	int error, diff, len;
653 	struct uio uio;
654 	struct iovec io;
655 
656 	vp = bp->b_vp;
657 	np = VTONFS(vp);
658 	nmp = VFSTONFS(vp->v_mount);
659 	uiop = &uio;
660 	uiop->uio_iov = &io;
661 	uiop->uio_iovcnt = 1;
662 	uiop->uio_segflg = UIO_SYSSPACE;
663 	uiop->uio_procp = p;
664 
665 	/*
666 	 * Historically, paging was done with physio, but no more.
667 	 */
668 	if (bp->b_flags & B_PHYS)
669 	    panic("doio phys");
670 	if (bp->b_flags & B_READ) {
671 	    io.iov_len = uiop->uio_resid = bp->b_bcount;
672 	    io.iov_base = bp->b_data;
673 	    uiop->uio_rw = UIO_READ;
674 	    switch (vp->v_type) {
675 	    case VREG:
676 		uiop->uio_offset = bp->b_blkno * DEV_BSIZE;
677 		nfsstats.read_bios++;
678 		error = nfs_readrpc(vp, uiop, cr);
679 		if (!error) {
680 		    bp->b_validoff = 0;
681 		    if (uiop->uio_resid) {
682 			/*
683 			 * If len > 0, there is a hole in the file and
684 			 * no writes after the hole have been pushed to
685 			 * the server yet.
686 			 * Just zero fill the rest of the valid area.
687 			 */
688 			diff = bp->b_bcount - uiop->uio_resid;
689 			len = np->n_size - (bp->b_blkno * DEV_BSIZE
690 				+ diff);
691 			if (len > 0) {
692 			    len = min(len, uiop->uio_resid);
693 			    bzero((char *)bp->b_data + diff, len);
694 			    bp->b_validend = diff + len;
695 			} else
696 			    bp->b_validend = diff;
697 		    } else
698 			bp->b_validend = bp->b_bcount;
699 		}
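		/*
		 * If the file is an executing text image and it has changed on
		 * the server (new lease revision for nqnfs, new modify time
		 * otherwise), the in-core image is no longer consistent, so
		 * the process is killed.
		 */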
700 		if (p && (vp->v_flag & VTEXT) &&
701 			(((nmp->nm_flag & NFSMNT_NQNFS) &&
702 			  np->n_lrev != np->n_brev) ||
703 			 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
704 			  np->n_mtime != np->n_vattr.va_mtime.ts_sec))) {
705 			uprintf("Process killed due to text file modification\n");
706 			psignal(p, SIGKILL);
707 			p->p_flag |= P_NOSWAP;
708 		}
709 		break;
710 	    case VLNK:
711 		uiop->uio_offset = 0;
712 		nfsstats.readlink_bios++;
713 		error = nfs_readlinkrpc(vp, uiop, cr);
714 		break;
715 	    case VDIR:
716 		uiop->uio_offset = bp->b_lblkno;
717 		nfsstats.readdir_bios++;
718 		if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS)
719 		    error = nfs_readdirlookrpc(vp, uiop, cr);
720 		else
721 		    error = nfs_readdirrpc(vp, uiop, cr);
722 		/*
723 		 * Save offset cookie in b_blkno.
724 		 */
725 		bp->b_blkno = uiop->uio_offset;
726 		break;
727 	    };
728 	    if (error) {
729 		bp->b_flags |= B_ERROR;
730 		bp->b_error = error;
731 	    }
732 	} else {
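	    /*
	     * Write case: push only the dirty subrange of the block,
	     * [b_dirtyoff, b_dirtyend), at the matching file offset.
	     */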
733 	    io.iov_len = uiop->uio_resid = bp->b_dirtyend
734 		- bp->b_dirtyoff;
735 	    uiop->uio_offset = (bp->b_blkno * DEV_BSIZE)
736 		+ bp->b_dirtyoff;
737 	    io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
738 	    uiop->uio_rw = UIO_WRITE;
739 	    nfsstats.write_bios++;
740 	    if (bp->b_flags & B_APPENDWRITE)
741 		error = nfs_writerpc(vp, uiop, cr, IO_APPEND);
742 	    else
743 		error = nfs_writerpc(vp, uiop, cr, 0);
744 	    bp->b_flags &= ~(B_WRITEINPROG | B_APPENDWRITE);
745 
746 	    /*
747 	     * For an interrupted write, the buffer is still valid and the
748 	     * write hasn't been pushed to the server yet, so we can't set
749 	     * B_ERROR; instead, report the interruption by setting B_EINTR.
750 	     * For the B_ASYNC case, B_EINTR is not relevant, so the rpc
751 	     * attempt is essentially a noop.
752 	     */
753 	    if (error == EINTR) {
754 		bp->b_flags &= ~B_INVAL;
755 		bp->b_flags |= B_DELWRI;
756 
757 		/*
758 		 * Since for the B_ASYNC case, nfs_bwrite() has reassigned the
759 		 * buffer to the clean list, we have to reassign it back to the
760 		 * dirty one. Ugh.
761 		 */
762 		if (bp->b_flags & B_ASYNC)
763 		    reassignbuf(bp, vp);
764 		else
765 		    bp->b_flags |= B_EINTR;
766 	    } else {
767 		if (error) {
768 		    bp->b_flags |= B_ERROR;
769 		    bp->b_error = np->n_error = error;
770 		    np->n_flag |= NWRITEERR;
771 		}
772 		bp->b_dirtyoff = bp->b_dirtyend = 0;
773 	    }
774 	}
775 	bp->b_resid = uiop->uio_resid;
776 	biodone(bp);
777 	return (error);
778 }
779