/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 * $FreeBSD: /repoman/r/ncvs/src/sys/nfsclient/nfs_bio.c,v 1.130 2004/04/14 23:23:55 peadar Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <vm/vm_page2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"


static struct buf *nfs_getcacheblk(struct vnode *vp, off_t loffset,
                                   int size, struct thread *td);
static int nfs_check_dirent(struct nfs_dirent *dp, int maxlen);
static void nfsiodone_sync(struct bio *bio);
static void nfs_readrpc_bio_done(nfsm_info_t info);
static void nfs_writerpc_bio_done(nfsm_info_t info);
static void nfs_commitrpc_bio_done(nfsm_info_t info);

static __inline
void
nfs_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag)
{
        struct nfsnode *np = VTONFS(vp);
        int biosize, i;
        struct buf *bp, *rabp;
        struct vattr vattr;
        struct thread *td;
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);
        off_t lbn, rabn;
        off_t raoffset;
        off_t loffset;
        int seqcount;
        int nra, error = 0;
        int boff = 0;
        size_t n;

#ifdef DIAGNOSTIC
        if (uio->uio_rw != UIO_READ)
                panic("nfs_read mode");
#endif
        if (uio->uio_resid == 0)
                return (0);
        if (uio->uio_offset < 0)        /* XXX VDIR cookies can be negative */
                return (EINVAL);
        td = uio->uio_td;

        if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
            (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
                (void)nfs_fsinfo(nmp, vp, td);
        if (vp->v_type != VDIR &&
            (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
                return (EFBIG);
        biosize = vp->v_mount->mnt_stat.f_iosize;
        seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / MAXBSIZE);

        /*
         * For nfs, cache consistency can only be maintained approximately.
         * Although RFC1094 does not specify the criteria, the following is
         * believed to be compatible with the reference port.
         *
         * NFS: If local changes have been made and this is a
         *      directory, the directory must be invalidated and
         *      the attribute cache must be cleared.
         *
         *      GETATTR is called to synchronize the file size.  To
         *      avoid a deadlock against the VM system, we cannot do
         *      this for UIO_NOCOPY reads.
         *
         *      If remote changes are detected local data is flushed
         *      and the cache is invalidated.
         *
         *      NOTE: In the normal case the attribute cache is not
         *      cleared which means GETATTR may use cached data and
         *      not immediately detect changes made on the server.
         */
        if ((np->n_flag & NLMODIFIED) && vp->v_type == VDIR) {
                nfs_invaldir(vp);
                error = nfs_vinvalbuf(vp, V_SAVE, 1);
                if (error)
                        return (error);
                np->n_attrstamp = 0;
        }

        /*
         * Synchronize the file size when possible.  We can't do this without
         * risking a deadlock if this is a NOCOPY read from a vm_fault->getpages
         * sequence.
         */
        if (uio->uio_segflg != UIO_NOCOPY) {
                error = VOP_GETATTR(vp, &vattr);
                if (error)
                        return (error);
        }

        /*
         * This can deadlock getpages/putpages for regular
         * files.  Only do it for directories.
         */
        if (np->n_flag & NRMODIFIED) {
                if (vp->v_type == VDIR) {
                        nfs_invaldir(vp);
                        error = nfs_vinvalbuf(vp, V_SAVE, 1);
                        if (error)
                                return (error);
                        np->n_flag &= ~NRMODIFIED;
                }
        }

        /*
         * Loop until uio exhausted or we hit EOF
         */
        do {
                bp = NULL;

                switch (vp->v_type) {
                case VREG:
                        nfsstats.biocache_reads++;
                        lbn = uio->uio_offset / biosize;
                        boff = uio->uio_offset & (biosize - 1);
                        loffset = lbn * biosize;

                        /*
                         * Start the read ahead(s), as required.
                         */
                        if (nmp->nm_readahead > 0 && nfs_asyncok(nmp)) {
                                for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
                                     (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
                                        rabn = lbn + 1 + nra;
                                        raoffset = rabn * biosize;
                                        if (findblk(vp, raoffset, FINDBLK_TEST) == NULL) {
                                                rabp = nfs_getcacheblk(vp, raoffset, biosize, td);
                                                if (!rabp)
                                                        return (EINTR);
                                                if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
                                                        rabp->b_cmd = BUF_CMD_READ;
                                                        vfs_busy_pages(vp, rabp);
                                                        nfs_asyncio(vp, &rabp->b_bio2);
                                                } else {
                                                        brelse(rabp);
                                                }
                                        }
                                }
                        }

                        /*
                         * Obtain the buffer cache block.  Figure out the buffer size
                         * when we are at EOF.  If we are modifying the size of the
                         * buffer based on an EOF condition we need to hold
                         * nfs_rslock() through obtaining the buffer to prevent
                         * a potential writer-appender from messing with n_size.
                         * Otherwise we may accidentally truncate the buffer and
                         * lose dirty data.
                         *
                         * Note that bcount is *not* DEV_BSIZE aligned.
                         */
                        if (loffset + boff >= np->n_size) {
                                n = 0;
                                break;
                        }
                        bp = nfs_getcacheblk(vp, loffset, biosize, td);

                        if (bp == NULL)
                                return (EINTR);

                        /*
                         * If B_CACHE is not set, we must issue the read.  If this
                         * fails, we return an error.
                         */
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bp->b_cmd = BUF_CMD_READ;
                                bp->b_bio2.bio_done = nfsiodone_sync;
                                bp->b_bio2.bio_flags |= BIO_SYNC;
                                vfs_busy_pages(vp, bp);
                                error = nfs_doio(vp, &bp->b_bio2, td);
                                if (error) {
                                        brelse(bp);
                                        return (error);
                                }
                        }

                        /*
                         * on is the offset into the current bp.  Figure out how many
                         * bytes we can copy out of the bp.  Note that bcount is
                         * NOT DEV_BSIZE aligned.
                         *
                         * Then figure out how many bytes we can copy into the uio.
                         */
                        n = biosize - boff;
                        if (n > uio->uio_resid)
                                n = uio->uio_resid;
                        if (loffset + boff + n > np->n_size)
                                n = np->n_size - loffset - boff;
                        break;
                case VLNK:
                        biosize = min(NFS_MAXPATHLEN, np->n_size);
                        nfsstats.biocache_readlinks++;
                        bp = nfs_getcacheblk(vp, (off_t)0, biosize, td);
                        if (bp == NULL)
                                return (EINTR);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bp->b_cmd = BUF_CMD_READ;
                                bp->b_bio2.bio_done = nfsiodone_sync;
                                bp->b_bio2.bio_flags |= BIO_SYNC;
                                vfs_busy_pages(vp, bp);
                                error = nfs_doio(vp, &bp->b_bio2, td);
                                if (error) {
                                        bp->b_flags |= B_ERROR | B_INVAL;
                                        brelse(bp);
                                        return (error);
                                }
                        }
                        n = szmin(uio->uio_resid, (size_t)bp->b_bcount - bp->b_resid);
                        boff = 0;
                        break;
                case VDIR:
                        nfsstats.biocache_readdirs++;
                        if (np->n_direofoffset &&
                            uio->uio_offset >= np->n_direofoffset
                        ) {
                                return (0);
                        }
                        lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
                        boff = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
                        loffset = uio->uio_offset - boff;
                        bp = nfs_getcacheblk(vp, loffset, NFS_DIRBLKSIZ, td);
                        if (bp == NULL)
                                return (EINTR);

                        if ((bp->b_flags & B_CACHE) == 0) {
                                bp->b_cmd = BUF_CMD_READ;
                                bp->b_bio2.bio_done = nfsiodone_sync;
                                bp->b_bio2.bio_flags |= BIO_SYNC;
                                vfs_busy_pages(vp, bp);
                                error = nfs_doio(vp, &bp->b_bio2, td);
                                if (error)
                                        brelse(bp);
                                while (error == NFSERR_BAD_COOKIE) {
                                        kprintf("got bad cookie vp %p bp %p\n", vp, bp);
                                        nfs_invaldir(vp);
                                        error = nfs_vinvalbuf(vp, 0, 1);
                                        /*
                                         * Yuck! The directory has been modified on the
                                         * server.  The only way to get the block is by
                                         * reading from the beginning to get all the
                                         * offset cookies.
                                         *
                                         * Leave the last bp intact unless there is an error.
                                         * Loop back up to the while if the error is another
                                         * NFSERR_BAD_COOKIE (double yuch!).
                                         */
                                        for (i = 0; i <= lbn && !error; i++) {
                                                if (np->n_direofoffset
                                                    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
                                                        return (0);
                                                bp = nfs_getcacheblk(vp, (off_t)i * NFS_DIRBLKSIZ,
                                                                     NFS_DIRBLKSIZ, td);
                                                if (!bp)
                                                        return (EINTR);
                                                if ((bp->b_flags & B_CACHE) == 0) {
                                                        bp->b_cmd = BUF_CMD_READ;
                                                        bp->b_bio2.bio_done = nfsiodone_sync;
                                                        bp->b_bio2.bio_flags |= BIO_SYNC;
                                                        vfs_busy_pages(vp, bp);
                                                        error = nfs_doio(vp, &bp->b_bio2, td);
                                                        /*
                                                         * no error + B_INVAL == directory EOF,
                                                         * use the block.
                                                         */
                                                        if (error == 0 && (bp->b_flags & B_INVAL))
                                                                break;
                                                }
                                                /*
                                                 * An error will throw away the block and the
                                                 * for loop will break out.  If no error and this
                                                 * is not the block we want, we throw away the
                                                 * block and go for the next one via the for loop.
                                                 */
                                                if (error || i < lbn)
                                                        brelse(bp);
                                        }
                                }
                                /*
                                 * The above while is repeated if we hit another cookie
                                 * error.  If we hit an error and it wasn't a cookie error,
                                 * we give up.
                                 */
                                if (error)
                                        return (error);
                        }

                        /*
                         * If not eof and read aheads are enabled, start one.
                         * (You need the current block first, so that you have the
                         *  directory offset cookie of the next block.)
                         */
                        if (nmp->nm_readahead > 0 && nfs_asyncok(nmp) &&
                            (bp->b_flags & B_INVAL) == 0 &&
                            (np->n_direofoffset == 0 ||
                             loffset + NFS_DIRBLKSIZ < np->n_direofoffset) &&
                            findblk(vp, loffset + NFS_DIRBLKSIZ, FINDBLK_TEST) == NULL
                        ) {
                                rabp = nfs_getcacheblk(vp, loffset + NFS_DIRBLKSIZ,
                                                       NFS_DIRBLKSIZ, td);
                                if (rabp) {
                                        if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
                                                rabp->b_cmd = BUF_CMD_READ;
                                                vfs_busy_pages(vp, rabp);
                                                nfs_asyncio(vp, &rabp->b_bio2);
                                        } else {
                                                brelse(rabp);
                                        }
                                }
                        }
                        /*
                         * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
                         * chopped for the EOF condition, we cannot tell how large
                         * NFS directories are going to be until we hit EOF.  So
                         * an NFS directory buffer is *not* chopped to its EOF.  Now,
                         * it just so happens that b_resid will effectively chop it
                         * to EOF.  *BUT* this information is lost if the buffer goes
                         * away and is reconstituted into a B_CACHE state ( due to
                         * being VMIO ) later.  So we keep track of the directory eof
                         * in np->n_direofoffset and chop it off as an extra step
                         * right here.
                         *
                         * NOTE: boff could already be beyond EOF.
                         */
                        if ((size_t)boff > NFS_DIRBLKSIZ - bp->b_resid) {
                                n = 0;
                        } else {
                                n = szmin(uio->uio_resid,
                                          NFS_DIRBLKSIZ - bp->b_resid - (size_t)boff);
                        }
                        if (np->n_direofoffset &&
                            n > (size_t)(np->n_direofoffset - uio->uio_offset)) {
                                n = (size_t)(np->n_direofoffset - uio->uio_offset);
                        }
                        break;
                default:
                        kprintf(" nfs_bioread: type %x unexpected\n",vp->v_type);
                        n = 0;
                        break;
                }

                switch (vp->v_type) {
                case VREG:
                        if (n > 0)
                                error = uiomovebp(bp, bp->b_data + boff, n, uio);
                        break;
                case VLNK:
                        if (n > 0)
                                error = uiomovebp(bp, bp->b_data + boff, n, uio);
                        n = 0;
                        break;
                case VDIR:
                        if (n > 0) {
                                off_t old_off = uio->uio_offset;
                                caddr_t cpos, epos;
                                struct nfs_dirent *dp;

                                /*
                                 * We are casting cpos to nfs_dirent, it must be
                                 * int-aligned.
                                 */
                                if (boff & 3) {
                                        error = EINVAL;
                                        break;
                                }

                                cpos = bp->b_data + boff;
                                epos = bp->b_data + boff + n;
                                while (cpos < epos && error == 0 && uio->uio_resid > 0) {
                                        dp = (struct nfs_dirent *)cpos;
                                        error = nfs_check_dirent(dp, (int)(epos - cpos));
                                        if (error)
                                                break;
                                        if (vop_write_dirent(&error, uio, dp->nfs_ino,
                                            dp->nfs_type, dp->nfs_namlen, dp->nfs_name)) {
                                                break;
                                        }
                                        cpos += dp->nfs_reclen;
                                }
                                n = 0;
                                if (error == 0) {
                                        uio->uio_offset = old_off + cpos -
                                                          bp->b_data - boff;
                                }
                        }
                        break;
                default:
                        kprintf(" nfs_bioread: type %x unexpected\n",vp->v_type);
                }
                if (bp)
                        brelse(bp);
        } while (error == 0 && uio->uio_resid > 0 && n > 0);
        return (error);
}
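
/*
 * Illustrative sketch (not compiled in): how the VREG case above maps a
 * uio offset onto a buffer cache block.  Assuming a power-of-two biosize
 * of 8192, an offset of 12345 yields lbn 1, boff 4153 and loffset 8192,
 * so the copy starts 4153 bytes into the second block.  The helper name
 * is hypothetical and exists only for documentation.
 */
#if 0
static void
nfs_bioread_blkmath_example(struct uio *uio, int biosize)
{
        off_t lbn = uio->uio_offset / biosize;          /* block index */
        int boff = uio->uio_offset & (biosize - 1);     /* offset in block */
        off_t loffset = lbn * biosize;                  /* block base offset */

        kprintf("lbn=%jd boff=%d loffset=%jd\n",
                (intmax_t)lbn, boff, (intmax_t)loffset);
}
#endif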

/*
 * Userland can supply any 'seek' offset when reading a NFS directory.
 * Validate the structure so we don't panic the kernel.  Note that
 * the element name is nul terminated and the nul is not included
 * in nfs_namlen.
 */
static
int
nfs_check_dirent(struct nfs_dirent *dp, int maxlen)
{
        int nfs_name_off = offsetof(struct nfs_dirent, nfs_name[0]);

        if (nfs_name_off >= maxlen)
                return (EINVAL);
        if (dp->nfs_reclen < nfs_name_off || dp->nfs_reclen > maxlen)
                return (EINVAL);
        if (nfs_name_off + dp->nfs_namlen >= dp->nfs_reclen)
                return (EINVAL);
        if (dp->nfs_reclen & 3)
                return (EINVAL);
        return (0);
}
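
/*
 * Illustrative sketch (not compiled in): a minimal record that passes
 * the checks above.  With a 3-character name the name field needs 4
 * bytes including the terminating nul, and nfs_reclen must be rounded
 * up to a 4-byte boundary and cover the whole record.  The helper name
 * and local buffer are hypothetical.
 */
#if 0
static void
nfs_check_dirent_example(void)
{
        int space[16];                  /* int-aligned backing store */
        struct nfs_dirent *dp = (struct nfs_dirent *)space;

        dp->nfs_namlen = 3;             /* "abc", nul not counted */
        bcopy("abc", dp->nfs_name, 4);
        dp->nfs_reclen = roundup2(offsetof(struct nfs_dirent, nfs_name[0]) +
                                  dp->nfs_namlen + 1, 4);
        KKASSERT(nfs_check_dirent(dp, dp->nfs_reclen) == 0);
}
#endif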

/*
 * Vnode op for write using bio
 *
 * nfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
int
nfs_write(struct vop_write_args *ap)
{
        struct uio *uio = ap->a_uio;
        struct thread *td = uio->uio_td;
        struct vnode *vp = ap->a_vp;
        struct nfsnode *np = VTONFS(vp);
        int ioflag = ap->a_ioflag;
        struct buf *bp;
        struct vattr vattr;
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);
        off_t loffset;
        int boff, bytes;
        int error = 0;
        int haverslock = 0;
        int bcount;
        int biosize;
        int trivial;
        int kflags = 0;

#ifdef DIAGNOSTIC
        if (uio->uio_rw != UIO_WRITE)
                panic("nfs_write mode");
        if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
                panic("nfs_write proc");
#endif
        if (vp->v_type != VREG)
                return (EIO);

        lwkt_gettoken(&nmp->nm_token);

        if (np->n_flag & NWRITEERR) {
                np->n_flag &= ~NWRITEERR;
                lwkt_reltoken(&nmp->nm_token);
                return (np->n_error);
        }
        if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
            (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
                (void)nfs_fsinfo(nmp, vp, td);
        }

        /*
         * Synchronously flush pending buffers if we are in synchronous
         * mode or if we are appending.
         */
        if (ioflag & (IO_APPEND | IO_SYNC)) {
                if (np->n_flag & NLMODIFIED) {
                        np->n_attrstamp = 0;
                        error = nfs_flush(vp, MNT_WAIT, td, 0);
                        /* error = nfs_vinvalbuf(vp, V_SAVE, 1); */
                        if (error)
                                goto done;
                }
        }

        /*
         * If IO_APPEND then load uio_offset.  We restart here if we cannot
         * get the append lock.
         */
restart:
        if (ioflag & IO_APPEND) {
                np->n_attrstamp = 0;
                error = VOP_GETATTR(vp, &vattr);
                if (error)
                        goto done;
                uio->uio_offset = np->n_size;
        }

        if (uio->uio_offset < 0) {
                error = EINVAL;
                goto done;
        }
        if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize) {
                error = EFBIG;
                goto done;
        }
        if (uio->uio_resid == 0) {
                error = 0;
                goto done;
        }

        /*
         * We need to obtain the rslock if we intend to modify np->n_size
         * in order to guarantee the append point with multiple contending
         * writers, to guarantee that no other appenders modify n_size
         * while we are trying to obtain a truncated buffer (i.e. to avoid
         * accidentally truncating data written by another appender due to
         * the race), and to ensure that the buffer is populated prior to
         * our extending of the file.  We hold rslock through the entire
         * operation.
         *
         * Note that we do not synchronize the case where someone truncates
         * the file while we are appending to it because attempting to lock
         * this case may deadlock other parts of the system unexpectedly.
         */
        if ((ioflag & IO_APPEND) ||
            uio->uio_offset + uio->uio_resid > np->n_size) {
                switch(nfs_rslock(np)) {
                case ENOLCK:
                        goto restart;
                        /* not reached */
                case EINTR:
                case ERESTART:
                        error = EINTR;
                        goto done;
                        /* not reached */
                default:
                        break;
                }
                haverslock = 1;
        }

        /*
         * Maybe this should be above the vnode op call, but so long as
         * file servers have no limits, I don't think it matters.
         */
        if (td && td->td_proc && uio->uio_offset + uio->uio_resid >
            td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
                if (haverslock)
                        nfs_rsunlock(np);
                error = EFBIG;
                goto done;
        }

        biosize = vp->v_mount->mnt_stat.f_iosize;

        do {
                nfsstats.biocache_writes++;
                boff = uio->uio_offset & (biosize-1);
                loffset = uio->uio_offset - boff;
                bytes = (int)szmin((unsigned)(biosize - boff), uio->uio_resid);
again:
                /*
                 * Handle direct append and file extension cases, calculate
                 * unaligned buffer size.  When extending B_CACHE will be
                 * set if possible.  See UIO_NOCOPY note below.
                 */
                if (uio->uio_offset + bytes > np->n_size) {
                        np->n_flag |= NLMODIFIED;
                        trivial = (uio->uio_segflg != UIO_NOCOPY &&
                                   uio->uio_offset <= np->n_size) ?
                                  NVEXTF_TRIVIAL : 0;
                        nfs_meta_setsize(vp, td, uio->uio_offset + bytes,
                                         trivial);
                        kflags |= NOTE_EXTEND;
                }
                bp = nfs_getcacheblk(vp, loffset, biosize, td);
                if (bp == NULL) {
                        error = EINTR;
                        break;
                }

                /*
                 * Actual bytes in buffer which we care about
                 */
                if (loffset + biosize < np->n_size)
                        bcount = biosize;
                else
                        bcount = (int)(np->n_size - loffset);

                /*
                 * Avoid a read by setting B_CACHE where the data we
                 * intend to write covers the entire buffer.  Note
                 * that the buffer may have been set to B_CACHE by
                 * nfs_meta_setsize() above or otherwise inherited the
                 * flag, but if B_CACHE isn't set the buffer may be
                 * uninitialized and must be zero'd to accommodate
                 * future seek+write's.
                 *
                 * See the comments in kern/vfs_bio.c's getblk() for
                 * more information.
                 *
                 * When doing a UIO_NOCOPY write the buffer is not
                 * overwritten and we cannot just set B_CACHE unconditionally
                 * for full-block writes.
                 */
                if (boff == 0 && bytes == biosize &&
                    uio->uio_segflg != UIO_NOCOPY) {
                        bp->b_flags |= B_CACHE;
                        bp->b_flags &= ~(B_ERROR | B_INVAL);
                }

                /*
                 * b_resid may be set due to file EOF if we extended out.
                 * The NFS bio code will zero the difference anyway so
                 * just acknowledge the fact and set b_resid to 0.
                 */
                if ((bp->b_flags & B_CACHE) == 0) {
                        bp->b_cmd = BUF_CMD_READ;
                        bp->b_bio2.bio_done = nfsiodone_sync;
                        bp->b_bio2.bio_flags |= BIO_SYNC;
                        vfs_busy_pages(vp, bp);
                        error = nfs_doio(vp, &bp->b_bio2, td);
                        if (error) {
                                brelse(bp);
                                break;
                        }
                        bp->b_resid = 0;
                }
                np->n_flag |= NLMODIFIED;
                kflags |= NOTE_WRITE;

                /*
                 * If dirtyend exceeds file size, chop it down.  This should
                 * not normally occur but there is an append race where it
                 * might occur XXX, so we log it.
                 *
                 * If the chopping creates a reverse-indexed or degenerate
                 * situation with dirtyoff/end, we 0 both of them.
                 */
                if (bp->b_dirtyend > bcount) {
                        kprintf("NFS append race @%08llx:%d\n",
                                (long long)bp->b_bio2.bio_offset,
                                bp->b_dirtyend - bcount);
                        bp->b_dirtyend = bcount;
                }

                if (bp->b_dirtyoff >= bp->b_dirtyend)
                        bp->b_dirtyoff = bp->b_dirtyend = 0;

                /*
                 * If the new write will leave a contiguous dirty
                 * area, just update the b_dirtyoff and b_dirtyend,
                 * otherwise force a write rpc of the old dirty area.
                 *
                 * While it is possible to merge discontiguous writes due to
                 * our having a B_CACHE buffer ( and thus valid read data
                 * for the hole), we don't because it could lead to
                 * significant cache coherency problems with multiple clients,
                 * especially if locking is implemented later on.
                 *
                 * As an optimization we could theoretically maintain
                 * a linked list of discontinuous areas, but we would still
                 * have to commit them separately so there isn't much
                 * advantage to it except perhaps a bit of asynchronization.
                 */
                if (bp->b_dirtyend > 0 &&
                    (boff > bp->b_dirtyend ||
                     (boff + bytes) < bp->b_dirtyoff)
                ) {
                        if (bwrite(bp) == EINTR) {
                                error = EINTR;
                                break;
                        }
                        goto again;
                }

                error = uiomovebp(bp, bp->b_data + boff, bytes, uio);

                /*
                 * Since this block is being modified, it must be written
                 * again and not just committed.  Since write clustering does
                 * not work for the stage 1 data write, only the stage 2
                 * commit rpc, we have to clear B_CLUSTEROK as well.
                 */
                bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

                if (error) {
                        brelse(bp);
                        break;
                }

                /*
                 * Only update dirtyoff/dirtyend if not a degenerate
                 * condition.
                 *
                 * The underlying VM pages have been marked valid by
                 * virtue of acquiring the bp.  Because the entire buffer
                 * is marked dirty we do not have to worry about cleaning
                 * out the related dirty bits (and wouldn't really know
                 * how to deal with byte ranges anyway)
                 */
                if (bytes) {
                        if (bp->b_dirtyend > 0) {
                                bp->b_dirtyoff = imin(boff, bp->b_dirtyoff);
                                bp->b_dirtyend = imax(boff + bytes,
                                                      bp->b_dirtyend);
                        } else {
                                bp->b_dirtyoff = boff;
                                bp->b_dirtyend = boff + bytes;
                        }
                }

                /*
                 * If the lease is non-cachable or IO_SYNC do bwrite().
                 *
                 * IO_INVAL appears to be unused.  The idea appears to be
                 * to turn off caching in this case.  Very odd.  XXX
                 *
                 * If nfs_async is set bawrite() will use an unstable write
                 * (build dirty bufs on the server), so we might as well
                 * push it out with bawrite().  If nfs_async is not set we
                 * use bdwrite() to cache dirty bufs on the client.
                 */
                if (ioflag & IO_SYNC) {
                        if (ioflag & IO_INVAL)
                                bp->b_flags |= B_NOCACHE;
                        error = bwrite(bp);
                        if (error)
                                break;
                } else if (boff + bytes == biosize && nfs_async) {
                        bawrite(bp);
                } else {
                        bdwrite(bp);
                }
        } while (uio->uio_resid > 0 && bytes > 0);

        if (haverslock)
                nfs_rsunlock(np);

done:
        nfs_knote(vp, kflags);
        lwkt_reltoken(&nmp->nm_token);
        return (error);
}
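
/*
 * Illustrative sketch (not compiled in): the dirty-region rule applied
 * by nfs_write() above.  A new write is merged into b_dirtyoff/b_dirtyend
 * only when it touches or overlaps the existing dirty area; otherwise the
 * old area is pushed out with bwrite() first.  The helper name is
 * hypothetical.
 */
#if 0
static int
nfs_write_is_contig_example(struct buf *bp, int boff, int bytes)
{
        if (bp->b_dirtyend == 0)
                return (1);             /* no dirty area yet, trivially ok */
        if (boff > bp->b_dirtyend || boff + bytes < bp->b_dirtyoff)
                return (0);             /* hole between the two regions */
        return (1);                     /* touching or overlapping */
}
#endif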

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_startio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, off_t loffset, int size, struct thread *td)
{
        struct buf *bp;
        struct mount *mp;
        struct nfsmount *nmp;

        mp = vp->v_mount;
        nmp = VFSTONFS(mp);

        if (nmp->nm_flag & NFSMNT_INT) {
                bp = getblk(vp, loffset, size, GETBLK_PCATCH, 0);
                while (bp == NULL) {
                        if (nfs_sigintr(nmp, NULL, td))
                                return (NULL);
                        bp = getblk(vp, loffset, size, 0, 2 * hz);
                }
        } else {
                bp = getblk(vp, loffset, size, 0, 0);
        }

        /*
         * bio2, the 'device' layer.  Since BIOs use 64 bit byte offsets
         * now, no translation is necessary.
         */
        bp->b_bio2.bio_offset = loffset;
        return (bp);
}
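
/*
 * Illustrative sketch (not compiled in): the canonical synchronous read
 * pattern built on nfs_getcacheblk(), as used throughout nfs_bioread()
 * above.  A NULL return means an interruptible mount took a signal.  The
 * helper name is hypothetical.
 */
#if 0
static int
nfs_read_block_example(struct vnode *vp, off_t loffset, int biosize,
                       struct thread *td)
{
        struct buf *bp;
        int error;

        bp = nfs_getcacheblk(vp, loffset, biosize, td);
        if (bp == NULL)
                return (EINTR);
        if ((bp->b_flags & B_CACHE) == 0) {
                bp->b_cmd = BUF_CMD_READ;
                bp->b_bio2.bio_done = nfsiodone_sync;
                bp->b_bio2.bio_flags |= BIO_SYNC;
                vfs_busy_pages(vp, bp);
                error = nfs_doio(vp, &bp->b_bio2, td);
                if (error) {
                        brelse(bp);
                        return (error);
                }
        }
        brelse(bp);
        return (0);
}
#endif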

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, int intrflg)
{
        struct nfsnode *np = VTONFS(vp);
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);
        int error = 0, slpflag, slptimeo;
        thread_t td = curthread;

        if (vp->v_flag & VRECLAIMED)
                return (0);

        if ((nmp->nm_flag & NFSMNT_INT) == 0)
                intrflg = 0;
        if (intrflg) {
                slpflag = PCATCH;
                slptimeo = 2 * hz;
        } else {
                slpflag = 0;
                slptimeo = 0;
        }
        /*
         * First wait for any other process doing a flush to complete.
         */
        while (np->n_flag & NFLUSHINPROG) {
                np->n_flag |= NFLUSHWANT;
                error = tsleep((caddr_t)&np->n_flag, 0, "nfsvinval", slptimeo);
                if (error && intrflg && nfs_sigintr(nmp, NULL, td))
                        return (EINTR);
        }

        /*
         * Now, flush as required.
         */
        np->n_flag |= NFLUSHINPROG;
        error = vinvalbuf(vp, flags, slpflag, 0);
        while (error) {
                if (intrflg && nfs_sigintr(nmp, NULL, td)) {
                        np->n_flag &= ~NFLUSHINPROG;
                        if (np->n_flag & NFLUSHWANT) {
                                np->n_flag &= ~NFLUSHWANT;
                                wakeup((caddr_t)&np->n_flag);
                        }
                        return (EINTR);
                }
                error = vinvalbuf(vp, flags, 0, slptimeo);
        }
        np->n_flag &= ~(NLMODIFIED | NFLUSHINPROG);
        if (np->n_flag & NFLUSHWANT) {
                np->n_flag &= ~NFLUSHWANT;
                wakeup((caddr_t)&np->n_flag);
        }
        return (0);
}
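
/*
 * Illustrative sketch (not compiled in): a typical caller of
 * nfs_vinvalbuf(), mirroring the directory invalidation done in
 * nfs_bioread() when local modifications are detected.  The helper
 * name is hypothetical.
 */
#if 0
static int
nfs_invalidate_dir_example(struct vnode *vp)
{
        int error;

        nfs_invaldir(vp);
        error = nfs_vinvalbuf(vp, V_SAVE, 1);   /* flush dirty bufs first */
        if (error == 0)
                VTONFS(vp)->n_attrstamp = 0;    /* force a fresh GETATTR */
        return (error);
}
#endif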

/*
 * Return true (non-zero) if the txthread and rxthread are operational
 * and we do not already have too many not-yet-started BIO's built up.
 */
int
nfs_asyncok(struct nfsmount *nmp)
{
        return (nmp->nm_bioqlen < nfs_maxasyncbio &&
                nmp->nm_bioqlen < nmp->nm_maxasync_scaled / NFS_ASYSCALE &&
                nmp->nm_rxstate <= NFSSVC_PENDING &&
                nmp->nm_txstate <= NFSSVC_PENDING);
}
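
/*
 * Illustrative sketch (not compiled in): how nfs_asyncok() gates the
 * read-ahead path, mirroring the VREG case in nfs_bioread().  The bio is
 * handed to the txthread via nfs_asyncio() only when the async threads
 * are up and the queue is not saturated.  The helper name is
 * hypothetical.
 */
#if 0
static void
nfs_readahead_example(struct vnode *vp, struct nfsmount *nmp,
                      off_t raoffset, int biosize, struct thread *td)
{
        struct buf *rabp;

        if (nmp->nm_readahead <= 0 || !nfs_asyncok(nmp))
                return;                         /* async path unavailable */
        if (findblk(vp, raoffset, FINDBLK_TEST) != NULL)
                return;                         /* already cached */
        rabp = nfs_getcacheblk(vp, raoffset, biosize, td);
        if (rabp == NULL)
                return;
        if ((rabp->b_flags & (B_CACHE | B_DELWRI)) == 0) {
                rabp->b_cmd = BUF_CMD_READ;
                vfs_busy_pages(vp, rabp);
                nfs_asyncio(vp, &rabp->b_bio2); /* queue to the txthread */
        } else {
                brelse(rabp);
        }
}
#endif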

/*
 * The read-ahead code calls this to queue a bio to the txthread.
 *
 * We don't touch the bio otherwise... that is, we do not even
 * construct or send the initial rpc.  The txthread will do it
 * for us.
 *
 * NOTE! nm_bioqlen is not decremented until the request completes,
 *	 so it does not reflect the number of bio's on bioq.
 */
void
nfs_asyncio(struct vnode *vp, struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);

        KKASSERT(vp->v_tag == VT_NFS);
        BUF_KERNPROC(bp);

        /*
         * Shortcut swap cache (not done automatically because we are not
         * using bread()).
         */
        if (vn_cache_strategy(vp, bio))
                return;

        bio->bio_driver_info = vp;
        crit_enter();
        TAILQ_INSERT_TAIL(&nmp->nm_bioq, bio, bio_act);
        atomic_add_int(&nmp->nm_bioqlen, 1);
        crit_exit();
        nfssvc_iod_writer_wakeup(nmp);
}

/*
 * nfs_doio()	- Execute a BIO operation synchronously.  The BIO will be
 *		  completed and its error returned.  The caller is responsible
 *		  for brelse()ing it.  ONLY USE FOR BIO_SYNC IOs!  Otherwise
 *		  our error probe will be against an invalid pointer.
 *
 * nfs_startio()- Execute a BIO operation asynchronously.
 *
 * NOTE: nfs_asyncio() is used to initiate an asynchronous BIO operation,
 *	 which basically just queues it to the txthread.  nfs_startio()
 *	 actually initiates the I/O AFTER it has gotten to the txthread.
 *
 * NOTE: td might be NULL.
 *
 * NOTE: Caller has already busied the I/O.
 */
void
nfs_startio(struct vnode *vp, struct bio *bio, struct thread *td)
{
        struct buf *bp = bio->bio_buf;

        KKASSERT(vp->v_tag == VT_NFS);

        /*
         * clear B_ERROR and B_INVAL state prior to initiating the I/O.  We
         * do this here so we do not have to do it in all the code that
         * calls us.
         */
        bp->b_flags &= ~(B_ERROR | B_INVAL);

        KASSERT(bp->b_cmd != BUF_CMD_DONE,
                ("nfs_doio: bp %p already marked done!", bp));

        if (bp->b_cmd == BUF_CMD_READ) {
                switch (vp->v_type) {
                case VREG:
                        nfsstats.read_bios++;
                        nfs_readrpc_bio(vp, bio);
                        break;
                case VLNK:
#if 0
                        bio->bio_offset = 0;
                        nfsstats.readlink_bios++;
                        nfs_readlinkrpc_bio(vp, bio);
#else
                        nfs_doio(vp, bio, td);
#endif
                        break;
                case VDIR:
                        /*
                         * NOTE: If nfs_readdirplusrpc_bio() is requested but
                         *	 not supported, it will chain to
                         *	 nfs_readdirrpc_bio().
                         */
#if 0
                        nfsstats.readdir_bios++;
                        uiop->uio_offset = bio->bio_offset;
                        if (nmp->nm_flag & NFSMNT_RDIRPLUS)
                                nfs_readdirplusrpc_bio(vp, bio);
                        else
                                nfs_readdirrpc_bio(vp, bio);
#else
                        nfs_doio(vp, bio, td);
#endif
                        break;
                default:
                        kprintf("nfs_doio: type %x unexpected\n",vp->v_type);
                        bp->b_flags |= B_ERROR;
                        bp->b_error = EINVAL;
                        biodone(bio);
                        break;
                }
        } else {
                /*
                 * If we only need to commit, try to commit.  If this fails
                 * it will chain through to the write.  Basically all the logic
                 * in nfs_doio() is replicated.
                 */
                KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
                if (bp->b_flags & B_NEEDCOMMIT)
                        nfs_commitrpc_bio(vp, bio);
                else
                        nfs_writerpc_bio(vp, bio);
        }
}

int
nfs_doio(struct vnode *vp, struct bio *bio, struct thread *td)
{
        struct buf *bp = bio->bio_buf;
        struct uio *uiop;
        struct nfsnode *np;
        struct nfsmount *nmp;
        int error = 0;
        int iomode, must_commit;
        size_t n;
        struct uio uio;
        struct iovec io;

#if 0
        /*
         * Shortcut swap cache (not done automatically because we are not
         * using bread()).
         *
         * XXX The biowait is a hack until we can figure out how to stop a
         * biodone chain when a middle element is BIO_SYNC.  BIO_SYNC is
         * set so the bp shouldn't get ripped out from under us.  The only
         * use-cases are fully synchronous I/O cases.
         *
         * XXX This is having problems, give up for now.
         */
        if (vn_cache_strategy(vp, bio)) {
                error = biowait(&bio->bio_buf->b_bio1, "nfsrsw");
                return (error);
        }
#endif

        KKASSERT(vp->v_tag == VT_NFS);
        np = VTONFS(vp);
        nmp = VFSTONFS(vp->v_mount);
        uiop = &uio;
        uiop->uio_iov = &io;
        uiop->uio_iovcnt = 1;
        uiop->uio_segflg = UIO_SYSSPACE;
        uiop->uio_td = td;

        /*
         * clear B_ERROR and B_INVAL state prior to initiating the I/O.  We
         * do this here so we do not have to do it in all the code that
         * calls us.
         */
        bp->b_flags &= ~(B_ERROR | B_INVAL);

        KASSERT(bp->b_cmd != BUF_CMD_DONE,
                ("nfs_doio: bp %p already marked done!", bp));

        if (bp->b_cmd == BUF_CMD_READ) {
                io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
                io.iov_base = bp->b_data;
                uiop->uio_rw = UIO_READ;

                switch (vp->v_type) {
                case VREG:
                        /*
                         * When reading from a regular file zero-fill any residual.
                         * Note that this residual has nothing to do with NFS short
                         * reads, which nfs_readrpc_uio() will handle for us.
                         *
                         * We have to do this because when we are write extending
                         * a file the server may not have the same notion of
                         * filesize as we do.  Our BIOs should already be sized
                         * (b_bcount) to account for the file EOF.
                         */
                        nfsstats.read_bios++;
                        uiop->uio_offset = bio->bio_offset;
                        error = nfs_readrpc_uio(vp, uiop);
                        if (error == 0 && uiop->uio_resid) {
                                n = (size_t)bp->b_bcount - uiop->uio_resid;
                                bzero(bp->b_data + n, bp->b_bcount - n);
                                uiop->uio_resid = 0;
                        }
                        if (td && td->td_proc && (vp->v_flag & VTEXT) &&
                            np->n_mtime != np->n_vattr.va_mtime.tv_sec) {
                                uprintf("Process killed due to text file modification\n");
                                ksignal(td->td_proc, SIGKILL);
                        }
                        break;
                case VLNK:
                        uiop->uio_offset = 0;
                        nfsstats.readlink_bios++;
                        error = nfs_readlinkrpc_uio(vp, uiop);
                        break;
                case VDIR:
                        nfsstats.readdir_bios++;
                        uiop->uio_offset = bio->bio_offset;
                        if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
                                error = nfs_readdirplusrpc_uio(vp, uiop);
                                if (error == NFSERR_NOTSUPP)
                                        nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
                        }
                        if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
                                error = nfs_readdirrpc_uio(vp, uiop);
                        /*
                         * end-of-directory sets B_INVAL but does not generate an
                         * error.
                         */
                        if (error == 0 && uiop->uio_resid == bp->b_bcount)
                                bp->b_flags |= B_INVAL;
                        break;
                default:
                        kprintf("nfs_doio: type %x unexpected\n",vp->v_type);
                        break;
                }
                if (error) {
                        bp->b_flags |= B_ERROR;
                        bp->b_error = error;
                }
                bp->b_resid = uiop->uio_resid;
        } else {
                /*
                 * If we only need to commit, try to commit.
                 *
                 * NOTE: The I/O has already been staged for the write and
                 *	 its pages busied, so b_dirtyoff/end is valid.
                 */
                KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
                if (bp->b_flags & B_NEEDCOMMIT) {
                        int retv;
                        off_t off;

                        off = bio->bio_offset + bp->b_dirtyoff;
                        retv = nfs_commitrpc_uio(vp, off,
                                                 bp->b_dirtyend - bp->b_dirtyoff,
                                                 td);
                        if (retv == 0) {
                                bp->b_dirtyoff = bp->b_dirtyend = 0;
                                bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
                                bp->b_resid = 0;
                                biodone(bio);
                                return(0);
                        }
                        if (retv == NFSERR_STALEWRITEVERF) {
                                nfs_clearcommit(vp->v_mount);
                        }
                }

                /*
                 * Setup for actual write
                 */
                if (bio->bio_offset + bp->b_dirtyend > np->n_size)
                        bp->b_dirtyend = np->n_size - bio->bio_offset;

                if (bp->b_dirtyend > bp->b_dirtyoff) {
                        io.iov_len = uiop->uio_resid = bp->b_dirtyend
                                     - bp->b_dirtyoff;
                        uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
                        io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
                        uiop->uio_rw = UIO_WRITE;
                        nfsstats.write_bios++;

                        if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0)
                                iomode = NFSV3WRITE_UNSTABLE;
                        else
                                iomode = NFSV3WRITE_FILESYNC;

                        must_commit = 0;
                        error = nfs_writerpc_uio(vp, uiop, &iomode, &must_commit);

                        /*
                         * We no longer try to use kern/vfs_bio's cluster code to
                         * cluster commits, so B_CLUSTEROK is no longer set with
                         * B_NEEDCOMMIT.  The problem is that a vfs_busy_pages()
                         * may have to clear B_NEEDCOMMIT if it finds underlying
                         * pages have been redirtied through a memory mapping
                         * and doing this on a clustered bp will probably cause
                         * a panic, plus the flag in the underlying NFS bufs
                         * making up the cluster bp will not be properly cleared.
                         */
                        if (!error && iomode == NFSV3WRITE_UNSTABLE) {
                                bp->b_flags |= B_NEEDCOMMIT;
#if 0
                                /* XXX do not enable commit clustering */
                                if (bp->b_dirtyoff == 0
                                    && bp->b_dirtyend == bp->b_bcount)
                                        bp->b_flags |= B_CLUSTEROK;
#endif
                        } else {
                                bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
                        }

                        /*
                         * For an interrupted write, the buffer is still valid
                         * and the write hasn't been pushed to the server yet,
                         * so we can't set B_ERROR and report the interruption
                         * by setting B_EINTR.  For the async case, B_EINTR
                         * is not relevant, so the rpc attempt is essentially
                         * a noop.  For the case of a V3 write rpc not being
                         * committed to stable storage, the block is still
                         * dirty and requires either a commit rpc or another
                         * write rpc with iomode == NFSV3WRITE_FILESYNC before
                         * the block is reused.  This is indicated by setting
                         * the B_DELWRI and B_NEEDCOMMIT flags.
                         *
                         * If the buffer is marked B_PAGING, it does not reside on
                         * the vp's paging queues so we cannot call bdirty().  The
                         * bp in this case is not an NFS cache block so we should
                         * be safe.  XXX
                         */
                        if (error == EINTR
                            || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
                                crit_enter();
                                bp->b_flags &= ~(B_INVAL|B_NOCACHE);
                                if ((bp->b_flags & B_PAGING) == 0)
                                        bdirty(bp);
                                if (error)
                                        bp->b_flags |= B_EINTR;
                                crit_exit();
                        } else {
                                if (error) {
                                        bp->b_flags |= B_ERROR;
                                        bp->b_error = np->n_error = error;
                                        np->n_flag |= NWRITEERR;
                                }
                                bp->b_dirtyoff = bp->b_dirtyend = 0;
                        }
                        if (must_commit)
                                nfs_clearcommit(vp->v_mount);
                        bp->b_resid = uiop->uio_resid;
                } else {
                        bp->b_resid = 0;
                }
        }

        /*
         * I/O was run synchronously, biodone() it and calculate the
         * error to return.
         */
        biodone(bio);
        KKASSERT(bp->b_cmd == BUF_CMD_DONE);
        if (bp->b_flags & B_EINTR)
                return (EINTR);
        if (bp->b_flags & B_ERROR)
                return (bp->b_error ? bp->b_error : EIO);
        return (0);
}

/*
 * Handle all truncation, write-extend, and ftruncate()-extend operations
 * on the NFS client side.
 *
 * We use the new API in kern/vfs_vm.c to perform these operations in a
 * VM-friendly way.  With this API VM pages are properly zeroed and pages
 * still mapped into the buffer straddling EOF are not invalidated.
 */
int
nfs_meta_setsize(struct vnode *vp, struct thread *td, off_t nsize, int flags)
{
        struct nfsnode *np = VTONFS(vp);
        off_t osize;
        int biosize = vp->v_mount->mnt_stat.f_iosize;
        int error;

        osize = np->n_size;
        np->n_size = nsize;

        if (nsize < osize) {
                error = nvtruncbuf(vp, nsize, biosize, -1, flags);
        } else {
                error = nvextendbuf(vp, osize, nsize,
                                    biosize, biosize, -1, -1, flags);
        }
        return(error);
}
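
/*
 * Illustrative sketch (not compiled in): nfs_meta_setsize() usage as in
 * nfs_write()'s extension path above.  NVEXTF_TRIVIAL is passed when the
 * write starts at or before the old EOF, so no intervening hole has to
 * be zeroed through the buffer cache.  The helper name is hypothetical.
 */
#if 0
static void
nfs_extend_example(struct vnode *vp, struct thread *td, struct uio *uio,
                   int bytes)
{
        struct nfsnode *np = VTONFS(vp);
        int trivial;

        if (uio->uio_offset + bytes <= np->n_size)
                return;                         /* no extension needed */
        trivial = (uio->uio_segflg != UIO_NOCOPY &&
                   uio->uio_offset <= np->n_size) ? NVEXTF_TRIVIAL : 0;
        nfs_meta_setsize(vp, td, uio->uio_offset + bytes, trivial);
}
#endif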

/*
 * Synchronous completion for nfs_doio.  Call bpdone() with elseit=FALSE.
 * Caller is responsible for brelse()'ing the bp.
 */
static void
nfsiodone_sync(struct bio *bio)
{
        bio->bio_flags = 0;
        bpdone(bio->bio_buf, 0);
}

/*
 * nfs read rpc - BIO version
 */
void
nfs_readrpc_bio(struct vnode *vp, struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        u_int32_t *tl;
        struct nfsmount *nmp;
        int error = 0, len, tsiz;
        struct nfsm_info *info;

        info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
        info->mrep = NULL;
        info->v3 = NFS_ISV3(vp);

        nmp = VFSTONFS(vp->v_mount);
        tsiz = bp->b_bcount;
        KKASSERT(tsiz <= nmp->nm_rsize);
        if (bio->bio_offset + tsiz > nmp->nm_maxfilesize) {
                error = EFBIG;
                goto nfsmout;
        }
        nfsstats.rpccnt[NFSPROC_READ]++;
        len = tsiz;
        nfsm_reqhead(info, vp, NFSPROC_READ,
                     NFSX_FH(info->v3) + NFSX_UNSIGNED * 3);
        ERROROUT(nfsm_fhtom(info, vp));
        tl = nfsm_build(info, NFSX_UNSIGNED * 3);
        if (info->v3) {
                txdr_hyper(bio->bio_offset, tl);
                *(tl + 2) = txdr_unsigned(len);
        } else {
                *tl++ = txdr_unsigned(bio->bio_offset);
                *tl++ = txdr_unsigned(len);
                *tl = 0;
        }
        info->bio = bio;
        info->done = nfs_readrpc_bio_done;
        nfsm_request_bio(info, vp, NFSPROC_READ, NULL,
                         nfs_vpcred(vp, ND_READ));
        return;
nfsmout:
        kfree(info, M_NFSREQ);
        bp->b_error = error;
        bp->b_flags |= B_ERROR;
        biodone(bio);
}
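
/*
 * Illustrative sketch (not compiled in): the argument words built by
 * nfs_readrpc_bio() above, written out as hypothetical structs purely to
 * document the XDR layout placed after the file handle.
 */
#if 0
struct nfs_readargs_v3_example {
        u_int32_t offset_hi;    /* txdr_hyper(bio_offset), high word */
        u_int32_t offset_lo;    /* txdr_hyper(bio_offset), low word */
        u_int32_t count;        /* txdr_unsigned(len) */
};

struct nfs_readargs_v2_example {
        u_int32_t offset;       /* 32 bit offset only */
        u_int32_t count;
        u_int32_t totalcount;   /* unused, sent as 0 */
};
#endif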

static void
nfs_readrpc_bio_done(nfsm_info_t info)
{
        struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
        struct bio *bio = info->bio;
        struct buf *bp = bio->bio_buf;
        u_int32_t *tl;
        int attrflag;
        int retlen;
        int eof;
        int error = 0;

        KKASSERT(info->state == NFSM_STATE_DONE);

        lwkt_gettoken(&nmp->nm_token);

        ERROROUT(info->error);
        if (info->v3) {
                ERROROUT(nfsm_postop_attr(info, info->vp, &attrflag,
                                          NFS_LATTR_NOSHRINK));
                NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
                eof = fxdr_unsigned(int, *(tl + 1));
        } else {
                ERROROUT(nfsm_loadattr(info, info->vp, NULL));
                eof = 0;
        }
        NEGATIVEOUT(retlen = nfsm_strsiz(info, nmp->nm_rsize));
        ERROROUT(nfsm_mtobio(info, bio, retlen));
        m_freem(info->mrep);
        info->mrep = NULL;

        /*
         * No error occurred.  If retlen is less than bcount, EOF was not
         * hit, and this is NFSv3, a zero-fill short read occurred.
         *
         * For NFSv2 a short read indicates EOF.
         */
        if (retlen < bp->b_bcount && info->v3 && eof == 0) {
                bzero(bp->b_data + retlen, bp->b_bcount - retlen);
                retlen = bp->b_bcount;
        }

        /*
         * If we hit an EOF we still zero-fill, but return the expected
         * b_resid anyway.  This should normally not occur since async
         * BIOs are not used for the read-before-write case.  Races against
         * the server can cause it though and we don't want to leave
         * garbage in the buffer.
         */
        if (retlen < bp->b_bcount) {
                bzero(bp->b_data + retlen, bp->b_bcount - retlen);
        }
        bp->b_resid = 0;
        /* bp->b_resid = bp->b_bcount - retlen; */
nfsmout:
        lwkt_reltoken(&nmp->nm_token);
        kfree(info, M_NFSREQ);
        if (error) {
                bp->b_error = error;
                bp->b_flags |= B_ERROR;
        }
        biodone(bio);
}

/*
 * nfs write call - BIO version
 *
 * NOTE: Caller has already busied the I/O.
 */
void
nfs_writerpc_bio(struct vnode *vp, struct bio *bio)
{
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);
        struct nfsnode *np = VTONFS(vp);
        struct buf *bp = bio->bio_buf;
        u_int32_t *tl;
        int len;
        int iomode;
        int error = 0;
        struct nfsm_info *info;
        off_t offset;

        /*
         * Setup for actual write.  Just clean up the bio if there
         * is nothing to do.  b_dirtyoff/end have already been staged
         * by the bp's pages getting busied.
         */
        if (bio->bio_offset + bp->b_dirtyend > np->n_size)
                bp->b_dirtyend = np->n_size - bio->bio_offset;

        if (bp->b_dirtyend <= bp->b_dirtyoff) {
                bp->b_resid = 0;
                biodone(bio);
                return;
        }
        len = bp->b_dirtyend - bp->b_dirtyoff;
        offset = bio->bio_offset + bp->b_dirtyoff;
        if (offset + len > nmp->nm_maxfilesize) {
                bp->b_flags |= B_ERROR;
                bp->b_error = EFBIG;
                biodone(bio);
                return;
        }
        bp->b_resid = len;
        nfsstats.write_bios++;

        info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
        info->mrep = NULL;
        info->v3 = NFS_ISV3(vp);
        info->info_writerpc.must_commit = 0;
        if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0)
                iomode = NFSV3WRITE_UNSTABLE;
        else
                iomode = NFSV3WRITE_FILESYNC;

        KKASSERT(len <= nmp->nm_wsize);

        nfsstats.rpccnt[NFSPROC_WRITE]++;
        nfsm_reqhead(info, vp, NFSPROC_WRITE,
                     NFSX_FH(info->v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
        ERROROUT(nfsm_fhtom(info, vp));
        if (info->v3) {
                tl = nfsm_build(info, 5 * NFSX_UNSIGNED);
                txdr_hyper(offset, tl);
                tl += 2;
                *tl++ = txdr_unsigned(len);
                *tl++ = txdr_unsigned(iomode);
                *tl = txdr_unsigned(len);
        } else {
                u_int32_t x;

                tl = nfsm_build(info, 4 * NFSX_UNSIGNED);
                /* Set both "begin" and "current" to non-garbage. */
                x = txdr_unsigned((u_int32_t)offset);
                *tl++ = x;      /* "begin offset" */
                *tl++ = x;      /* "current offset" */
                x = txdr_unsigned(len);
                *tl++ = x;      /* total to this offset */
                *tl = x;        /* size of this write */
        }
        ERROROUT(nfsm_biotom(info, bio, bp->b_dirtyoff, len));
        info->bio = bio;
        info->done = nfs_writerpc_bio_done;
        nfsm_request_bio(info, vp, NFSPROC_WRITE, NULL,
                         nfs_vpcred(vp, ND_WRITE));
        return;
nfsmout:
        kfree(info, M_NFSREQ);
        bp->b_error = error;
        bp->b_flags |= B_ERROR;
        biodone(bio);
}
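
/*
 * Illustrative sketch (not compiled in): the argument words built by
 * nfs_writerpc_bio() above, as hypothetical structs documenting the XDR
 * layout that precedes the data copied in by nfsm_biotom().
 */
#if 0
struct nfs_writeargs_v3_example {
        u_int32_t offset_hi;    /* txdr_hyper(offset), high word */
        u_int32_t offset_lo;    /* txdr_hyper(offset), low word */
        u_int32_t count;        /* bytes in this write */
        u_int32_t stable;       /* NFSV3WRITE_UNSTABLE or _FILESYNC */
        u_int32_t len;          /* size of the opaque data that follows */
};

struct nfs_writeargs_v2_example {
        u_int32_t beginoffset;  /* both offsets set to the same value */
        u_int32_t offset;
        u_int32_t totalcount;
        u_int32_t count;
};
#endif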

static void
nfs_writerpc_bio_done(nfsm_info_t info)
{
        struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
        struct nfsnode *np = VTONFS(info->vp);
        struct bio *bio = info->bio;
        struct buf *bp = bio->bio_buf;
        int wccflag = NFSV3_WCCRATTR;
        int iomode = NFSV3WRITE_FILESYNC;
        int commit;
        int rlen;
        int error;
        int len = bp->b_resid;  /* b_resid was set to shortened length */
        u_int32_t *tl;

        lwkt_gettoken(&nmp->nm_token);

        ERROROUT(info->error);
        if (info->v3) {
                /*
                 * The write RPC returns a before and after mtime.  The
                 * nfsm_wcc_data() macro checks the before n_mtime
                 * against the before time and stores the after time
                 * in the nfsnode's cached vattr and n_mtime field.
                 * The NRMODIFIED bit will be set if the before
                 * time did not match the original mtime.
                 */
                wccflag = NFSV3_WCCCHK;
                ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag));
                if (error == 0) {
                        NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED + NFSX_V3WRITEVERF));
                        rlen = fxdr_unsigned(int, *tl++);
                        if (rlen == 0) {
                                error = NFSERR_IO;
                                m_freem(info->mrep);
                                info->mrep = NULL;
                                goto nfsmout;
                        } else if (rlen < len) {
#if 0
                                /*
                                 * XXX what do we do here?
                                 */
                                backup = len - rlen;
                                uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base - backup;
                                uiop->uio_iov->iov_len += backup;
                                uiop->uio_offset -= backup;
                                uiop->uio_resid += backup;
                                len = rlen;
#endif
                        }
                        commit = fxdr_unsigned(int, *tl++);

                        /*
                         * Return the lowest commitment level
                         * obtained by any of the RPCs.
                         */
                        if (iomode == NFSV3WRITE_FILESYNC)
                                iomode = commit;
                        else if (iomode == NFSV3WRITE_DATASYNC &&
                                 commit == NFSV3WRITE_UNSTABLE)
                                iomode = commit;
                        if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
                                bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF);
                                nmp->nm_state |= NFSSTA_HASWRITEVERF;
                        } else if (bcmp(tl, nmp->nm_verf, NFSX_V3WRITEVERF)) {
                                info->info_writerpc.must_commit = 1;
                                bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF);
                        }
                }
        } else {
                ERROROUT(nfsm_loadattr(info, info->vp, NULL));
        }
        m_freem(info->mrep);
        info->mrep = NULL;
        len = 0;
nfsmout:
        if (info->vp->v_mount->mnt_flag & MNT_ASYNC)
                iomode = NFSV3WRITE_FILESYNC;
        bp->b_resid = len;

        /*
         * End of RPC.  Now clean up the bp.
         *
         * We no longer enable write clustering for commit operations,
         * See around line 1157 for a more detailed comment.
         */
        if (!error && iomode == NFSV3WRITE_UNSTABLE) {
                bp->b_flags |= B_NEEDCOMMIT;
#if 0
                /* XXX do not enable commit clustering */
                if (bp->b_dirtyoff == 0 && bp->b_dirtyend == bp->b_bcount)
                        bp->b_flags |= B_CLUSTEROK;
#endif
        } else {
                bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
        }

        /*
         * For an interrupted write, the buffer is still valid
         * and the write hasn't been pushed to the server yet,
         * so we can't set B_ERROR and report the interruption
         * by setting B_EINTR.  For the async case, B_EINTR
         * is not relevant, so the rpc attempt is essentially
         * a noop.  For the case of a V3 write rpc not being
         * committed to stable storage, the block is still
         * dirty and requires either a commit rpc or another
         * write rpc with iomode == NFSV3WRITE_FILESYNC before
         * the block is reused.  This is indicated by setting
         * the B_DELWRI and B_NEEDCOMMIT flags.
         *
         * If the buffer is marked B_PAGING, it does not reside on
         * the vp's paging queues so we cannot call bdirty().  The
         * bp in this case is not an NFS cache block so we should
         * be safe.  XXX
         */
        if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
                crit_enter();
                bp->b_flags &= ~(B_INVAL|B_NOCACHE);
                if ((bp->b_flags & B_PAGING) == 0)
                        bdirty(bp);
                if (error)
                        bp->b_flags |= B_EINTR;
                crit_exit();
        } else {
                if (error) {
                        bp->b_flags |= B_ERROR;
                        bp->b_error = np->n_error = error;
                        np->n_flag |= NWRITEERR;
                }
                bp->b_dirtyoff = bp->b_dirtyend = 0;
        }
        if (info->info_writerpc.must_commit)
                nfs_clearcommit(info->vp->v_mount);
        lwkt_reltoken(&nmp->nm_token);

        kfree(info, M_NFSREQ);
        if (error) {
                bp->b_flags |= B_ERROR;
                bp->b_error = error;
        }
        biodone(bio);
}

/*
 * Nfs Version 3 commit rpc - BIO version
 *
 * This function issues the commit rpc and will chain to a write
 * rpc if necessary.
 */
void
nfs_commitrpc_bio(struct vnode *vp, struct bio *bio)
{
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);
        struct buf *bp = bio->bio_buf;
        struct nfsm_info *info;
        int error = 0;
        u_int32_t *tl;

        if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
                bp->b_dirtyoff = bp->b_dirtyend = 0;
                bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
                bp->b_resid = 0;
                biodone(bio);
                return;
        }

        info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
        info->mrep = NULL;
        info->v3 = 1;

        nfsstats.rpccnt[NFSPROC_COMMIT]++;
        nfsm_reqhead(info, vp, NFSPROC_COMMIT, NFSX_FH(1));
        ERROROUT(nfsm_fhtom(info, vp));
        tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
        txdr_hyper(bio->bio_offset + bp->b_dirtyoff, tl);
        tl += 2;
        *tl = txdr_unsigned(bp->b_dirtyend - bp->b_dirtyoff);
        info->bio = bio;
        info->done = nfs_commitrpc_bio_done;
        nfsm_request_bio(info, vp, NFSPROC_COMMIT, NULL,
                         nfs_vpcred(vp, ND_WRITE));
        return;
nfsmout:
        /*
         * Chain to write RPC on (early) error
         */
        kfree(info, M_NFSREQ);
        nfs_writerpc_bio(vp, bio);
}

static void
nfs_commitrpc_bio_done(nfsm_info_t info)
{
        struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
        struct bio *bio = info->bio;
        struct buf *bp = bio->bio_buf;
        u_int32_t *tl;
        int wccflag = NFSV3_WCCRATTR;
        int error = 0;

        lwkt_gettoken(&nmp->nm_token);

        ERROROUT(info->error);
        ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag));
        if (error == 0) {
                NULLOUT(tl = nfsm_dissect(info, NFSX_V3WRITEVERF));
                if (bcmp(nmp->nm_verf, tl, NFSX_V3WRITEVERF)) {
                        bcopy(tl, nmp->nm_verf, NFSX_V3WRITEVERF);
                        error = NFSERR_STALEWRITEVERF;
                }
        }
        m_freem(info->mrep);
        info->mrep = NULL;

        /*
         * On completion we must chain to a write bio if an
         * error occurred.
         */
nfsmout:
        if (error == 0) {
                bp->b_dirtyoff = bp->b_dirtyend = 0;
                bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
                bp->b_resid = 0;
                biodone(bio);
        } else {
                nfs_writerpc_bio(info->vp, bio);
        }
        kfree(info, M_NFSREQ);
        lwkt_reltoken(&nmp->nm_token);
}
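
/*
 * Illustrative sketch (not compiled in): the commit-or-write decision
 * made by the BIO write path.  nfs_startio() tries the cheap COMMIT rpc
 * for B_NEEDCOMMIT buffers, and both entry points above fall back to a
 * full write rpc when the commit cannot be used.  The helper name is
 * hypothetical.
 */
#if 0
static void
nfs_push_dirty_example(struct vnode *vp, struct bio *bio)
{
        struct buf *bp = bio->bio_buf;

        if (bp->b_flags & B_NEEDCOMMIT)
                nfs_commitrpc_bio(vp, bio);     /* may chain to write rpc */
        else
                nfs_writerpc_bio(vp, bio);
}
#endif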