xref: /dragonfly/sys/vfs/smbfs/smbfs_io.c (revision 0db87cb7)
/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/fs/smbfs/smbfs_io.c,v 1.3.2.3 2003/01/17 08:20:26 tjr Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>	/* defines plimit structure in proc struct */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_page2.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <netproto/smb/smb.h>
#include <netproto/smb/smb_conn.h>
#include <netproto/smb/smb_subr.h>

#include "smbfs.h"
#include "smbfs_node.h"
#include "smbfs_subr.h"

#include <sys/buf.h>

#include <sys/thread2.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

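/*
 * When fastlookup is enabled (the default), each entry returned by a
 * directory scan is also pushed into the node/attribute cache via
 * smbfs_nget(), so an immediately following lookup of that name can
 * usually be satisfied without another round trip to the server.
 */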
static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

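/*
 * Fill a readdir request from an SMB search.  The first two entries,
 * "." and "..", are synthesized locally; the rest come from a FIND
 * context that is kept open on the directory node between calls and
 * reopened whenever the requested offset does not match our position.
 */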
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np;
	int error, offset, retval;

	np = VTOSMB(vp);
	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);

	if (uio->uio_offset < 0 || uio->uio_offset > INT_MAX)
		return(EINVAL);

	error = 0;
	offset = uio->uio_offset;

	if (uio->uio_resid > 0 && offset < 1) {
		if (vop_write_dirent(&error, uio, np->n_ino, DT_DIR, 1, "."))
			goto done;
		if (error)
			goto done;
		++offset;
	}

	if (uio->uio_resid > 0 && offset < 2) {
		if (vop_write_dirent(&error, uio,
		    np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2,
		    DT_DIR, 2, ".."))
			goto done;
		if (error)
			goto done;
		++offset;
	}

	if (uio->uio_resid == 0)
		goto done;

	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %d:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d\n", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else {
		ctx = np->n_dirseq;
	}
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs, &scred);
		++np->n_dirofs;
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	while (uio->uio_resid > 0 && !error) {
		/*
		 * Overestimate the size of a record a bit; it doesn't
		 * really hurt to be wrong here.
		 */
		error = smbfs_findnext(ctx, uio->uio_resid / _DIRENT_RECLEN(255) + 1, &scred);
		if (error)
			break;
		np->n_dirofs++;
		++offset;

		retval = vop_write_dirent(&error, uio, ctx->f_attr.fa_ino,
		    (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG,
		    ctx->f_nmlen, ctx->f_name);
		if (retval)
			break;
		if (smbfs_fastlookup && !error) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error)
				vput(newvp);
		}
	}
	if (error == ENOENT)
		error = 0;
done:
	uio->uio_offset = offset;
	return error;
}

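/*
 * Read from a file or directory vnode.  Directory reads are handed to
 * smbfs_readvdir().  Regular-file reads first validate the cached
 * attributes so that a modification made by another client (a changed
 * mtime) flushes locally cached buffers before the data is read.
 */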
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct thread *td;
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Protect against UIO_NOCOPY, which is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vnode types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;/*lockstatus(&vp->v_lock, td);*/
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, V_SAVE, 1);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

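/*
 * Write to a regular file.  IO_APPEND and IO_SYNC flush any locally
 * modified buffers first; an append then positions the uio at the
 * locally cached file size, which can race with other clients (see
 * the #if 0 note below).
 */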
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
		 struct ucred *cred, int ioflag)
{
	struct thread *td;
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vnode types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uiop->uio_offset,
	    (int)uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, V_SAVE, 1);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#if 0 /* notyet */
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (td->td_proc &&
	    uiop->uio_offset + uiop->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return EFBIG;
	}
	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n", (int)uiop->uio_offset,
	    (int)uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.  This is called from the
 * strategy code with a struct bio: reads that come up short are
 * zero-filled to the end of the buffer, and writes push only the dirty
 * region of the buffer to the server.
 */
int
smbfs_doio(struct vnode *vp, struct bio *bio, struct ucred *cr, struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

	if (bp->b_cmd == BUF_CMD_READ) {
	    io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = bio->bio_offset;
		error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
		if (error)
			break;
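		/*
		 * A short read means we ran into EOF on the server;
		 * zero-fill the rest of the buffer.
		 */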
		if (uiop->uio_resid) {
			size_t left = uiop->uio_resid;
			size_t nread = (size_t)bp->b_bcount - left;
			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		kprintf("smbfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
	    }
	} else { /* write */
	    KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
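	    /*
	     * Clamp the dirty region to the locally known file size so
	     * stale bytes past EOF are not pushed to the server.
	     */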
	    if (bio->bio_offset + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - bio->bio_offset;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid =
			(size_t)(bp->b_dirtyend - bp->b_dirtyoff);
		uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set B_ERROR; instead we report the
		 * interruption by setting B_EINTR and redirtying the
		 * buffer.  For the async case, B_EINTR is not relevant,
		 * so the rpc attempt is essentially a noop.  (The rest
		 * of this logic was inherited from the NFS client: a V3
		 * write rpc not committed to stable storage leaves the
		 * block dirty, requiring a commit rpc or another write
		 * rpc with iomode == NFSV3WRITE_FILESYNC before reuse,
		 * as indicated by the B_DELWRI and B_NEEDCOMMIT flags;
		 * that part is vestigial for SMB.)
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			crit_enter();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0)
			    bdirty(bp);
			bp->b_flags |= B_EINTR;
			crit_exit();
		} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		biodone(bio);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bio);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 *
 * smbfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		  int a_reqpage, vm_ooffset_t a_offset)
 */
int
smbfs_getpages(struct vop_getpages_args *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, npages;
	int doclose;
	size_t size, toff, nextoff, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	KKASSERT(td->td_proc);

	vp = ap->a_vp;
	cred = td->td_proc->p_ucred;
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = (size_t)ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}
	smb_makescred(&scred, td, cred);

	bp = getpbuf_kva(&smbfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	/*
	 * This is kinda nasty.  Since smbfs is physically closing the
	 * fid on close(), we have to reopen it if necessary.  There are
	 * other races here too, such as if another process opens the same
	 * file while we are blocked in read. XXX
	 */
	error = 0;
	doclose = 0;
	if (np->n_opencount == 0) {
		error = smbfs_smb_open(np, SMB_AM_OPENREAD, &scred);
		if (error == 0)
			doclose = 1;
	}
	if (error == 0)
		error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	if (doclose)
		smbfs_smb_close(smp->sm_share, np->n_fid, NULL, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		kprintf("smbfs_getpages: error %d\n", error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: pmap dirty bit should have already been cleared.
		 *	 We do not clear it here.
		 */
		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
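			/*
			 * Partial page at EOF: round the count of valid
			 * bytes in this page up to a DEV_BSIZE boundary
			 * and mark only that range valid and clean.
			 */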
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
				      ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating the page is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_REFERENCED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before the close, so it's
 * not necessary to open the vnode.
 *
 * smbfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count, int a_sync,
 *		  int *a_rtvals, vm_ooffset_t a_offset)
 */
int
smbfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;
	VOP_OPEN(vp, FWRITE, cred, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, NULL);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int doclose;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;
/*	VOP_OPEN(vp, FWRITE, cred, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	bp = getpbuf_kva(&smbfs_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uio.uio_offset, (int)uio.uio_resid);

	smb_makescred(&scred, td, cred);

	/*
	 * This is kinda nasty.  Since smbfs is physically closing the
	 * fid on close(), we have to reopen it if necessary.  There are
	 * other races here too, such as if another process opens the same
	 * file while we are blocked in write, or if the file is open
	 * read-only.  XXX
	 */
	error = 0;
	doclose = 0;
	if (np->n_opencount == 0) {
		error = smbfs_smb_open(np, SMB_AM_OPENRW, &scred);
		if (error == 0)
			doclose = 1;
	}
	if (error == 0)
		error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
	if (doclose)
		smbfs_smb_close(smp->sm_share, np->n_fid, NULL, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &smbfs_pbuf_freecnt);

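	/*
	 * On success, mark each fully written page clean and report
	 * VM_PAGER_OK for it; any pages beyond a short write keep the
	 * VM_PAGER_AGAIN set above.
	 */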
	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
	}
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, int flags, int intrflg)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0, slpflag, slptimeo;

	if (vp->v_flag & VRECLAIMED)
		return 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
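		/*
		 * The tsleep() status itself is discarded;
		 * smb_proc_intr() decides whether a pending signal
		 * should interrupt the wait.
		 */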
		error = tsleep((caddr_t)&np->n_flag, 0, "smfsvinv", slptimeo);
		error = smb_proc_intr(curthread);
		if (error == EINTR && intrflg)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error == ERESTART || error == EINTR)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, flags, slpflag, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (error);
}