/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/fs/smbfs/smbfs_io.c,v 1.3.2.3 2003/01/17 08:20:26 tjr Exp $
 * $DragonFly: src/sys/vfs/smbfs/smbfs_io.c,v 1.7 2003/08/07 21:54:36 dillon Exp $
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>	/* defines plimit structure in proc struct */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netproto/smb/smb.h>
#include <netproto/smb/smb_conn.h>
#include <netproto/smb/smb_subr.h>

#include "smbfs.h"
#include "smbfs_node.h"
#include "smbfs_subr.h"

#include <sys/buf.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

extern struct linker_set sysctl_vfs_smbfs;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");


#define DE_SIZE	(sizeof(struct dirent))

static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE; 	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
		return EINVAL;
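	/*
	 * Synthesize the "." and ".." entries, which the SMB search
	 * protocol does not return.  Note the string trick below:
	 * d_name is preloaded with "..", and the NUL stored at index
	 * offset + 1 truncates it to "." for the first entry.
	 */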
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove((caddr_t)&de, DE_SIZE, uio);
		if (error)
			return error;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0)
		return 0;
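	/*
	 * The server-side search context is stateful.  If the caller
	 * seeks to an offset other than where the cached search left
	 * off, close it and restart from the beginning (offset 2, the
	 * first real entry after "." and "..").
	 */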
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d\n", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
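	/*
	 * Fetch entries one at a time, converting each SMB find result
	 * into a struct dirent.  With fastlookup enabled, also
	 * instantiate a vnode for the entry and prime the namecache so
	 * a subsequent lookup avoids a round trip to the server.
	 */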
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, &scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove((caddr_t)&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
	return error;
}

int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct thread *td;
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Reject transfer modes that are not supported for now
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;/*lockstatus(&vp->v_lock, td);*/
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY, td);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
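	/*
	 * Keep the buffer cache coherent with the server: if we have
	 * modified the file locally, refresh the cached attributes;
	 * otherwise, if the server-side modification time has changed
	 * behind our back, toss the locally cached buffers.
	 */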
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, td);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, td);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct thread *td;
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
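	/*
	 * For append or synchronous writes, push out any locally
	 * modified buffers first so the transfer runs against the
	 * server's current view of the file; IO_APPEND then writes at
	 * the current end of file.
	 */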
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#if notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, td);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (td->td_proc &&
	    uiop->uio_offset + uiop->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(td->td_proc, SIGXFSZ);
		return EFBIG;
	}
	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct vnode *vp = bp->b_vp;
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

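	/*
	 * Read case: translate the buffer's logical block number into
	 * a byte offset (b_blkno is in DEV_BSIZE units) and issue a
	 * single smb_read.  A short read past EOF leaves a tail in the
	 * buffer, which must be zeroed so stale data is never exposed.
	 */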
	if (bp->b_flags & B_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
		if (error)
			break;
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio:  type %x unexpected\n",vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
	    }
	} else { /* write */
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

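	    /*
	     * Write case: only the dirty subrange of the buffer is
	     * pushed to the server, clipped above so it never extends
	     * past the current end of file.
	     */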
	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		bp->b_flags |= B_WRITEINPROG;
		error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
		bp->b_flags &= ~B_WRITEINPROG;

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set B_ERROR and report the interruption
		 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused.  This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.  (This comment
		 * is inherited from the NFS client code.)
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			splx(s);
		} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		biodone(bp);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bp);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
		ap->a_reqpage);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	KKASSERT(td->td_proc);

	vp = ap->a_vp;
	cred = td->td_proc->p_ucred;
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if (vp->v_object == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}
	smb_makescred(&scred, td, cred);

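	/*
	 * Borrow a physical buffer for its KVA window and map the VM
	 * pages into it, so the whole request can be handed to
	 * smb_read as one contiguous system-space uio.
	 */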
	bp = getpbuf(&smbfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n",error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

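	/*
	 * Mark pages valid according to how much data actually came
	 * back: fully covered pages become entirely valid, while the
	 * page containing the end of the transfer is validated only up
	 * to the last DEV_BSIZE boundary that was received.
	 */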
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * empirical results show that deactivating pages is
			 * best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;
	VOP_OPEN(vp, FWRITE, cred, td);
	error = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
		ap->a_sync, ap->a_rtvals);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;
/*	VOP_OPEN(vp, FWRITE, cred, td);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

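	/*
	 * As in getpages: map the pages into a borrowed pbuf's KVA and
	 * push them to the server with a single synchronous smb_write.
	 * Pages that made it out are marked clean below.
	 */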
	bp = getpbuf(&smbfs_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			pages[i]->dirty = 0;
		}
	}
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(vp, flags, td, intrflg)
	struct vnode *vp;
	int flags;
	struct thread *td;
	int intrflg;
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0, slpflag, slptimeo;

	if (vp->v_flag & VXLOCK)
		return 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
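	/*
	 * NFLUSHINPROG serializes flushers.  A waiter advertises itself
	 * with NFLUSHWANT and sleeps on n_flag; the flusher in progress
	 * issues a wakeup when it clears NFLUSHINPROG below.
	 */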
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, 0, "smfsvinv", slptimeo);
		error = smb_proc_intr(td);
		if (error == EINTR && intrflg)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, td, slpflag, 0);
	while (error) {
		if (intrflg && (error == ERESTART || error == EINTR)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, flags, td, slpflag, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (error);
}
663