xref: /dragonfly/sys/vfs/fuse/fuse_vnops.c (revision 7485684f)
1 /*-
2  * Copyright (c) 2019 Tomohiro Kusumi <tkusumi@netbsd.org>
3  * Copyright (c) 2019 The DragonFly Project
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include "fuse.h"
29 
30 #include <sys/fcntl.h>
31 #include <sys/dirent.h>
32 #include <sys/uio.h>
33 #include <sys/buf.h>
34 #include <sys/mountctl.h>
35 #include <sys/kern_syscall.h>
36 #include <vm/vm_pager.h>
37 #include <vm/vm_extern.h>
38 #include <vm/vnode_pager.h>
39 #include <vm/vm_pageout.h>
40 
41 #include <sys/buf2.h>
42 #include <vm/vm_page2.h>
43 
44 static int fuse_reg_resize(struct vnode *vp, off_t newsize, int trivial);
45 static void fuse_io_execute(struct fuse_mount *fmp, struct bio *bio);
46 static void fuse_release(struct fuse_mount *fmp, struct fuse_node *fnp);
47 
48 static int
49 fuse_set_attr(struct fuse_node *fnp, struct fuse_attr *fat)
50 {
51 	struct vattr *vap = &fnp->attr;
52 	int error = 0;
53 
54 	vattr_null(vap);
55 
56 	vap->va_type = IFTOVT(fat->mode);
57 	vap->va_size = (fnp->sizeoverride ? fnp->size : fat->size);
58 	vap->va_bytes = fat->blocks * S_BLKSIZE;
59 	vap->va_mode = fat->mode & ~S_IFMT;
60 	if (!fat->nlink) /* XXX .fuse_hidden* has 0 link */
61 		vap->va_nlink = 1;
62 	else
63 		vap->va_nlink = fat->nlink;
64 	vap->va_uid = fat->uid;
65 	vap->va_gid = fat->gid;
66 	vap->va_fsid = fnp->fmp->mp->mnt_stat.f_fsid.val[0];
67 	vap->va_fileid = fat->ino;
68 	vap->va_blocksize = FUSE_BLKSIZE;
69 	vap->va_rmajor = VNOVAL;
70 	vap->va_rminor = VNOVAL;
71 	vap->va_atime.tv_sec = fat->atime;
72 	vap->va_atime.tv_nsec = fat->atimensec;
73 	vap->va_mtime.tv_sec = fat->mtime;
74 	vap->va_mtime.tv_nsec = fat->mtimensec;
75 	vap->va_ctime.tv_sec = fat->ctime;
76 	vap->va_ctime.tv_nsec = fat->ctimensec;
77 	vap->va_flags = 0;
78 	vap->va_gen = VNOVAL;
79 	vap->va_vaflags = 0;
80 
81 	KKASSERT(vap->va_type == fnp->type);
82 
83 	if (fnp->vp->v_object && fnp->sizeoverride == 0 &&
84 	    fnp->size != vap->va_size)
85 	{
86 		error = fuse_node_truncate(fnp, fnp->size, vap->va_size);
87 	}
88 
89 	fnp->attrgood = 1;
90 
91 	return error;
92 }
93 
/*
 * VOP_ACCESS: permission check forwarded to userland via FUSE_ACCESS.
 *
 * Failures are deliberately forgiving: a dead mount or an unimplemented
 * FUSE_ACCESS grants access and lets the individual operations fail later.
 */
static int
fuse_vop_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;
	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
	struct fuse_ipc *fip;
	struct fuse_access_in *fai;
	uint32_t mask;
	int error;

	/* Dead mount: pretend success, later operations will fail. */
	if (fuse_test_dead(fmp))
		return 0;

	/* Server never implemented FUSE_ACCESS: grant access. */
	if (fuse_test_nosys(fmp, FUSE_ACCESS))
		return 0;

	switch (vp->v_type) {
	case VDIR:
	case VLNK:
	case VREG:
		/* Reject writes to a read-only mount up front. */
		if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY))
			return EROFS;
		break;
	case VBLK:
	case VCHR:
	case VSOCK:
	case VFIFO:
		break;
	default:
		return EINVAL;
	}

	/* Translate kernel V* request bits to access(2)-style *_OK mask. */
	mask = F_OK;
	if (mode & VEXEC)
		mask |= X_OK;
	if (mode & VWRITE)
		mask |= W_OK;
	if (mode & VREAD)
		mask |= R_OK;

	fip = fuse_ipc_get(fmp, sizeof(*fai));
	fai = fuse_ipc_fill(fip, FUSE_ACCESS, VTOI(vp)->ino, ap->a_cred);
	fai->mask = mask;

	/*
	 * NOTE(review): error paths throughout this file return without
	 * fuse_ipc_put(); presumably fuse_ipc_tx() releases fip on failure
	 * -- confirm against fuse_ipc.c.
	 */
	error = fuse_ipc_tx(fip);
	if (error) {
		if (error == ENOSYS)
			error = 0;
		/* Keep the root vnode accessible while the daemon is gone. */
		if (error == ENOTCONN && (vp->v_flag & VROOT))
			error = 0;
		return error;
	}

	fuse_ipc_put(fip);

	return 0;
}
152 
/*
 * VOP_OPEN: acquire a FUSE file-handle via FUSE_OPEN/FUSE_OPENDIR when
 * the node does not already have one, then complete with vop_stdopen().
 */
static int
fuse_vop_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
	struct fuse_node *fnp = VTOI(vp);
	struct fuse_ipc *fip;
	struct fuse_open_in *foi;
	struct fuse_open_out *foo;
	int error, op;

	if (fuse_test_dead(fmp))
		return ENOTCONN;

	if (fuse_test_nosys(fmp, FUSE_OPEN))
		return EOPNOTSUPP;

	/*
	 * Reopen with userland process if the vnode doesn't have a
	 * file-handle.  This can occur if the vnode is new or if it
	 * was previously deactivated.
	 */
	if (fnp->fh == 0) {
		/* Directories use the dedicated FUSE_OPENDIR opcode. */
		if (vp->v_type == VDIR)
			op = FUSE_OPENDIR;
		else
			op = FUSE_OPEN;

		fip = fuse_ipc_get(fmp, sizeof(*foi));
		foi = fuse_ipc_fill(fip, op, fnp->ino, ap->a_cred);
		/* Convert kernel F* mode bits to open(2) O_* flags. */
		foi->flags = OFLAGS(ap->a_mode);
		fuse_dbg("flags=%X\n", foi->flags);
		/* The file exists here; creation is handled by ncreate. */
		if (foi->flags & O_CREAT) {
			fuse_dbg("drop O_CREAT\n");
			foi->flags &= ~O_CREAT;
		}

		error = fuse_ipc_tx(fip);
		if (error)
			return error;

		/* XXX unused -- FOPEN_* cache hints from userland ignored */
		foo = fuse_out_data(fip);
		if (foo->open_flags & FOPEN_DIRECT_IO)
			;
		else if (foo->open_flags & FOPEN_KEEP_CACHE)
			;
		else if (foo->open_flags & FOPEN_NONSEEKABLE)
			;
		else if (foo->open_flags & FOPEN_CACHE_DIR)
			;

		fnp->closed = false;
		fnp->fh = foo->fh;
		fuse_ipc_put(fip);
	}

	return vop_stdopen(ap);
}
212 
213 /*
214  * NOTE: We do not release the file-handle on close() as the vnode
215  *	 may still be in active use as an active directory or memory-mapped.
216  *
217  *	 However, to reduce overhead we issue vfinalize() to tell the kernel
218  *	 to attempt to finalize (deactivate) the vnode as soon as it can.
219  */
220 static int
221 fuse_vop_close(struct vop_close_args *ap)
222 {
223 	struct vnode *vp = ap->a_vp;
224 	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
225 
226 	if (fuse_test_dead(fmp))
227 		return 0;
228 
229 	if (fuse_test_nosys(fmp, FUSE_RELEASE) ||
230 	    fuse_test_nosys(fmp, FUSE_RELEASEDIR))
231 	{
232 		return EOPNOTSUPP;
233 	}
234 
235 	/*
236 	 * Finalize immediately if not dirty, otherwise we will check
237 	 * during the fsync and try to finalize then.
238 	 */
239 	if ((vp->v_flag & VISDIRTY) == 0 &&
240 	    RB_EMPTY(&vp->v_rbdirty_tree))
241 	{
242 		vfinalize(vp);
243 	}
244 
245 	return vop_stdclose(ap);
246 }
247 
/*
 * VOP_FSYNC: flush dirty buffers locally, then ask the userland server
 * to sync via FUSE_FSYNC/FUSE_FSYNCDIR when a file-handle exists.
 */
static int
fuse_vop_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
	struct fuse_ipc *fip;
	struct fuse_fsync_in *fsi;
	struct fuse_node *fnp = VTOI(vp);
	int error, op;

	if (fuse_test_dead(fmp))
		return 0;

	if (fuse_test_nosys(fmp, FUSE_FSYNC))
		return 0;

	/*
	 * fsync any dirty buffers, wait for completion.
	 */
	vclrisdirty(vp);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
	bio_track_wait(&vp->v_track_write, 0, 0);
	/* All local size changes have been pushed out. */
	fnp->sizeoverride = 0;

	/*
	 * Ask DragonFly to deactivate the vnode ASAP if it is no longer
	 * open.
	 */
	if (vp->v_opencount == 0)
		vfinalize(vp);

	if (fnp->fh) {
		/* Directories use the dedicated FUSE_FSYNCDIR opcode. */
		if (vp->v_type == VDIR)
			op = FUSE_FSYNCDIR;
		else
			op = FUSE_FSYNC;

		fip = fuse_ipc_get(fmp, sizeof(*fsi));
		fsi = fuse_ipc_fill(fip, op, VTOI(vp)->ino, NULL);
		fsi->fh = VTOI(vp)->fh;
		fsi->fsync_flags = 1; /* datasync */

		/* NOTE(review): on error fip is presumably released by
		 * fuse_ipc_tx() -- pattern is consistent file-wide. */
		error = fuse_ipc_tx(fip);
		if (error == 0)
			fuse_ipc_put(fip);
	} else {
		/* Never opened by userland; the local flush suffices. */
		error = 0;
	}

	return error;
}
299 
/*
 * VOP_GETATTR: return cached attributes when valid, otherwise fetch
 * fresh ones from userland via FUSE_GETATTR and refresh the cache.
 */
static int
fuse_vop_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
	struct fuse_node *fnp = VTOI(vp);
	struct fuse_ipc *fip;
	struct fuse_getattr_in *fgi;
	struct fuse_attr_out *fao;
	int error;

	if (fuse_test_dead(fmp))
		return 0;

	if (fuse_test_nosys(fmp, FUSE_GETATTR))
		return 0;

	if (fnp->attrgood == 0) {
		/*
		 * Acquire new attribute
		 */
		fip = fuse_ipc_get(fmp, sizeof(*fgi));
		fgi = fuse_ipc_fill(fip, FUSE_GETATTR, fnp->ino, NULL);
#if 0
		/* this may be called before open when fh is 0 */
		fgi->getattr_flags |= FUSE_GETATTR_FH;
		fgi->fh = fnp->fh;
#endif
		error = fuse_ipc_tx(fip);
		if (error) {
			if (error == ENOSYS)
				error = 0;
			/*
			 * Daemon gone: fabricate minimal attributes for the
			 * root vnode so the mount stays traversable.
			 */
			if (error == ENOTCONN && (vp->v_flag & VROOT)) {
				memset(vap, 0, sizeof(*vap));
				vap->va_type = vp->v_type;
				error = 0;
			}
			return error;
		}

		fao = fuse_out_data(fip);
		mtx_lock(&fnp->node_lock);
		fuse_set_attr(fnp, &fao->attr);
		memcpy(vap, &fnp->attr, sizeof(*vap));
		/* unused */
		//fao->attr_valid;
		//fao->attr_valid_nsec;
		mtx_unlock(&fnp->node_lock);

		fuse_ipc_put(fip);
	} else {
		/*
		 * Use cached attribute
		 */
		memcpy(vap, &fnp->attr, sizeof(*vap));
	}

	/* Attribute type must agree with the vnode type. */
	if (vap->va_type != vp->v_type)
		return EINVAL;

	return 0;
}
363 
364 static int
365 fuse_vop_setattr(struct vop_setattr_args *ap)
366 {
367 	struct vnode *vp = ap->a_vp;
368 	struct vattr *vap = ap->a_vap;
369 	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
370 	struct fuse_node *fnp = VTOI(vp);
371 	struct fuse_ipc *fip;
372 	struct fuse_setattr_in *fsi, arg;
373 	struct fuse_attr_out *fao;
374 	int kflags = 0;
375 	int error = 0;
376 
377 	if (fuse_test_dead(fmp))
378 		return 0;
379 
380 	if (fuse_test_nosys(fmp, FUSE_SETATTR))
381 		return 0;
382 
383 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
384 		return EROFS;
385 
386 	memset(&arg, 0, sizeof(arg));
387 	mtx_lock(&fnp->node_lock);
388 
389 	if (!error && (vap->va_flags != VNOVAL)) {
390 		mtx_unlock(&fnp->node_lock);
391 		kflags |= NOTE_ATTRIB;
392 		return EOPNOTSUPP; /* XXX */
393 	}
394 
395 	if (!error && (vap->va_size != VNOVAL)) {
396 		if (vp->v_type == VDIR) {
397 			mtx_unlock(&fnp->node_lock);
398 			return EISDIR;
399 		}
400 		if (vp->v_type == VREG &&
401 		    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
402 			mtx_unlock(&fnp->node_lock);
403 			return EROFS;
404 		}
405 		arg.size = vap->va_size;
406 		arg.valid |= FATTR_SIZE;
407 		if (vap->va_size > fnp->size)
408 			kflags |= NOTE_WRITE | NOTE_EXTEND;
409 		else
410 			kflags |= NOTE_WRITE;
411 	}
412 
413 	if (!error && (vap->va_uid != (uid_t)VNOVAL ||
414 	    vap->va_gid != (gid_t)VNOVAL)) {
415 		mode_t mode;
416 		error = vop_helper_chown(vp, vap->va_uid, vap->va_gid,
417 		    ap->a_cred, &arg.uid, &arg.gid, &mode);
418 		arg.valid |= FATTR_UID;
419 		arg.valid |= FATTR_GID;
420 		kflags |= NOTE_ATTRIB;
421 	}
422 
423 	if (!error && (vap->va_mode != (mode_t)VNOVAL)) {
424 		error = vop_helper_chmod(vp, vap->va_mode, ap->a_cred,
425 		    vap->va_uid, vap->va_gid, (mode_t*)&arg.mode);
426 		arg.valid |= FATTR_MODE;
427 		kflags |= NOTE_ATTRIB;
428 	}
429 
430 	if (!error && (vap->va_atime.tv_sec != VNOVAL &&
431 	    vap->va_atime.tv_nsec != VNOVAL)) {
432 		arg.atime = vap->va_atime.tv_sec;
433 		arg.atimensec = vap->va_atime.tv_nsec;
434 		arg.valid |= FATTR_ATIME;
435 		kflags |= NOTE_ATTRIB;
436 	}
437 
438 	if (!error && (vap->va_mtime.tv_sec != VNOVAL &&
439 	    vap->va_mtime.tv_nsec != VNOVAL)) {
440 		arg.mtime = vap->va_mtime.tv_sec;
441 		arg.mtimensec = vap->va_mtime.tv_nsec;
442 		arg.valid |= FATTR_MTIME;
443 		kflags |= NOTE_ATTRIB;
444 	}
445 
446 	if (!error && (vap->va_ctime.tv_sec != VNOVAL &&
447 	    vap->va_ctime.tv_nsec != VNOVAL)) {
448 		arg.ctime = vap->va_ctime.tv_sec;
449 		arg.ctimensec = vap->va_ctime.tv_nsec;
450 		arg.valid |= FATTR_CTIME;
451 		kflags |= NOTE_ATTRIB;
452 	}
453 
454 	mtx_unlock(&fnp->node_lock);
455 
456 	if (error)
457 		return error;
458 	if (!arg.valid)
459 		return 0;
460 
461 	fip = fuse_ipc_get(fmp, sizeof(*fsi));
462 	fsi = fuse_ipc_fill(fip, FUSE_SETATTR, fnp->ino, ap->a_cred);
463 	memcpy(fsi, &arg, sizeof(arg));
464 #if 0
465 	fsi->valid |= FATTR_FH;
466 	fsi->fh = fnp->fh;
467 #endif
468 	error = fuse_ipc_tx(fip);
469 	if (error)
470 		return error;
471 
472 	fao = fuse_out_data(fip);
473 	if (IFTOVT(fao->attr.mode) != vp->v_type) {
474 		fuse_ipc_put(fip);
475 		return EINVAL;
476 	}
477 	mtx_lock(&fnp->node_lock);
478 	fuse_set_attr(fnp, &fao->attr);
479 	/* unused */
480 	//fao->attr_valid;
481 	//fao->attr_valid_nsec;
482 	mtx_unlock(&fnp->node_lock);
483 
484 	fuse_ipc_put(fip);
485 	fuse_knote(vp, kflags);
486 
487 	return 0;
488 }
489 
/*
 * VOP_NRESOLVE: translate a namecache entry into a vnode via FUSE_LOOKUP
 * and install it in the namecache.
 */
static int
fuse_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
	struct fuse_node *dfnp = VTOI(dvp);
	struct fuse_node *fnp;
	struct fuse_ipc *fip;
	struct fuse_entry_out *feo;
	char *p, tmp[1024];
	uint32_t mode;
	enum vtype vtyp;
	int error;
	int forgettable;

	if (fuse_test_dead(fmp))
		return ENOTCONN;

	if (fuse_test_nosys(fmp, FUSE_LOOKUP))
		return EOPNOTSUPP;

	/* Request payload is the NUL-terminated component name. */
	fip = fuse_ipc_get(fmp, ncp->nc_nlen + 1);
	p = fuse_ipc_fill(fip, FUSE_LOOKUP, dfnp->ino, ap->a_cred);

	memcpy(p, ncp->nc_name, ncp->nc_nlen);
	p[ncp->nc_nlen] = '\0';
	/* Keep a copy for debug output; p lives inside fip. */
	strlcpy(tmp, p, sizeof(tmp));

	/*
	 * "." and ".." are not ref-counted by the fuse userland
	 * (their API is basically broken but, meh).
	 */
	forgettable = 0;
	if (strcmp(p, ".") != 0 && strcmp(p, "..") != 0)
		forgettable = 1;

	error = fuse_ipc_tx(fip);
	if (error == ENOENT) {
		/* Negative namecache entry. */
		cache_setvp(ap->a_nch, NULL);
		fuse_dbg("lookup \"%s\" ENOENT\n", tmp);
		return ENOENT;
	} else if (error) {
		fuse_dbg("lookup \"%s\" error=%d\n", tmp, error);
		return error;
	}

	feo = fuse_out_data(fip);
	fuse_dbg("lookup \"%s\" ino=%ju/%ju\n", p, feo->nodeid, feo->attr.ino);

	/*
	 * Apparently in later FUSEs this means a cacheable ENOENT
	 */
	if (feo->nodeid == 0) {
		fuse_ipc_put(fip);
		cache_setvp(ap->a_nch, NULL);
		return ENOENT;
	}
	/* nodeid 1 is the root; the root is never forgotten. */
	if (feo->nodeid == 1)
		forgettable = 0;

	mode = feo->attr.mode;

	/* Map the S_IF* file type bits to a vnode type. */
	if (S_ISREG(mode))
		vtyp = VREG;
	else if (S_ISDIR(mode))
		vtyp = VDIR;
	else if (S_ISBLK(mode))
		vtyp = VBLK;
	else if (S_ISCHR(mode))
		vtyp = VCHR;
	else if (S_ISLNK(mode))
		vtyp = VLNK;
	else if (S_ISSOCK(mode))
		vtyp = VSOCK;
	else if (S_ISFIFO(mode))
		vtyp = VFIFO;
	else
		vtyp = VBAD;

	error = fuse_alloc_node(fmp, dfnp, feo->nodeid, vtyp, &vp);
	if (error == 0) {
		KKASSERT(vp);
		KKASSERT(vn_islocked(vp));

		vn_unlock(vp);
		cache_setvp(ap->a_nch, vp);
		vrele(vp);

		/* unused */
		//feo->generation;
		//feo->entry_valid;
		//feo->attr_valid;
		//feo->entry_valid_nsec;
		//feo->attr_valid_nsec;
		/*
		 * NOTE(review): vp is referenced after vrele(); presumably
		 * the ref taken by cache_setvp() keeps it alive -- confirm.
		 */
		fnp = VTOI(vp);

		/* Track lookups owed to userland for a later FUSE_FORGET. */
		if (forgettable)
			atomic_add_64(&fnp->nlookup, 1);
	} else {
#if 0
		/* sshfs fails utterly if we issue FUSE_FORGET */
		if (forgettable)
			fuse_forget_node(fmp, feo->nodeid, 1, NULL);
#endif
	}
	fuse_ipc_put(fip);

	return error;
}
601 
602 static int
603 fuse_vop_nlink(struct vop_nlink_args *ap)
604 {
605 	struct vnode *dvp = ap->a_dvp;
606 	struct vnode *vp = ap->a_vp;
607 	struct namecache *ncp = ap->a_nch->ncp;
608 	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
609 	struct fuse_node *dfnp = VTOI(dvp);
610 	struct fuse_node *fnp = VTOI(vp);
611 	struct fuse_ipc *fip;
612 	struct fuse_link_in *fli;
613 	struct fuse_entry_out *feo;
614 	char *p;
615 	int error;
616 
617 	if (fuse_test_dead(fmp))
618 		return ENOTCONN;
619 
620 	if (fuse_test_nosys(fmp, FUSE_LINK))
621 		return EOPNOTSUPP;
622 
623 	if (vp->v_type == VDIR)
624 		return EPERM;
625 	if (dvp->v_mount != vp->v_mount)
626 		return EXDEV;
627 
628 	fip = fuse_ipc_get(fmp, sizeof(fli) + ncp->nc_nlen + 1);
629 	fli = fuse_ipc_fill(fip, FUSE_LINK, dfnp->ino, ap->a_cred);
630 	fli->oldnodeid = fnp->ino;
631 
632 	p = (char*)(fli + 1);
633 	memcpy(p, ncp->nc_name, ncp->nc_nlen);
634 	p[ncp->nc_nlen] = '\0';
635 
636 	error = fuse_ipc_tx(fip);
637 	if (error)
638 		return error;
639 
640 	feo = fuse_out_data(fip);
641 	if (IFTOVT(feo->attr.mode) != vp->v_type) {
642 		fuse_ipc_put(fip);
643 		return EINVAL;
644 	}
645 
646 	mtx_lock(&dfnp->node_lock);
647 	mtx_lock(&fnp->node_lock);
648 	fuse_set_attr(fnp, &feo->attr);
649 	mtx_unlock(&fnp->node_lock);
650 	mtx_unlock(&dfnp->node_lock);
651 
652 	cache_setunresolved(ap->a_nch);
653 	cache_setvp(ap->a_nch, vp);
654 	fuse_knote(dvp, NOTE_WRITE);
655 	fuse_knote(vp, NOTE_LINK);
656 
657 	/* unused */
658 	//feo->nodeid;
659 	//feo->generation;
660 	//feo->entry_valid;
661 	//feo->attr_valid;
662 	//feo->entry_valid_nsec;
663 	//feo->attr_valid_nsec;
664 
665 	fuse_ipc_put(fip);
666 
667 	return 0;
668 }
669 
/*
 * VOP_NCREATE: atomically create and open a regular file via FUSE_CREATE.
 * The reply carries both a fuse_entry_out and a fuse_open_out, so the
 * resulting node starts out with a valid file-handle.
 */
static int
fuse_vop_ncreate(struct vop_ncreate_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
	struct fuse_node *dfnp = VTOI(dvp);
	struct fuse_node *fnp;
	struct fuse_ipc *fip;
	struct fuse_create_in *fci;
	struct fuse_entry_out *feo;
	struct fuse_open_out *foo;
	enum vtype vtyp;
	char *p;
	int error;

	if (fuse_test_dead(fmp))
		return ENOTCONN;

	if (fuse_test_nosys(fmp, FUSE_CREATE))
		return EOPNOTSUPP;

	/* Payload: fuse_create_in followed by the NUL-terminated name. */
	fip = fuse_ipc_get(fmp, sizeof(*fci) + ncp->nc_nlen + 1);
	fci = fuse_ipc_fill(fip, FUSE_CREATE, dfnp->ino, ap->a_cred);
	fci->flags = OFLAGS(ap->a_vap->va_fuseflags);
	fci->mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode);
	/* unused */
	//fci->umask = ...;
	fuse_dbg("flags=%X mode=%X\n", fci->flags, fci->mode);

	p = (char*)(fci + 1);
	memcpy(p, ncp->nc_name, ncp->nc_nlen);
	p[ncp->nc_nlen] = '\0';

	error = fuse_ipc_tx(fip);
	if (error)
		return error;

	/* Reply is fuse_entry_out immediately followed by fuse_open_out. */
	feo = fuse_out_data(fip);
	foo = (struct fuse_open_out*)(feo + 1);
	vtyp = IFTOVT(feo->attr.mode);
	if (vtyp != VREG && vtyp != VSOCK) {
		fuse_ipc_put(fip);
		return EINVAL;
	}

	/*
	 * NOTE(review): vtyp may be VSOCK here but the node is always
	 * allocated as VREG -- looks intentional (sockets created via
	 * FUSE_CREATE behave as files) but worth confirming.
	 */
	error = fuse_alloc_node(fmp, dfnp, feo->nodeid, VREG, &vp);
	if (error == 0) {
		KKASSERT(vp);
		KKASSERT(vn_islocked(vp));

		fnp = VTOI(vp);
		mtx_lock(&fnp->node_lock);
		fuse_set_attr(fnp, &feo->attr);
		mtx_unlock(&fnp->node_lock);
		/* Keep the handle returned by CREATE; no separate OPEN. */
		fnp->fh = foo->fh;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, vp);
		*(ap->a_vpp) = vp;
		fuse_knote(dvp, NOTE_WRITE);

		/* unused */
		//feo->generation;
		//feo->entry_valid;
		//feo->attr_valid;
		//feo->entry_valid_nsec;
		//feo->attr_valid_nsec;
		/* unused */
		//foo->open_flags;
	}
	fuse_ipc_put(fip);

	return error;
}
746 
/*
 * VOP_NMKNOD: create a device/fifo node via FUSE_MKNOD.
 */
static int
fuse_vop_nmknod(struct vop_nmknod_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
	struct fuse_node *dfnp = VTOI(dvp);
	struct fuse_node *fnp;
	struct fuse_ipc *fip;
	struct fuse_mknod_in *fmi;
	struct fuse_entry_out *feo;
	enum vtype vtyp;
	char *p;
	int error;

	if (fuse_test_dead(fmp))
		return ENOTCONN;

	if (fuse_test_nosys(fmp, FUSE_MKNOD))
		return EOPNOTSUPP;

	/* Payload: fuse_mknod_in followed by the NUL-terminated name. */
	fip = fuse_ipc_get(fmp, sizeof(*fmi) + ncp->nc_nlen + 1);
	fmi = fuse_ipc_fill(fip, FUSE_MKNOD, dfnp->ino, ap->a_cred);
	fmi->mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode);
	/* unused */
	//fmi->rdev = ...;
	//fmi->umask = ...;

	p = (char*)(fmi + 1);
	memcpy(p, ncp->nc_name, ncp->nc_nlen);
	p[ncp->nc_nlen] = '\0';

	error = fuse_ipc_tx(fip);
	if (error)
		return error;

	/* Only device and fifo types are legal results of mknod. */
	feo = fuse_out_data(fip);
	vtyp = IFTOVT(feo->attr.mode);
	if (vtyp != VBLK && vtyp != VCHR && vtyp != VFIFO) {
		fuse_ipc_put(fip);
		return EINVAL;
	}

	/* Allocate with the caller-requested type, not the reply's. */
	error = fuse_alloc_node(fmp, dfnp, feo->nodeid,
				ap->a_vap->va_type, &vp);
	if (error == 0) {
		KKASSERT(vp);
		KKASSERT(vn_islocked(vp));

		fnp = VTOI(vp);
		mtx_lock(&fnp->node_lock);
		fuse_set_attr(fnp, &feo->attr);
		mtx_unlock(&fnp->node_lock);

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, vp);
		*(ap->a_vpp) = vp;
		fuse_knote(dvp, NOTE_WRITE);

		/* unused */
		//feo->generation;
		//feo->entry_valid;
		//feo->attr_valid;
		//feo->entry_valid_nsec;
		//feo->attr_valid_nsec;
	}
	fuse_ipc_put(fip);

	return error;
}
818 
/*
 * VOP_NREMOVE: unlink a file via FUSE_UNLINK.
 */
static int
fuse_vop_nremove(struct vop_nremove_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
	struct fuse_node *dfnp = VTOI(dvp);
	struct fuse_node *fnp;
	struct fuse_ipc *fip;
	char *p;
	int error;

	if (fuse_test_dead(fmp))
		return ENOTCONN;

	if (fuse_test_nosys(fmp, FUSE_UNLINK))
		return EOPNOTSUPP;

	/* Get a referenced, shared-locked vnode for the target. */
	error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp);
	if (error)
		return error;
	KKASSERT(vp->v_mount == dvp->v_mount);

	/*
	 * Clean-up the deletion target to avoid .fuse_hidden*
	 * files.
	 *
	 * NOTE: XXX v_opencount check does not take mmap/filepointers
	 *	 into account.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);
	if (vp->v_opencount == 0) {
		fnp = VTOI(vp);
		/* Drop the FUSE file-handle before the unlink RPC. */
		fuse_release(fmp, fnp);
	}
	vn_unlock(vp);

	/* Payload: the NUL-terminated component name. */
	fip = fuse_ipc_get(fmp, ncp->nc_nlen + 1);
	p = fuse_ipc_fill(fip, FUSE_UNLINK, dfnp->ino, ap->a_cred);

	memcpy(p, ncp->nc_name, ncp->nc_nlen);
	p[ncp->nc_nlen] = '\0';

	error = fuse_ipc_tx(fip);
	if (error) {
		vrele(vp);
		return error;
	}

	fnp = VTOI(vp);

	cache_unlink(ap->a_nch);
	fuse_knote(dvp, NOTE_WRITE);
	fuse_knote(vp, NOTE_DELETE);

	fuse_ipc_put(fip);
	vrele(vp);

	return 0;
}
880 
/*
 * VOP_NMKDIR: create a directory via FUSE_MKDIR.
 */
static int
fuse_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
	struct fuse_node *dfnp = VTOI(dvp);
	struct fuse_node *fnp;
	struct fuse_ipc *fip;
	struct fuse_mkdir_in *fmi;
	struct fuse_entry_out *feo;
	char *p;
	int error;

	if (fuse_test_dead(fmp))
		return ENOTCONN;

	if (fuse_test_nosys(fmp, FUSE_MKDIR))
		return EOPNOTSUPP;

	/* Payload: fuse_mkdir_in followed by the NUL-terminated name. */
	fip = fuse_ipc_get(fmp, sizeof(*fmi) + ncp->nc_nlen + 1);
	fmi = fuse_ipc_fill(fip, FUSE_MKDIR, dfnp->ino, ap->a_cred);
	fmi->mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode);

	p = (char*)(fmi + 1);
	memcpy(p, ncp->nc_name, ncp->nc_nlen);
	p[ncp->nc_nlen] = '\0';

	error = fuse_ipc_tx(fip);
	if (error)
		return error;

	/* The reply must describe a directory. */
	feo = fuse_out_data(fip);
	if (IFTOVT(feo->attr.mode) != VDIR) {
		fuse_ipc_put(fip);
		return EINVAL;
	}

	error = fuse_alloc_node(fmp, dfnp, feo->nodeid, VDIR, &vp);
	if (error == 0) {
		KKASSERT(vp);
		KKASSERT(vn_islocked(vp));

		fnp = VTOI(vp);
		mtx_lock(&fnp->node_lock);
		fuse_set_attr(fnp, &feo->attr);
		mtx_unlock(&fnp->node_lock);

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, vp);
		*(ap->a_vpp) = vp;
		/* NOTE_LINK: the parent's link count changed (new "..") */
		fuse_knote(dvp, NOTE_WRITE | NOTE_LINK);

		/* unused */
		//feo->generation;
		//feo->entry_valid;
		//feo->attr_valid;
		//feo->entry_valid_nsec;
		//feo->attr_valid_nsec;
	}
	fuse_ipc_put(fip);

	return error;
}
946 
947 static int
948 fuse_vop_nrmdir(struct vop_nrmdir_args *ap)
949 {
950 	struct vnode *dvp = ap->a_dvp;
951 	struct vnode *vp;
952 	struct namecache *ncp = ap->a_nch->ncp;
953 	struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
954 	struct fuse_node *dfnp = VTOI(dvp);
955 	struct fuse_node *fnp;
956 	struct fuse_ipc *fip;
957 	char *p;
958 	int error;
959 
960 	if (fuse_test_dead(fmp))
961 		return ENOTCONN;
962 
963 	if (fuse_test_nosys(fmp, FUSE_RMDIR))
964 		return EOPNOTSUPP;
965 
966 	error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp);
967 	KKASSERT(vp->v_mount == dvp->v_mount);
968 	KKASSERT(!error); /* from tmpfs */
969 	vn_unlock(vp);
970 
971 	fip = fuse_ipc_get(fmp, ncp->nc_nlen + 1);
972 	p = fuse_ipc_fill(fip, FUSE_RMDIR, dfnp->ino, ap->a_cred);
973 
974 	memcpy(p, ncp->nc_name, ncp->nc_nlen);
975 	p[ncp->nc_nlen] = '\0';
976 
977 	error = fuse_ipc_tx(fip);
978 	if (error) {
979 		vrele(vp);
980 		return error;
981 	}
982 
983 	fnp = VTOI(vp);
984 
985 	cache_unlink(ap->a_nch);
986 	fuse_knote(dvp, NOTE_WRITE | NOTE_LINK);
987 
988 	fuse_ipc_put(fip);
989 	vrele(vp);
990 
991 	return 0;
992 }
993 
994 static int
995 fuse_vop_pathconf(struct vop_pathconf_args *ap)
996 {
997 	switch (ap->a_name) {
998 	case _PC_FILESIZEBITS:
999 		*ap->a_retval = 64;
1000 		break;
1001 	case _PC_NO_TRUNC:
1002 		*ap->a_retval = 1;
1003 		break;
1004 	default:
1005 		return vop_stdpathconf(ap);
1006 	}
1007 
1008 	return 0;
1009 }
1010 
/*
 * VOP_READDIR: fetch the whole directory from userland with a single
 * FUSE_READDIR, then translate fuse_dirents into the caller's uio,
 * skipping entries below the requested uio_offset.
 */
static int
fuse_vop_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
	struct fuse_ipc *fip;
	struct fuse_read_in *fri;
	const char *buf;
	size_t len;
	off_t cur_offset = 0;
	int error;

	if (fuse_test_dead(fmp))
		return ENOTCONN;

	if (fuse_test_nosys(fmp, FUSE_READDIR))
		return EOPNOTSUPP;

	fip = fuse_ipc_get(fmp, sizeof(*fri));
	fri = fuse_ipc_fill(fip, FUSE_READDIR, VTOI(vp)->ino, ap->a_cred);
	fri->fh = VTOI(vp)->fh;
	/* Always read from the start; seeking is done via cur_offset. */
	fri->offset = 0;
	/*
	 * XXX This needs to be large enough to read all entries at once.
	 * FUSE filesystems typically just opendir/readdir and return entries.
	 */
	fri->size = FUSE_BLKSIZE * 10;
	/* unused */
	//fri->read_flags = ...;
	//fri->lock_owner = ...;
	//fri->flags = ...;

	error = fuse_ipc_tx(fip);
	if (error)
		return error;

	buf = fuse_out_data(fip);
	len = fuse_out_data_size(fip);

	/* Walk the packed fuse_dirent records in the reply buffer. */
	while (1) {
		const struct fuse_dirent *fde;
		size_t freclen;

		fuse_dbg("uio_offset=%ju uio_resid=%ju\n",
		    uio->uio_offset, uio->uio_resid);

		/* Less than a dirent header left: end of directory. */
		if (len < FUSE_NAME_OFFSET) {
			if (ap->a_eofflag)
				*ap->a_eofflag = 1;
			break;
		}
		/* Caller's buffer exhausted (not EOF). */
		if (uio->uio_resid < FUSE_NAME_OFFSET)
			break;

		fde = (const struct fuse_dirent*)buf;
		/* A nameless entry means a malformed reply. */
		if (!fde->namelen) {
			error = EINVAL;
			break;
		}
		freclen = FUSE_DIRENT_SIZE(fde);

		/*
		 * Also see
		 * getdirentries(2) in sys/kern/vfs_syscalls.c
		 * readdir(3) in lib/libc/gen/readdir.c
		 */
		/* Only emit entries at/after the caller's cookie offset. */
		if (cur_offset >= uio->uio_offset) {
			error = 0;
			if (vop_write_dirent(&error, uio, fde->ino, fde->type,
			    fde->namelen, fde->name))
				break;
			if (error)
				break;
			fuse_dbg("ino=%ju type=%d name=%s len=%u\n",
			    fde->ino, fde->type, fde->name, fde->namelen);
		}

		/* cookie advances by the SYSTEM reclen, buf by the FUSE one */
		cur_offset += _DIRENT_RECLEN(fde->namelen);
		buf += freclen;
		len -= freclen;
	}
	fuse_ipc_put(fip);

	return error;
}
1097 
1098 static int
1099 fuse_vop_readlink(struct vop_readlink_args *ap)
1100 {
1101 	struct vnode *vp = ap->a_vp;
1102 	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
1103 	struct fuse_ipc *fip;
1104 	int error;
1105 
1106 	if (fuse_test_dead(fmp))
1107 		return ENOTCONN;
1108 
1109 	if (fuse_test_nosys(fmp, FUSE_READLINK))
1110 		return EOPNOTSUPP;
1111 
1112 	if (vp->v_type != VLNK)
1113 		return EINVAL;
1114 
1115 	fip = fuse_ipc_get(fmp, 0);
1116 	fuse_ipc_fill(fip, FUSE_READLINK, VTOI(vp)->ino, ap->a_cred);
1117 
1118 	error = fuse_ipc_tx(fip);
1119 	if (error)
1120 		return error;
1121 
1122 	error = uiomove(fuse_out_data(fip), fuse_out_data_size(fip), ap->a_uio);
1123 
1124 	fuse_ipc_put(fip);
1125 
1126 	return error;
1127 }
1128 
/*
 * VOP_NRENAME: rename via FUSE_RENAME, releasing any file-handle on the
 * replaced target first so the server does not create .fuse_hidden*
 * files for it.
 */
static int
fuse_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp = ap->a_fnch->ncp;
	struct namecache *tncp = ap->a_tnch->ncp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = fncp->nc_vp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp;
	struct fuse_mount *fmp = VFSTOFUSE(fdvp->v_mount);
	struct fuse_node *fdfnp = VTOI(fdvp);
	struct fuse_node *ffnp = VTOI(fvp);
	struct fuse_node *tdfnp = VTOI(tdvp);
	struct fuse_node *tfnp;
	struct fuse_ipc *fip;
	struct fuse_rename_in *fri;
	char *p, *newname;
	int error;

	KKASSERT(fdvp->v_mount == fvp->v_mount);

	if (fuse_test_dead(fmp))
		return ENOTCONN;

	if (fuse_test_nosys(fmp, FUSE_RENAME))
		return EOPNOTSUPP;

	/* Resolve the target name; ENOENT just means nothing replaced. */
	error = cache_vget(ap->a_tnch, ap->a_cred, LK_SHARED, &tvp);
	if (!error) {
		tfnp = VTOI(tvp);

		/*
		 * Clean-up the deletion target to avoid .fuse_hidden*
		 * files.
		 * NOTE: XXX v_opencount check does not take mmap/filepointers
		 *	 into account.
		 */
		if (tvp->v_opencount == 0) {
			vinvalbuf(tvp, V_SAVE, 0, 0);
			fuse_release(fmp, tfnp);
		}
		vn_unlock(tvp);
	} else {
		tfnp = NULL;
	}

	/* Disallow cross-device renames.
	 * Why isn't this done by the caller? */
	if (fvp->v_mount != tdvp->v_mount ||
	    (tvp && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto out;
	}

	/* Renaming a file onto itself is a successful no-op. */
	if (fvp == tvp) {
		error = 0;
		goto out;
	}

	/* Directory/non-directory replacement compatibility checks. */
	if (tvp) {
		KKASSERT(tfnp);
		if (ffnp->type == VDIR && tfnp->type == VDIR) {
			/* depend on RPC to check if empty */
		} else if (ffnp->type == VDIR && tfnp->type != VDIR) {
			error = ENOTDIR;
			goto out;
		} else if (ffnp->type != VDIR && tfnp->type == VDIR) {
			error = EISDIR;
			goto out;
		} else
			KKASSERT(ffnp->type != VDIR && tfnp->type != VDIR);
	}

	/* Payload: fuse_rename_in + two consecutive NUL-terminated names. */
	fip = fuse_ipc_get(fmp, sizeof(*fri) + fncp->nc_nlen +
				tncp->nc_nlen + 2);
	/* There is also fuse_rename2_in with flags. */
	fri = fuse_ipc_fill(fip, FUSE_RENAME, fdfnp->ino, ap->a_cred);
	fri->newdir = tdfnp->ino;

	p = (char*)(fri + 1);
	memcpy(p, fncp->nc_name, fncp->nc_nlen);
	p[fncp->nc_nlen] = '\0';
	memcpy(p + fncp->nc_nlen + 1, tncp->nc_name, tncp->nc_nlen);
	p[fncp->nc_nlen + 1 + tncp->nc_nlen] = '\0';

	error = fuse_ipc_tx(fip);
	if (error)
		goto out;
	fuse_ipc_put(fip);

	/*
	 * NOTE(review): newname is built when the leaf name changes but is
	 * never consumed or freed below -- looks like leftover from an
	 * earlier local-rename implementation; confirm and possibly remove.
	 */
	if (fncp->nc_nlen != tncp->nc_nlen ||
	    memcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen)) {
		newname = kmalloc(tncp->nc_nlen + 1, M_TEMP, M_WAITOK | M_ZERO);
		KKASSERT(newname);
		memcpy(newname, tncp->nc_name, tncp->nc_nlen);
		newname[tncp->nc_nlen] = '\0';
		fuse_dbg("newname=\"%s\"\n", newname);
	} else
		newname = NULL;

	/* Lock order: target dir, source dir, source node. */
	mtx_lock(&tdfnp->node_lock);
	mtx_lock(&fdfnp->node_lock);
	mtx_lock(&ffnp->node_lock);

	if (tvp) {
		/* An existing target was replaced (deleted). */
		fuse_knote(tdvp, NOTE_DELETE);
	}

	mtx_unlock(&ffnp->node_lock);
	mtx_unlock(&fdfnp->node_lock);
	mtx_unlock(&tdfnp->node_lock);

	cache_rename(ap->a_fnch, ap->a_tnch);
	fuse_knote(fdvp, NOTE_WRITE);
	fuse_knote(tdvp, NOTE_WRITE);
	fuse_knote(fvp, NOTE_RENAME);
out:
	if (tvp)
		vrele(tvp);

	return error;
}
1251 
1252 static int
1253 fuse_vop_nsymlink(struct vop_nsymlink_args *ap)
1254 {
1255 	struct vnode *dvp = ap->a_dvp;
1256 	struct vnode *vp;
1257 	struct namecache *ncp = ap->a_nch->ncp;
1258 	struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
1259 	struct fuse_node *dfnp = VTOI(dvp);
1260 	struct fuse_node *fnp;
1261 	struct fuse_ipc *fip;
1262 	struct fuse_entry_out *feo;
1263 	char *p;
1264 	int error;
1265 
1266 	if (fuse_test_dead(fmp))
1267 		return ENOTCONN;
1268 
1269 	if (fuse_test_nosys(fmp, FUSE_SYMLINK))
1270 		return EOPNOTSUPP;
1271 
1272 	fip = fuse_ipc_get(fmp, strlen(ap->a_target) + 1 + ncp->nc_nlen + 1);
1273 	p = fuse_ipc_fill(fip, FUSE_SYMLINK, dfnp->ino, ap->a_cred);
1274 
1275 	memcpy(p, ncp->nc_name, ncp->nc_nlen);
1276 	p[ncp->nc_nlen] = '\0';
1277 	memcpy(p + ncp->nc_nlen + 1, ap->a_target, strlen(ap->a_target) + 1);
1278 
1279 	error = fuse_ipc_tx(fip);
1280 	if (error)
1281 		return error;
1282 
1283 	feo = fuse_out_data(fip);
1284 	if (IFTOVT(feo->attr.mode) != VLNK) {
1285 		fuse_ipc_put(fip);
1286 		return EINVAL;
1287 	}
1288 
1289 	error = fuse_alloc_node(fmp, dfnp, feo->nodeid, VLNK, &vp);
1290 	if (error == 0) {
1291 		KKASSERT(vp);
1292 		KKASSERT(vn_islocked(vp));
1293 
1294 		fnp = VTOI(vp);
1295 		mtx_lock(&fnp->node_lock);
1296 		fuse_set_attr(fnp, &feo->attr);
1297 		mtx_unlock(&fnp->node_lock);
1298 
1299 		cache_setunresolved(ap->a_nch);
1300 		cache_setvp(ap->a_nch, vp);
1301 		*(ap->a_vpp) = vp;
1302 		fuse_knote(vp, NOTE_WRITE);
1303 
1304 		/* unused */
1305 		//feo->generation;
1306 		//feo->entry_valid;
1307 		//feo->attr_valid;
1308 		//feo->entry_valid_nsec;
1309 		//feo->attr_valid_nsec;
1310 	}
1311 	fuse_ipc_put(fip);
1312 
1313 	return error;
1314 }
1315 
/*
 * fuse_vop_read() - VOP_READ for regular fuse files.
 *
 * Tries the VM page-cache shortcut first, then falls back to buffer
 * cache I/O, which fuse_vop_strategy() forwards to the fuse server
 * via the helper thread.  Returns 0 or an errno.
 */
static int
fuse_vop_read(struct vop_read_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
	struct fuse_node *fnp;
	off_t base_offset;	/* block-aligned file offset */
	size_t offset;		/* byte offset within the block */
	size_t len;		/* bytes copied per loop iteration */
	size_t resid;		/* original request size */
	int error;
	int seqcount;

	/*
	 * Check the basics
	 */
	if (fuse_test_dead(fmp))
		return ENOTCONN;
	if (fuse_test_nosys(fmp, FUSE_READ))
		return EOPNOTSUPP;
	if (uio->uio_offset < 0)
		return EINVAL;
	if (vp->v_type != VREG)
		return EINVAL;

	/*
	 * Extract node, try to shortcut the operation through
	 * the VM page cache, allowing us to avoid buffer cache
	 * overheads.
	 */
	fnp = VTOI(vp);
	resid = uio->uio_resid;
	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	error = vop_helper_read_shortcut(ap);
	if (error)
		return error;
	if (uio->uio_resid == 0) {
		/* Shortcut satisfied everything; if any data actually
		 * moved we still record the access below. */
		if (resid)
			goto finished;
		return error;
	}

	/*
	 * Fall-through to our normal read code.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < fnp->size) {
		/*
		 * Use buffer cache I/O (via fuse_vop_strategy)
		 */
		offset = (size_t)uio->uio_offset & FUSE_BLKMASK64;
		base_offset = (off_t)uio->uio_offset - offset;
		bp = getcacheblk(vp, base_offset,
				 FUSE_BLKSIZE, GETBLK_KVABIO);
		if (bp == NULL) {
			/* Block not cached; issue a (clustered) read. */
			if (1 /* fuse_cluster_rd_enable XXX sysctl */) {
				error = cluster_readx(vp, fnp->size,
						     base_offset,
						     FUSE_BLKSIZE,
						     B_NOTMETA | B_KVABIO,
						     uio->uio_resid,
						     seqcount * MAXBSIZE,
						     &bp);
			} else {
				error = bread_kvabio(vp, base_offset,
						     FUSE_BLKSIZE, &bp);
			}
			if (error) {
				brelse(bp);
				kprintf("fuse_vop_read bread error %d\n",
					error);
				break;
			}

			/*
			 * Only do this if the VOP is coming from a normal
			 * read/write.  The VM system handles the case for
			 * UIO_NOCOPY.
			 */
			if (uio->uio_segflg != UIO_NOCOPY)
				vm_wait_nominal();
		}
		bp->b_flags |= B_CLUSTEROK;
		bkvasync(bp);

		/*
		 * Figure out how many bytes we can actually copy this loop:
		 * bounded by the block end, the request, and the file EOF.
		 */
		len = FUSE_BLKSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;
		if (len > fnp->size - uio->uio_offset)
			len = (size_t)(fnp->size - uio->uio_offset);

		error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
		bqrelse(bp);
		if (error) {
			kprintf("fuse_vop_read uiomove error %d\n", error);
			break;
		}
	}

finished:
	/* Flag the node accessed; take the lock only when it changes. */
	if (fnp->accessed == 0) {
		mtx_lock(&fnp->node_lock);
		fnp->accessed = 1;
		mtx_unlock(&fnp->node_lock);
	}
	return (error);
}
1427 
1428 static int
1429 fuse_vop_write(struct vop_write_args *ap)
1430 {
1431 	struct vnode *vp = ap->a_vp;
1432 	struct uio *uio = ap->a_uio;
1433 	struct thread *td = uio->uio_td;
1434 	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
1435 	struct fuse_node *fnp;
1436 	boolean_t extended;
1437 	off_t oldsize;
1438 	off_t newsize;
1439 	int error;
1440 	off_t base_offset;
1441 	size_t offset;
1442 	size_t len;
1443 	struct rlimit limit;
1444 	int trivial = 0;
1445 	int kflags = 0;
1446 	int ioflag = ap->a_ioflag;
1447 	int seqcount;
1448 	int endofblk;
1449 
1450 	if (fuse_test_dead(fmp))
1451 		return ENOTCONN;
1452 
1453 	if (fuse_test_nosys(fmp, FUSE_WRITE))
1454 		return EOPNOTSUPP;
1455 
1456 	error = 0;
1457 	if (uio->uio_resid == 0)
1458 		return error;
1459 
1460 	fnp = VTOI(vp);
1461 
1462 	if (vp->v_type != VREG)
1463 		return (EINVAL);
1464 	seqcount = ioflag >> IO_SEQSHIFT;
1465 
1466 	mtx_lock(&fnp->node_lock);
1467 
1468 	oldsize = fnp->size;
1469 	newsize = uio->uio_offset + uio->uio_resid;
1470 	if (newsize < oldsize)
1471 		newsize = oldsize;
1472 	if (ioflag & IO_APPEND)
1473 		uio->uio_offset = fnp->size;
1474 
1475 	/*
1476 	 * Check for illegal write offsets.
1477 	 */
1478 	if (newsize > FUSE_MAXFILESIZE) {
1479 		error = EFBIG;
1480 		goto done;
1481 	}
1482 
1483 	/*
1484 	 * NOTE: Ignore if UIO does not come from a user thread (e.g. VN).
1485 	 */
1486 	if (vp->v_type == VREG && td != NULL && td->td_lwp != NULL) {
1487 		error = kern_getrlimit(RLIMIT_FSIZE, &limit);
1488 		if (error)
1489 			goto done;
1490 		if (newsize > limit.rlim_cur) {
1491 			ksignal(td->td_proc, SIGXFSZ);
1492 			error = EFBIG;
1493 			goto done;
1494 		}
1495 	}
1496 
1497 	/*
1498 	 * Extend the file's size if necessary
1499 	 */
1500 	extended = (newsize > fnp->size);
1501 
1502 	while (uio->uio_resid > 0) {
1503 		struct buf *bp;
1504 
1505 		/*
1506 		 * Don't completely blow out running buffer I/O
1507 		 * when being hit from the pageout daemon.
1508 		 */
1509 		if (uio->uio_segflg == UIO_NOCOPY &&
1510 		    (ioflag & IO_RECURSE) == 0)
1511 		{
1512 			bwillwrite(FUSE_BLKSIZE);
1513 		}
1514 
1515 		/*
1516 		 * Use buffer cache I/O (via fuse_vop_strategy)
1517 		 *
1518 		 * Calculate the maximum bytes we can write to the buffer at
1519 		 * this offset (after resizing).
1520 		 */
1521 		offset = (size_t)uio->uio_offset & FUSE_BLKMASK64;
1522 		base_offset = (off_t)uio->uio_offset - offset;
1523 		len = uio->uio_resid;
1524 		if (len > FUSE_BLKSIZE - offset)
1525 			len = FUSE_BLKSIZE - offset;
1526 
1527 		endofblk = 0;
1528 		trivial = 0;
1529 		if ((uio->uio_offset + len) > fnp->size) {
1530 			trivial = (uio->uio_offset <= fnp->size);
1531 			error = fuse_reg_resize(vp, uio->uio_offset + len,
1532 						trivial);
1533 			kflags |= NOTE_EXTEND;
1534 			if (error)
1535 				break;
1536 		}
1537 		if (base_offset + len == FUSE_BLKSIZE)
1538 			endofblk = 1;
1539 
1540 		/*
1541 		 * Get the buffer
1542 		 */
1543 		error = 0;
1544 		if (uio->uio_segflg == UIO_NOCOPY) {
1545 			/*
1546 			 * Issue a write with the same data backing
1547 			 * the buffer
1548 			 */
1549 			bp = getblk(vp,
1550 				    base_offset, FUSE_BLKSIZE,
1551 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1552 			if ((bp->b_flags & B_CACHE) == 0) {
1553 				bqrelse(bp);
1554 				error = bread_kvabio(vp,
1555 					      base_offset, FUSE_BLKSIZE,
1556 					      &bp);
1557 			}
1558 		} else if (trivial) {
1559 			/*
1560 			 * We are entirely overwriting the buffer, but
1561 			 * may still have to zero it.
1562 			 */
1563 			bp = getblk(vp,
1564 				    base_offset, FUSE_BLKSIZE,
1565 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1566 			if ((bp->b_flags & B_CACHE) == 0)
1567 				vfs_bio_clrbuf(bp);
1568 		} else {
1569 			/*
1570 			 * Partial overwrite, read in any missing bits
1571 			 * then replace the portion being overwritten.
1572 			 */
1573 			error = bread_kvabio(vp, base_offset, FUSE_BLKSIZE, &bp);
1574 			if (error == 0)
1575 				bheavy(bp);
1576 		}
1577 
1578 		if (error) {
1579 			brelse(bp);
1580 			break;
1581 		}
1582 
1583 		/*
1584 		 * Ok, copy the data in
1585 		 */
1586 		bkvasync(bp);
1587 		error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
1588 		kflags |= NOTE_WRITE;
1589 
1590 		if (error) {
1591 			kprintf("fuse_vop_write uiomove error %d\n", error);
1592 			brelse(bp);
1593 			break;
1594 		}
1595 
1596 		if (ioflag & IO_SYNC) {
1597 			bwrite(bp);
1598 		} else if ((ioflag & IO_DIRECT) && endofblk) {
1599 			bawrite(bp);
1600 		} else if (ioflag & IO_ASYNC) {
1601 			bawrite(bp);
1602 		} else if (vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
1603 			bdwrite(bp);
1604 		} else {
1605 			bp->b_flags |= B_CLUSTEROK;
1606 			cluster_write(bp, fnp->size, FUSE_BLKSIZE, seqcount);
1607 			//bdwrite(bp);
1608 		}
1609 	}
1610 	vsetisdirty(vp);
1611 
1612 	if (error) {
1613 		if (extended) {
1614 			(void)fuse_reg_resize(vp, oldsize, trivial);
1615 			kflags &= ~NOTE_EXTEND;
1616 		}
1617 		goto done;
1618 	}
1619 
1620 	/*
1621 	 * Currently we don't set the mtime on files modified via mmap()
1622 	 * because we can't tell the difference between those modifications
1623 	 * and an attempt by the pageout daemon to flush fuse pages to
1624 	 * swap.
1625 	 */
1626 	if (uio->uio_segflg == UIO_NOCOPY) {
1627 		if (vp->v_flag & VLASTWRITETS) {
1628 			fnp->attr.va_mtime.tv_sec = vp->v_lastwrite_ts.tv_sec;
1629 			fnp->attr.va_mtime.tv_nsec = vp->v_lastwrite_ts.tv_nsec;
1630 		}
1631 	} else {
1632 		fnp->modified = 1;
1633 		vclrflags(vp, VLASTWRITETS);
1634 	}
1635 
1636 	if (extended)
1637 		fnp->changed = 1;
1638 
1639 	if (fnp->attr.va_mode & (S_ISUID | S_ISGID)) {
1640 		if (caps_priv_check(ap->a_cred, SYSCAP_NOVFS_RETAINSUGID))
1641 			fnp->attr.va_mode &= ~(S_ISUID | S_ISGID);
1642 	}
1643 done:
1644 	mtx_unlock(&fnp->node_lock);
1645 
1646 	if (kflags)
1647 		fuse_knote(vp, kflags);
1648 
1649 	return(error);
1650 }
1651 
1652 /*
1653  * Issue I/O RPC to support thread.  This can be issued from sensitive
1654  * kernel threads such as the pageout daemon, so we have to queue the
1655  * I/O to our support thread and return.  We cannot block in here.
1656  */
1657 static int
1658 fuse_vop_strategy(struct vop_strategy_args *ap)
1659 {
1660 	struct bio *bio = ap->a_bio;
1661 	struct buf *bp = bio->bio_buf;
1662 	struct vnode *vp = ap->a_vp;
1663 	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
1664 	//struct fuse_node *fnp = VTOI(vp);
1665 
1666 	fuse_dbg("ino=%ju b_cmd=%d\n", VTOI(ap->a_vp)->ino, bp->b_cmd);
1667 
1668 	if (vp->v_type != VREG) {
1669 		bp->b_resid = bp->b_bcount;
1670 		bp->b_flags |= B_ERROR | B_INVAL;
1671 		bp->b_error = EINVAL;
1672 		biodone(bio);
1673 		return 0;
1674 	}
1675 
1676 	bp->b_flags &= ~(B_ERROR | B_INVAL);
1677 
1678 	switch(bp->b_cmd) {
1679 	case BUF_CMD_READ:
1680 		if (vn_cache_strategy(vp, bio) == 0) {
1681 			bio->bio_driver_info = vp;
1682 			spin_lock(&fmp->helper_spin);
1683 			TAILQ_INSERT_TAIL(&fmp->bioq, bio, bio_act);
1684 			spin_unlock(&fmp->helper_spin);
1685 			wakeup(&fmp->helper_td);
1686 		}
1687 		break;
1688 	case BUF_CMD_WRITE:
1689 		bio->bio_driver_info = vp;
1690 		spin_lock(&fmp->helper_spin);
1691 		TAILQ_INSERT_TAIL(&fmp->bioq, bio, bio_act);
1692 		spin_unlock(&fmp->helper_spin);
1693 		wakeup(&fmp->helper_td);
1694 		break;
1695 	default:
1696 		bp->b_flags |= B_INVAL;
1697 		bp->b_error = EINVAL;
1698 		biodone(bio);
1699 		break;
1700 	}
1701 	return 0;
1702 }
1703 
1704 /*
1705  * Just make the backing store appear to be contiguous so write clustering
1706  * works.  The strategy function will take it from there.  Use MAXBSIZE
1707  * chunks as a micro-optimization to make random flushes use reasonable
1708  * block writes.
1709  */
1710 static int
1711 fuse_bmap(struct vop_bmap_args *ap)
1712 {
1713 	if (ap->a_doffsetp != NULL)
1714 		*ap->a_doffsetp = ap->a_loffset;
1715 	if (ap->a_runp != NULL)
1716 		*ap->a_runp = MAXBSIZE - (ap->a_loffset & (MAXBSIZE - 1));
1717 	if (ap->a_runb != NULL)
1718 		*ap->a_runb = ap->a_loffset & (MAXBSIZE - 1);
1719 
1720 	return 0;
1721 }
1722 
1723 static int
1724 fuse_advlock(struct vop_advlock_args *ap)
1725 {
1726 	struct vnode *vp = ap->a_vp;
1727 	struct fuse_node *fnp = VTOI(vp);
1728 	int error;
1729 
1730 	error = lf_advlock(ap, &fnp->advlock, fnp->size);
1731 
1732 	return error;
1733 }
1734 
1735 static int
1736 fuse_vop_print(struct vop_print_args *ap)
1737 {
1738 	struct fuse_node *fnp = VTOI(ap->a_vp);
1739 
1740 	fuse_print("tag VT_FUSE, node %p, ino %ju\n",
1741 	    fnp, VTOI(ap->a_vp)->ino);
1742 
1743 	return 0;
1744 }
1745 
/*
 * fuse_vop_inactive() - called when the last active reference to the
 * vnode goes away.  Flushes all dirty data and tells the fuse server
 * to close its file handle.
 */
static int
fuse_vop_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mount *mp = vp->v_mount;
	struct fuse_node *fnp = VTOI(vp);
	struct fuse_mount *fmp = VFSTOFUSE(mp);
	struct vm_object *obj;

	/* No backing node left; just recycle the vnode. */
	if (!fnp) {
		vrecycle(vp);
		return 0;
	}

	/*
	 * For now synchronize all dirty data on INACTIVE instead
	 * of on RECLAIM.
	 *
	 * Get all dirty data out... mmap'd pages and the buffer cache,
	 * so we can issue FUSE_RELEASE here.
	 */
	fuse_dbg("ino=%ju\n", fnp->ino);

	if ((obj = vp->v_object) != NULL)
		vm_object_page_clean(obj, 0, 0, 0);
	VOP_FSYNC(vp, MNT_WAIT, 0);

	/*
	 * Close the server-side file handle (FUSE_RELEASE/FUSE_RELEASEDIR)
	 * now that everything dirty has been flushed.
	 */
	fuse_release(fmp, fnp);

	return 0;
}
1780 
1781 /*
1782  * Reclaim inactive vnode and destroy the related fuse_node.  We
1783  * never destroy the root fuse_node here.
1784  */
1785 static int
1786 fuse_vop_reclaim(struct vop_reclaim_args *ap)
1787 {
1788 	struct vnode *vp = ap->a_vp;
1789 	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
1790 	struct fuse_node *fnp = VTOI(vp);
1791 
1792 	if (fnp) {
1793 		vp->v_data = NULL;
1794 		fnp->vp = NULL;
1795 		fuse_dbg("ino=%ju\n", fnp->ino);
1796 
1797 		if (fnp != fmp->rfnp)
1798 			fuse_node_free(fmp, fnp);
1799 		vclrisdirty(vp);
1800 	}
1801 
1802 	return 0;
1803 }
1804 
1805 static int
1806 fuse_vop_mountctl(struct vop_mountctl_args *ap)
1807 {
1808 	struct mount *mp;
1809 	int res = 0;
1810 
1811 	mp = ap->a_head.a_ops->head.vv_mount;
1812 	lwkt_gettoken(&mp->mnt_token);
1813 
1814 	switch (ap->a_op) {
1815 	//case MOUNTCTL_MOUNTFLAGS:
1816 	//	...
1817 	//	break;
1818 	default:
1819 		res = vop_stdmountctl(ap);
1820 		break;
1821 	}
1822 
1823 	lwkt_reltoken(&mp->mnt_token);
1824 	return res;
1825 }
1826 
static void filt_fusedetach(struct knote*);
static int filt_fuseread(struct knote*, long);
static int filt_fusewrite(struct knote*, long);
static int filt_fusevnode(struct knote*, long);

/*
 * kqueue filterops for EVFILT_READ / EVFILT_WRITE / EVFILT_VNODE.
 * No attach hook; all three share the same detach handler.
 */
static struct filterops fuseread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_fusedetach, filt_fuseread };
static struct filterops fusewrite_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_fusedetach, filt_fusewrite };
static struct filterops fusevnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_fusedetach, filt_fusevnode };
1841 
1842 static int
1843 fuse_kqfilter(struct vop_kqfilter_args *ap)
1844 {
1845 	struct vnode *vp = ap->a_vp;
1846 	struct knote *kn = ap->a_kn;
1847 
1848 	switch (kn->kn_filter) {
1849 	case EVFILT_READ:
1850 		kn->kn_fop = &fuseread_filtops;
1851 		break;
1852 	case EVFILT_WRITE:
1853 		kn->kn_fop = &fusewrite_filtops;
1854 		break;
1855 	case EVFILT_VNODE:
1856 		kn->kn_fop = &fusevnode_filtops;
1857 		break;
1858 	default:
1859 		return EOPNOTSUPP;
1860 	}
1861 
1862 	kn->kn_hook = (caddr_t)vp;
1863 	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
1864 
1865 	return 0;
1866 }
1867 
1868 static void
1869 filt_fusedetach(struct knote *kn)
1870 {
1871 	struct vnode *vp = (void*)kn->kn_hook;
1872 
1873 	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
1874 }
1875 
1876 static int
1877 filt_fuseread(struct knote *kn, long hint)
1878 {
1879 	struct vnode *vp = (void*)kn->kn_hook;
1880 	struct fuse_node *fnp = VTOI(vp);
1881 	off_t off;
1882 
1883 	if (hint == NOTE_REVOKE) {
1884 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
1885 		return 1;
1886 	}
1887 
1888 	/*
1889 	 * Interlock against MP races when performing this function.
1890 	 */
1891 	mtx_lock(&fnp->node_lock);
1892 	off = fnp->size - kn->kn_fp->f_offset;
1893 	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
1894 	if (kn->kn_sfflags & NOTE_OLDAPI) {
1895 		mtx_unlock(&fnp->node_lock);
1896 		return 1;
1897 	}
1898 	if (!kn->kn_data)
1899 		kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
1900 	mtx_unlock(&fnp->node_lock);
1901 
1902 	return kn->kn_data != 0;
1903 }
1904 
1905 static int
1906 filt_fusewrite(struct knote *kn, long hint)
1907 {
1908 	if (hint == NOTE_REVOKE)
1909 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
1910 	kn->kn_data = 0;
1911 
1912 	return 1;
1913 }
1914 
1915 static int
1916 filt_fusevnode(struct knote *kn, long hint)
1917 {
1918 	if (kn->kn_sfflags & hint)
1919 		kn->kn_fflags |= hint;
1920 	if (hint == NOTE_REVOKE) {
1921 		kn->kn_flags |= (EV_EOF | EV_NODATA);
1922 		return 1;
1923 	}
1924 
1925 	return kn->kn_fflags != 0;
1926 }
1927 
1928 static int
1929 fuse_vop_getpages(struct vop_getpages_args *ap)
1930 {
1931 	if (!ap->a_vp->v_mount)
1932 		return VM_PAGER_BAD;
1933 
1934 	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
1935 	    ap->a_reqpage, ap->a_seqaccess);
1936 }
1937 
1938 static int
1939 fuse_vop_putpages(struct vop_putpages_args *ap)
1940 {
1941 	if (!ap->a_vp->v_mount)
1942 		return VM_PAGER_BAD;
1943 
1944 	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
1945 	    ap->a_flags, ap->a_rtvals);
1946 }
1947 
1948 /*
1949  * Resizes the object associated to the regular file pointed to by vp to
1950  * the size newsize.  'vp' must point to a vnode that represents a regular
1951  * file.  'newsize' must be positive.
1952  *
1953  * pass NVEXTF_TRIVIAL when buf content will be overwritten, otherwise set 0
1954  * to be zero filled.
1955  *
1956  * Returns zero on success or an appropriate error code on failure.
1957  *
1958  * Caller must hold the node exclusively locked.
1959  */
1960 static int
1961 fuse_reg_resize(struct vnode *vp, off_t newsize, int trivial)
1962 {
1963 	struct fuse_node *fnp;
1964 	off_t oldsize;
1965 	int nvextflags;
1966 	int error;
1967 
1968 #ifdef INVARIANTS
1969 	KKASSERT(vp->v_type == VREG);
1970 	KKASSERT(newsize >= 0);
1971 #endif
1972 
1973 	fnp = VTOI(vp);
1974 
1975 	oldsize = fnp->size;
1976 	fnp->size = newsize;
1977 	fnp->attr.va_size = newsize;
1978 	fnp->sizeoverride = 1;
1979 
1980 	nvextflags = 0;
1981 
1982 	/*
1983 	 * The backing VM object may contain VM pages as well as swap
1984 	 * assignments if we previously renamed main object pages into
1985 	 * it during deactivation.
1986 	 */
1987 	if (newsize < oldsize) {
1988 		error = nvtruncbuf(vp, newsize, FUSE_BLKSIZE, -1, nvextflags);
1989 	} else {
1990 		int nblksize;
1991 
1992 		nblksize = FUSE_BLKSIZE;
1993 
1994 		if (trivial)
1995 			nvextflags |= NVEXTF_TRIVIAL;
1996 
1997 		error = nvextendbuf(vp, oldsize, newsize,
1998 				    FUSE_BLKSIZE, nblksize,
1999 				    -1, -1, nvextflags);
2000 	}
2001 	return error;
2002 }
2003 
2004 /*
2005  * Fuse strategy helper thread
2006  */
void
fuse_io_thread(void *arg)
{
	struct fuse_mount *fmp = arg;
	struct bio *bio;

	/* Run until the mount is torn down (fmp->dead set). */
	while (fmp->dead == 0) {
		/* Sleep until fuse_vop_strategy() queues work and wakes us. */
		tsleep(&fmp->helper_td, 0, "fuse_wio", 0);
		spin_lock(&fmp->helper_spin);
		/*
		 * Drain the queue.  The spinlock is dropped around
		 * fuse_io_execute() because the RPC can block.
		 */
		while ((bio = TAILQ_FIRST(&fmp->bioq)) != NULL) {
			TAILQ_REMOVE(&fmp->bioq, bio, bio_act);
			spin_unlock(&fmp->helper_spin);
			fuse_io_execute(fmp, bio);
			spin_lock(&fmp->helper_spin);
		}
		spin_unlock(&fmp->helper_spin);
	}
	/* Tell the unmount path the helper has exited. */
	fmp->helper_td = NULL;
	wakeup(&fmp->helper_td);
}
2027 
2028 /*
2029  * Execute BIO
2030  */
/*
 * Execute one queued BIO: translate it into a FUSE_READ or FUSE_WRITE
 * RPC, run the transaction, and complete the buffer with biodone().
 * Runs in the helper thread, so blocking in fuse_ipc_tx() is fine.
 */
static void
fuse_io_execute(struct fuse_mount *fmp, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = bio->bio_driver_info;
	struct fuse_node *fnp = VTOI(vp);
	struct fuse_ipc *fip;
	struct fuse_read_in *fri;
	struct fuse_write_in *fwi;
	struct fuse_write_out *fwo;
	int error;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		fip = fuse_ipc_get(fmp, sizeof(*fri));
		fri = fuse_ipc_fill(fip, FUSE_READ, fnp->ino, proc0.p_ucred);
		fri->offset = bp->b_loffset;
		fri->size = bp->b_bcount;
		fri->fh = fnp->fh;

		error = fuse_ipc_tx(fip);

		if (error == 0) {
			/*
			 * NOTE(review): a short reply leaves the tail of
			 * b_data unmodified -- confirm the server only
			 * shortens reads at EOF.
			 */
			memcpy(bp->b_data, fuse_out_data(fip),
			       fuse_out_data_size(fip));
			fuse_ipc_put(fip);
			bp->b_resid = 0;
			bp->b_error = 0;
		} else {
			/* presumably fuse_ipc_tx disposes of fip on
			 * failure -- matches the put-only-on-success
			 * convention used throughout this file */
			bp->b_resid = bp->b_bcount;
			bp->b_flags |= B_ERROR | B_INVAL;
			bp->b_error = EINVAL;
		}
		biodone(bio);
		break;
	case BUF_CMD_WRITE:
		fip = fuse_ipc_get(fmp, sizeof(*fwi) + bp->b_bcount);
		fwi = fuse_ipc_fill(fip, FUSE_WRITE, fnp->ino, proc0.p_ucred);
		fwi->offset = bp->b_loffset;
		fwi->size = bp->b_bcount;
		fwi->fh = fnp->fh;

		/*
		 * Handle truncated buffer at file EOF
		 */
		if (fwi->offset + fwi->size > fnp->size) {
			if (fwi->offset >= fnp->size) {
				error = EINVAL;
				goto write_failed;
			}
			fwi->size = fnp->size - fwi->offset;
		}

		memcpy((void *)(fwi + 1), bp->b_data, bp->b_bcount);

		error = fuse_ipc_tx(fip);

		fwo = fuse_out_data(fip);
		if (error == 0) {
			bp->b_resid = bp->b_bcount - fwo->size;
			bp->b_error = 0;
			fuse_ipc_put(fip);
		} else {
write_failed:
			/*
			 * NOTE(review): when reached via the goto above the
			 * fip obtained from fuse_ipc_get() is never released
			 * -- verify the fuse_ipc lifecycle for a leak.
			 * Also b_error is hardwired to EINVAL rather than
			 * the actual error (matches the read path).
			 */
			bp->b_resid = bp->b_bcount;
			bp->b_flags |= B_ERROR | B_INVAL;
			bp->b_error = EINVAL;
		}
		biodone(bio);
		break;
	default:
		/* Unsupported buffer command. */
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		biodone(bio);
		break;
	}
}
2109 
2110 #if 0
2111 	bp->b_resid = bp->b_bcount;
2112 	bp->b_flags |= B_ERROR | B_INVAL;
2113 	bp->b_error = EINVAL;
2114 	biodone(bio);
2115 #endif
2116 
/*
 * Tell the fuse server this node is no longer in active use: close the
 * server-side file handle (FUSE_RELEASE / FUSE_RELEASEDIR) and drop the
 * cached lookup count.  Safe to call more than once; fh is zeroed.
 */
static void
fuse_release(struct fuse_mount *fmp, struct fuse_node *fnp)
{
	struct fuse_ipc *fip;
	struct fuse_release_in *fri;
	int error, op;

	if (fnp->fh) {
		/*
		 * Release the file-handle to clean-up the userland side.
		 */
		if (fnp->type == VDIR)
			op = FUSE_RELEASEDIR;
		else
			op = FUSE_RELEASE;

		fip = fuse_ipc_get(fmp, sizeof(*fri));
		fri = fuse_ipc_fill(fip, op, fnp->ino, NULL);
		/* unused */
		//fri->flags = ...;
		fri->release_flags = FUSE_RELEASE_FLUSH;
		//fri->lock_owner = ...;
		fri->fh = fnp->fh;

		/* Per file convention, fip is only put on tx success. */
		error = fuse_ipc_tx(fip);
		if (error == 0)
			fuse_ipc_put(fip);

#if 0
		op = FUSE_FORGET;
		fip = fuse_ipc_get(fmp, sizeof(*fri));
		fri = fuse_ipc_fill(fip, op, fnp->ino, NULL);
		error = fuse_ipc_tx(fip);
		if (error == 0)
			fuse_ipc_put(fip);
#endif
		fnp->fh = 0;
	}
	/* ino 1 is the fuse root; its lookup count is never returned. */
	if (fnp->nlookup && fnp->ino != 1) {
#if 0
		/* sshfs fails utterly if we issue FUSE_FORGET */
		error = fuse_forget_node(fmp, fnp->ino, fnp->nlookup, NULL);
#endif
		fnp->nlookup = 0;
	}
	fnp->closed = true;
}
2164 
2165 
/*
 * VOP table for regular fuse vnodes (files, directories, symlinks).
 */
struct vop_ops fuse_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		fuse_vop_access,
	.vop_open =		fuse_vop_open,
	.vop_close =		fuse_vop_close,
	.vop_fsync =		fuse_vop_fsync,
	.vop_getattr =		fuse_vop_getattr,
	.vop_setattr =		fuse_vop_setattr,
	.vop_nresolve =		fuse_vop_nresolve,
	//.vop_nlookupdotdot =	fuse_nlookupdotdot,
	.vop_nlink =		fuse_vop_nlink,
	.vop_ncreate =		fuse_vop_ncreate,
	.vop_nmknod =		fuse_vop_nmknod,
	.vop_nremove =		fuse_vop_nremove,
	.vop_nmkdir =		fuse_vop_nmkdir,
	.vop_nrmdir =		fuse_vop_nrmdir,
	.vop_pathconf =		fuse_vop_pathconf,
	.vop_readdir =		fuse_vop_readdir,
	.vop_readlink =		fuse_vop_readlink,
	.vop_nrename =		fuse_vop_nrename,
	.vop_nsymlink =		fuse_vop_nsymlink,
	.vop_read =		fuse_vop_read,
	.vop_write =		fuse_vop_write,
	.vop_strategy =		fuse_vop_strategy,
	.vop_bmap =		fuse_bmap,
	.vop_advlock =		fuse_advlock,
	.vop_print =		fuse_vop_print,
	.vop_inactive =		fuse_vop_inactive,
	.vop_reclaim =		fuse_vop_reclaim,
	.vop_mountctl =		fuse_vop_mountctl,
	.vop_kqfilter =		fuse_kqfilter,
	.vop_getpages =		fuse_vop_getpages,
	.vop_putpages =		fuse_vop_putpages,
};
2200 
/*
 * VOP table for special-file fuse vnodes (devices, fifos); data I/O
 * is rejected via vop_stdnoread/vop_stdnowrite.
 */
struct vop_ops fuse_spec_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		fuse_vop_access,
	.vop_close =		fuse_vop_close,
	.vop_fsync =		fuse_vop_fsync,
	.vop_getattr =		fuse_vop_getattr,
	.vop_setattr =		fuse_vop_setattr,
	.vop_read =		vop_stdnoread,
	.vop_write =		vop_stdnowrite,
	//.vop_markatime =	fuse_vop_markatime,
	.vop_print =		fuse_vop_print,
	.vop_inactive =		fuse_vop_inactive,
	.vop_reclaim =		fuse_vop_reclaim,
};
2215