xref: /dragonfly/sys/vfs/dirfs/dirfs_subr.c (revision 0de61e28)
1 /*
2  * Copyright (c) 2013 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Antonio Huete Jimenez <tuxillo@quantumachine.net>
6  * by Matthew Dillon <dillon@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  */
36 
37 #include <errno.h>
38 #include <fcntl.h>
39 #include <unistd.h>
40 
41 #include <sys/mount.h>
42 #include <sys/queue.h>
43 #include <sys/spinlock2.h>
44 #include <sys/stat.h>
45 #include <sys/systm.h>
46 #include <sys/types.h>
47 #include <sys/vfscache.h>
48 #include <sys/vnode.h>
49 
50 #include "dirfs.h"
51 
52 /*
53  * Allocate and set up everything needed for the dirfs node to hold the
54  * filename.  Note: dn_name is NUL-terminated.
55  */
56 void
57 dirfs_node_setname(dirfs_node_t dnp, const char *name, int len)
58 {
59 	dbg(5, "called\n");
60 
61 	if (dnp->dn_name)
62 		kfree(dnp->dn_name, M_DIRFS_MISC);
63 	dnp->dn_name = kmalloc(len + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO);
64 	bcopy(name, dnp->dn_name, len);
65 	dnp->dn_name[len] = 0;
66 	dnp->dn_namelen = len;
67 }
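
/*
 * A minimal usage sketch (illustrative, mirroring what dirfs_alloc_file()
 * does further below): the name is normally taken straight from the
 * namecache entry handed in by the VOP layer.
 *
 *	dirfs_node_lock(dnp);
 *	dirfs_node_setname(dnp, ncp->nc_name, ncp->nc_nlen);
 *	dirfs_node_unlock(dnp);
 */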
68 
69 /*
70  * Allocate enough space to hold a dirfs node structure.
71  * Note: The node name and its length are not handled here.
72  */
73 dirfs_node_t
74 dirfs_node_alloc(struct mount *mp)
75 {
76 	dirfs_node_t dnp;
77
78 	dbg(5, "called\n");
79
80 	dnp = kmalloc(sizeof(*dnp), M_DIRFS_NODE, M_WAITOK | M_ZERO);
81 	lockinit(&dnp->dn_lock, "dfsnode", 0, LK_CANRECURSE);
82
83 	dnp->dn_fd = DIRFS_NOFD;
84
85 	return dnp;
86 }
87 
88 /*
89  * Drop a reference to the node.  The node is freed when the last reference
90  * is dropped.
90  */
91 void
92 dirfs_node_drop(dirfs_mount_t dmp, dirfs_node_t dnp)
93 {
94 	dbg(5, "called\n");
95 
96 	if (dirfs_node_unref(dnp))
97 		dirfs_node_free(dmp, dnp);
98 }
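
/*
 * Reference counting sketch (illustrative): dirfs_node_drop() is the
 * counterpart of dirfs_node_ref().  A temporary user of a node typically
 * brackets its access like this:
 *
 *	dirfs_node_ref(dnp);		(keep dnp alive across the use)
 *	...use dnp->dn_fd...
 *	dirfs_node_drop(dmp, dnp);	(frees dnp on the last reference)
 */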
99 
100 /*
101  * Remove the association with its parent.  Before its resources are freed,
102  * the node is removed from the per-mount passive fd cache and its fd is
103  * closed, either normally or forcibly.
104  */
105 int
106 dirfs_node_free(dirfs_mount_t dmp, dirfs_node_t dnp)
107 {
108 	struct vnode *vp;
109 
110 	dbg(5, "called\n");
111 
112 	KKASSERT(dnp != NULL);
113 	debug_node2(dnp);
114 
115 	KKASSERT(dirfs_node_refcnt(dnp) == 0);
116 
117 	vp = NODE_TO_VP(dnp);
118 	/*
119 	 * Remove the inode from the passive fd list
120 	 * as we are tearing down the node.
121 	 * The root inode is removed at VOP_UNMOUNT() time.
122 	 */
123 	if (dnp->dn_parent) {	/* NULL when the children have already reaped the parent */
124 		dirfs_node_drop(dmp, dnp->dn_parent);
125 		dnp->dn_parent = NULL;
126 	}
127 	dirfs_node_setpassive(dmp, dnp, 0);
128 	if (dnp->dn_name) {
129 		kfree(dnp->dn_name, M_DIRFS_MISC);
130 		dnp->dn_name = NULL;
131 	}
132 
133 	/*
134 	 * The file descriptor should have been closed already by the
135 	 * previous call to dirfs_node_setpassive().  If not, force a sync and
136 	 * close it.
137 	 */
138 	if (dnp->dn_fd != DIRFS_NOFD) {
139 		if (dnp->dn_vnode)
140 			VOP_FSYNC(vp, MNT_WAIT, 0);
141 		close(dnp->dn_fd);
142 		dnp->dn_fd = DIRFS_NOFD;
143 	}
144 
145 	lockuninit(&dnp->dn_lock);
146 	kfree(dnp, M_DIRFS_NODE);
147 	dnp = NULL;
148 
149 	return 0;
150 }
151 
152 /*
153  * Do all the operations needed to get a resulting inode <--> host file
154  * association.  This may or may not include opening the file, which should
155  * only be needed when creating it.
156  *
157  * If vap is not NULL and openflags are specified, the file is opened.
158  */
159 int
160 dirfs_alloc_file(dirfs_mount_t dmp, dirfs_node_t *dnpp, dirfs_node_t pdnp,
161     struct namecache *ncp, struct vnode **vpp, struct vattr *vap,
162     int openflags)
163 {
164 	dirfs_node_t dnp;
165 	dirfs_node_t pathnp;
166 	struct vnode *vp;
167 	struct mount *mp;
168 	char *tmp;
169 	char *pathfree;
170 	int error;
171 
172 	dbg(5, "called\n");
173 
174 	error = 0;
175 	vp = NULL;
176 	mp = DIRFS_TO_VFS(dmp);
177 
178 	/* Sanity check */
179 	if (pdnp == NULL)
180 		return EINVAL;
181 
182 	dnp = dirfs_node_alloc(mp);
183 	KKASSERT(dnp != NULL);
184 
185 	dirfs_node_lock(dnp);
186 	dirfs_node_setname(dnp, ncp->nc_name, ncp->nc_nlen);
187 	dnp->dn_parent = pdnp;
188 	dirfs_node_ref(pdnp);   /* Children ref */
189 	dirfs_node_unlock(dnp);
190 
191 	pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree);
192 
193 	if (openflags && vap != NULL) {
194 		dnp->dn_fd = openat(pathnp->dn_fd, tmp,
195 				    openflags, vap->va_mode);
196 		if (dnp->dn_fd == -1) {
197 			dirfs_dropfd(dmp, pathnp, pathfree);
198 			return errno;
199 		}
200 	}
201 
202 	error = dirfs_node_stat(pathnp->dn_fd, tmp, dnp);
203 	if (error) {		/* XXX Handle errors */
204 		error = errno;
205 		if (vp)
206 			dirfs_free_vp(dmp, dnp);
207 		dirfs_node_free(dmp, dnp);
208 		dirfs_dropfd(dmp, pathnp, pathfree);
209 		return error;
210 	}
211 
212 	dirfs_alloc_vp(mp, &vp, LK_CANRECURSE, dnp);
213 	*vpp = vp;
214 	*dnpp = dnp;
215 
216 	dbg(9, "tmp=%s dnp=%p allocated\n", tmp, dnp);
217 	dirfs_dropfd(dmp, pathnp, pathfree);
218 
219 	/* We want VOP_INACTIVE() to be called on last ref */
220 	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
221 
222 	return error;
223 }
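
/*
 * An illustrative sketch (assumption, not taken from this file): a
 * create-style VOP would be expected to call dirfs_alloc_file() roughly as
 * follows, with "ap" standing in for the usual vop_ncreate_args:
 *
 *	error = dirfs_alloc_file(dmp, &dnp, pdnp, ap->a_nch->ncp,
 *				 ap->a_vpp, ap->a_vap, O_CREAT | O_RDWR);
 *
 * When openflags is 0 or vap is NULL the host file is only stat'd, never
 * opened here.
 */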
224 
225 /*
226  * Requires a dirfs_node_t that has already been lstat(2)'d, so that its
227  * type is known for the type-specific initialization below.
228  */
229 void
230 dirfs_alloc_vp(struct mount *mp, struct vnode **vpp, int lkflags,
231 	       dirfs_node_t dnp)
232 {
233 	struct vnode *vp;
234 	dirfs_mount_t dmp = VFS_TO_DIRFS(mp);
235 
236 	dbg(5, "called\n");
237 
238 	/*
239 	 * Handle vnode reclaim/alloc races
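	 *
	 * vget() on an existing vnode may fail if it races a reclaim, in
	 * which case we simply retry.  If two threads race through
	 * getnewvnode() for the same node, only the one that still finds
	 * dnp->dn_vnode NULL keeps its vnode; the loser marks its vnode
	 * VBAD, releases it with vx_put() and retries.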
240 	 */
241 	for (;;) {
242 		vp = dnp->dn_vnode;
243 		if (vp) {
244 			if (vget(vp, LK_EXCLUSIVE) == 0)
245 				break;	/* success */
246 			/* vget raced a reclaim, retry */
247 		} else {
248 			getnewvnode(VT_UNUSED10, mp, &vp, 0, lkflags);
249 			if (dnp->dn_vnode == NULL) {
250 				dnp->dn_vnode = vp;
251 				vp->v_data = dnp;
252 				vp->v_type = dnp->dn_type;
253 				if (dmp->dm_root == dnp)
254 					vsetflags(vp, VROOT);
255 				dirfs_node_ref(dnp);	/* ref for dnp<->vp */
256 
257 				/* Type-specific initialization. */
258 				switch (dnp->dn_type) {
259 				case VBLK:
260 				case VCHR:
261 				case VSOCK:
262 					break;
263 				case VREG:
264 					vinitvmio(vp, dnp->dn_size, BMASK, -1);
265 					break;
266 				case VLNK:
267 					break;
268 				case VFIFO:
269 			//              vp->v_ops = &mp->mnt_vn_fifo_ops;
270 					break;
271 				case VDIR:
272 					break;
273 				default:
274 					panic("dirfs_alloc_vp: dnp=%p vp=%p "
275 					      "type=%d",
276 					      dnp, vp, dnp->dn_type);
277 					/* NOT REACHED */
278 					break;
279 				}
280 				/* downgrade VX lock to VN lock */
281 				vx_downgrade(vp);
282 				break;	/* success */
283 			}
284 			vp->v_type = VBAD;
285 			vx_put(vp);
286 			/* multiple dirfs_alloc_vp calls raced, retry */
287 		}
288 	}
289 	KKASSERT(vp != NULL);
290 	*vpp = vp;
291 	dbg(9, "dnp=%p vp=%p type=%d\n", dnp, vp, vp->v_type);
292 }
293 
294 /*
295  * Do not call locked!
296  */
297 void
298 dirfs_free_vp(dirfs_mount_t dmp, dirfs_node_t dnp)
299 {
300 	struct vnode *vp = NODE_TO_VP(dnp);
301 
302 	dbg(5, "called\n");
303 
304 	dnp->dn_vnode = NULL;
305 	vp->v_data = NULL;
306 	dirfs_node_drop(dmp, dnp);
307 }
308 
309 int
310 dirfs_nodetype(struct stat *st)
311 {
312 	int ret;
313 	mode_t mode = st->st_mode;
314 
315 	if (S_ISDIR(mode))
316 		ret = VDIR;
317 	else if (S_ISBLK(mode))
318 		ret = VBLK;
319 	else if (S_ISCHR(mode))
320 		ret = VCHR;
321 	else if (S_ISFIFO(mode))
322 		ret = VFIFO;
323 	else if (S_ISSOCK(mode))
324 		ret = VSOCK;
325 	else if (S_ISLNK(mode))
326 		ret = VLNK;
327 	else if (S_ISREG(mode))
328 		ret = VREG;
329 	else
330 		ret = VBAD;
331 
332 	return ret;
333 }
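
/*
 * A minimal usage sketch (this is effectively what dirfs_node_stat() below
 * does): the struct stat obtained from the host is mapped straight to a
 * vnode type.
 *
 *	struct stat st;
 *
 *	if (fstatat(fd, path, &st, AT_SYMLINK_NOFOLLOW) == 0)
 *		dnp->dn_type = dirfs_nodetype(&st);
 */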
334 
335 int
336 dirfs_node_stat(int fd, const char *path, dirfs_node_t dnp)
337 {
338 	struct stat st;
339 	int error;
340 
341 	dbg(5, "called\n");
342 	if (fd == DIRFS_NOFD)
343 		error = lstat(path, &st);
344 	else
345 		error = fstatat(fd, path, &st, AT_SYMLINK_NOFOLLOW);
346 
347 	if (error)
348 		return errno;
349 
350 	/* Populate our dirfs node struct with stat data */
351 	dnp->dn_uid = st.st_uid;
352 	dnp->dn_gid = st.st_gid;
353 	dnp->dn_mode = st.st_mode;
354 	dnp->dn_flags = st.st_flags;
355 	dnp->dn_links = st.st_nlink;
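	/*
	 * Note: only whole-second timestamps are carried over here; the
	 * *nsec fields below are the seconds value scaled to nanoseconds,
	 * so any sub-second resolution reported by the host is discarded.
	 */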
356 	dnp->dn_atime = st.st_atime;
357 	dnp->dn_atimensec = (st.st_atime * 1000000000L);
358 	dnp->dn_mtime = st.st_mtime;
359 	dnp->dn_mtimensec = (st.st_mtime * 1000000000L);
360 	dnp->dn_ctime = st.st_ctime;
361 	dnp->dn_ctimensec = (st.st_ctime * 1000000000L);
362 	dnp->dn_gen = st.st_gen;
363 	dnp->dn_ino = st.st_ino;
364 	dnp->dn_st_dev = st.st_dev;
365 	dnp->dn_size = st.st_size;
366 	dnp->dn_type = dirfs_nodetype(&st);
367 
368 	return 0;
369 }
370 
371 char *
372 dirfs_node_absolute_path(dirfs_mount_t dmp, dirfs_node_t cur, char **pathfreep)
373 {
374 	return(dirfs_node_absolute_path_plus(dmp, cur, NULL, pathfreep));
375 }
376 
377 char *
378 dirfs_node_absolute_path_plus(dirfs_mount_t dmp, dirfs_node_t cur,
379 			      char *last, char **pathfreep)
380 {
381 	size_t len;
382 	dirfs_node_t dnp1;
383 	char *buf;
384 	int count;
385 
386 	dbg(5, "called\n");
387 
388 	KKASSERT(dmp->dm_root);	/* Sanity check */
389 	*pathfreep = NULL;
390 	if (cur == NULL)
391 		return NULL;
392 	buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK);
393 
394 	/*
395 	 * Passed-in trailing element.
396 	 */
397 	count = 0;
398 	buf[MAXPATHLEN] = 0;
399 	if (last) {
400 		len = strlen(last);
401 		count += len;
402 		if (count <= MAXPATHLEN)
403 			bcopy(last, &buf[MAXPATHLEN - count], len);
404 		++count;
405 		if (count <= MAXPATHLEN)
406 			buf[MAXPATHLEN - count] = '/';
407 	}
408 
409 	/*
410 	 * Iterate through the parents until we hit the root.
411 	 */
412 	dnp1 = cur;
413 	while (dirfs_node_isroot(dnp1) == 0) {
414 		count += dnp1->dn_namelen;
415 		if (count <= MAXPATHLEN) {
416 			bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count],
417 			      dnp1->dn_namelen);
418 		}
419 		++count;
420 		if (count <= MAXPATHLEN)
421 			buf[MAXPATHLEN - count] = '/';
422 		dnp1 = dnp1->dn_parent;
423 		if (dnp1 == NULL)
424 			break;
425 	}
426 
427 	/*
428 	 * Prefix with the root mount path.  If the element was unlinked
429 	 * dnp1 will be NULL and there is no path.
430 	 */
431 	len = strlen(dmp->dm_path);
432 	count += len;
433 	if (dnp1 && count <= MAXPATHLEN) {
434 		bcopy(dmp->dm_path, &buf[MAXPATHLEN - count], len);
435 		*pathfreep = buf;
436 		dbg(9, "absolute_path %s\n", &buf[MAXPATHLEN - count]);
437 		return (&buf[MAXPATHLEN - count]);
438 	} else {
439 		kfree(buf, M_DIRFS_MISC);
440 		*pathfreep = NULL;
441 		return (NULL);
442 	}
443 }
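
/*
 * The path is assembled right to left, ending at buf[MAXPATHLEN].  A small
 * worked example (hypothetical names, for illustration only): with dm_path
 * "/host/dir", a parent node "a", the current node "b" and last "c", the
 * buffer fills up as
 *
 *	"c" -> "/c" -> "b/c" -> "/b/c" -> "a/b/c" -> "/a/b/c"
 *
 * and the mount path is prepended last, yielding "/host/dir/a/b/c".  The
 * returned pointer references storage that the caller releases via
 * *pathfreep (typically through dirfs_dropfd()).
 */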
444 
445 /*
446  * Return a dirfs_node with a valid descriptor plus an allocated
447  * relative path which can be used in openat(), fstatat(), etc. calls
448  * to locate the requested inode.
449  */
450 dirfs_node_t
451 dirfs_findfd(dirfs_mount_t dmp, dirfs_node_t cur,
452 	     char **pathto, char **pathfreep)
453 {
454 	dirfs_node_t dnp1;
455 	int count;
456 	char *buf;
457 
458 	dbg(5, "called\n");
459 
460 	*pathfreep = NULL;
461 	*pathto = NULL;
462 
463 	if (cur == NULL)
464 		return NULL;
465 
466 	buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO);
467 	count = 0;
468 
469 	dnp1 = cur;
470 	while (dnp1 == cur || dnp1->dn_fd == DIRFS_NOFD) {
471 		count += dnp1->dn_namelen;
472 		if (count <= MAXPATHLEN) {
473 			bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count],
474 			      dnp1->dn_namelen);
475 		}
476 		++count;
477 		if (count <= MAXPATHLEN)
478 			buf[MAXPATHLEN - count] = '/';
479 		dnp1 = dnp1->dn_parent;
480 		KKASSERT(dnp1 != NULL);
481 	}
482 
483 	if (dnp1 && count <= MAXPATHLEN) {
484 		*pathfreep = buf;
485 		*pathto = &buf[MAXPATHLEN - count + 1];	/* skip '/' prefix */
486 		dirfs_node_ref(dnp1);
487 		dbg(9, "fd=%d dnp1=%p dnp1->dn_name=%s &buf[off]=%s\n",
488 		    dnp1->dn_fd, dnp1, dnp1->dn_name, *pathto);
489 	} else {
490 		dbg(9, "failed: path too long\n");
491 		kfree(buf, M_DIRFS_MISC);
492 		*pathfreep = NULL;
493 		*pathto = NULL;
494 		dnp1 = NULL;
495 	}
496 	return (dnp1);
497 }
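
/*
 * A minimal usage sketch (mirroring dirfs_alloc_file() and
 * dirfs_open_helper()): the returned node carries an extra reference and the
 * relative path is allocated, so every successful dirfs_findfd() call must
 * be paired with dirfs_dropfd().
 *
 *	pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree);
 *	if (pathnp != NULL) {
 *		fd = openat(pathnp->dn_fd, tmp, O_RDONLY);
 *		dirfs_dropfd(dmp, pathnp, pathfree);
 *	}
 */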
498 
499 void
500 dirfs_dropfd(dirfs_mount_t dmp, dirfs_node_t dnp1, char *pathfree)
501 {
502 	if (pathfree)
503 		kfree(pathfree, M_DIRFS_MISC);
504 	if (dnp1)
505 		dirfs_node_drop(dmp, dnp1);
506 }
507 
508 int
509 dirfs_node_getperms(dirfs_node_t dnp, int *flags)
510 {
511 	dirfs_mount_t dmp;
512 	struct vnode *vp = dnp->dn_vnode;
513 	int isowner;
514 	int isgroup;
515 
516 	/*
517 	 * There must be an active vnode anyway, since that
518 	 * indicates the dirfs node has valid data for
519 	 * dnp->dn_mode (via the lstat syscall).
520 	 */
521 	KKASSERT(vp);
522 	dmp = VFS_TO_DIRFS(vp->v_mount);
523 
524 	isowner = (dmp->dm_uid == dnp->dn_uid);
525 	isgroup = (dmp->dm_gid == dnp->dn_gid);
526 
527 	if (isowner) {
528 		if (dnp->dn_mode & S_IRUSR)
529 			*flags |= DIRFS_NODE_RD;
530 		if (dnp->dn_mode & S_IWUSR)
531 			*flags |= DIRFS_NODE_WR;
532 		if (dnp->dn_mode & S_IXUSR)
533 			*flags |= DIRFS_NODE_EXE;
534 	} else if (isgroup) {
535 		if (dnp->dn_mode & S_IRGRP)
536 			*flags |= DIRFS_NODE_RD;
537 		if (dnp->dn_mode & S_IWGRP)
538 			*flags |= DIRFS_NODE_WR;
539 		if (dnp->dn_mode & S_IXGRP)
540 			*flags |= DIRFS_NODE_EXE;
541 	} else {
542 		if (dnp->dn_mode & S_IROTH)
543 			*flags |= DIRFS_NODE_RD;
544 		if (dnp->dn_mode & S_IWOTH)
545 			*flags |= DIRFS_NODE_WR;
546 		if (dnp->dn_mode & S_IXOTH)
547 			*flags |= DIRFS_NODE_EXE;
548 	}
549 
550 	return 0;
551 }
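
/*
 * A minimal sketch of how the returned bits are typically consumed (see
 * dirfs_open_helper() below); the DIRFS_NODE_* flags simply mirror whichever
 * of the owner/group/other rwx bits apply to the mounting user:
 *
 *	int perms = 0;
 *
 *	dirfs_node_getperms(dnp, &perms);
 *	if (perms & DIRFS_NODE_WR)
 *		flags |= O_RDWR;
 *	else
 *		flags |= O_RDONLY;
 */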
552 
553 /*
554  * This requires an allocated node and vnode, otherwise it will panic.
555  */
556 int
557 dirfs_open_helper(dirfs_mount_t dmp, dirfs_node_t dnp, int parentfd,
558 		  char *relpath)
559 {
560 	dirfs_node_t pathnp;
561 	char *pathfree;
562 	char *tmp;
563 	int flags;
564 	int perms;
565 	int error;
566 
567 	dbg(5, "called\n");
568 
569 	flags = error = perms = 0;
570 	tmp = NULL;
571 
572 	KKASSERT(dnp);
573 	KKASSERT(dnp->dn_vnode);
574 
575 	/*
576 	 * XXX Besides VDIR and VREG there are other file
577 	 * types, y'know?
578 	 * Also, O_RDWR alone might not be the best mode to open
579 	 * a file with, need to investigate which suits better.
580 	 */
581 	dirfs_node_getperms(dnp, &perms);
582 
583 	if (dnp->dn_type == VDIR) {
584 		flags |= O_DIRECTORY;
585 	} else {
586 		if (perms & DIRFS_NODE_WR)
587 			flags |= O_RDWR;
588 		else
589 			flags |= O_RDONLY;
590 	}
591 	if (relpath != NULL) {
592 		tmp = relpath;
593 		pathnp = NULL;
594 		KKASSERT(parentfd != DIRFS_NOFD);
595 	} else if (parentfd == DIRFS_NOFD) {
596 		pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree);
597 		parentfd = pathnp->dn_fd;
598 	} else {
599 		pathnp = NULL;
600 	}
601 
602 	dnp->dn_fd = openat(parentfd, tmp, flags);
603 	if (dnp->dn_fd == -1)
604 		error = errno;
605 
606 	dbg(9, "dnp=%p tmp=%s parentfd=%d flags=%08x error=%d "
607 	    "perms=%08x\n", dnp, tmp, parentfd, flags, error,
608 	    perms);
609 
610 	if (pathnp)
611 		dirfs_dropfd(dmp, pathnp, pathfree);
612 
613 	return error;
614 }
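
/*
 * The path/descriptor selection above distinguishes three cases: the caller
 * supplies relpath together with a valid parentfd (both are used as-is), the
 * caller supplies neither (both are resolved via dirfs_findfd() and the
 * temporary reference is dropped at the end), or the caller supplies only a
 * parentfd, in which case tmp is left NULL.
 */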
615 
616 int
617 dirfs_close_helper(dirfs_node_t dnp)
618 {
619 	int error = 0;
620 
621 	dbg(5, "called\n");
622 
623 
624 	if (dnp->dn_fd != DIRFS_NOFD) {
625 		dbg(9, "closed fd on dnp=%p\n", dnp);
626 #if 0
627 		/* buffer cache buffers may still be present */
628 		error = close(dnp->dn_fd); /* XXX EINTR should be checked */
629 		dnp->dn_fd = DIRFS_NOFD;
630 #endif
631 	}
632 
633 	return error;
634 }
635 
636 int
637 dirfs_node_refcnt(dirfs_node_t dnp)
638 {
639 	return dnp->dn_refcnt;
640 }
641 
642 int
643 dirfs_node_chtimes(dirfs_node_t dnp)
644 {
645 	struct vnode *vp;
646 	dirfs_mount_t dmp;
647 	int error = 0;
648 	char *tmp;
649 	char *pathfree;
650 
651 	vp = NODE_TO_VP(dnp);
652 	dmp = VFS_TO_DIRFS(vp->v_mount);
653 
654 	KKASSERT(vn_islocked(vp));
655 
656 	if (dnp->dn_flags & (IMMUTABLE | APPEND))
657 		return EPERM;
658 
659 	tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
660 	KKASSERT(tmp);
661 	if((lutimes(tmp, NULL)) == -1)
662 		error = errno;
663 
664 	dirfs_node_stat(DIRFS_NOFD, tmp, dnp);
665 	dirfs_dropfd(dmp, NULL, pathfree);
666 
667 	KKASSERT(vn_islocked(vp));
668 
669 
670 	return error;
671 }
672 
673 int
674 dirfs_node_chflags(dirfs_node_t dnp, u_long vaflags, struct ucred *cred)
675 {
676 	struct vnode *vp;
677 	dirfs_mount_t dmp;
678 	int error = 0;
679 	int flags;
680 	char *tmp;
681 	char *pathfree;
682 
683 	vp = NODE_TO_VP(dnp);
684 	dmp = VFS_TO_DIRFS(vp->v_mount);
685 
686 	KKASSERT(vn_islocked(vp));
687 
688 	flags = dnp->dn_flags;
689 
690 	error = vop_helper_setattr_flags(&flags, vaflags, dnp->dn_uid, cred);
691 	/*
692 	 * When running vkernels with non-root it is not possible to set
693 	 * certain flags on host files, such as the SF* flags.  The chflags(2)
694 	 * call will return an error in that case.
695 	 */
696 	if (error == 0) {
697 		tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
698 		KKASSERT(tmp);
699 		if((lchflags(tmp, flags)) == -1)
700 			error = errno;
701 		dirfs_node_stat(DIRFS_NOFD, tmp, dnp);
702 		dirfs_dropfd(dmp, NULL, pathfree);
703 	}
704 
705 	KKASSERT(vn_islocked(vp));
706 
707 	return error;
708 }
709 
710 int
711 dirfs_node_chmod(dirfs_mount_t dmp, dirfs_node_t dnp, mode_t mode)
712 {
713 	char *tmp;
714 	char *pathfree;
715 	int error = 0;
716 
717 	tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
718 	KKASSERT(tmp);
719 	if (lchmod(tmp, mode) < 0)
720 		error = errno;
721 	dirfs_node_stat(DIRFS_NOFD, tmp, dnp);
722 	dirfs_dropfd(dmp, NULL, pathfree);
723 
724 	return error;
725 }
726 
727 int
728 dirfs_node_chown(dirfs_mount_t dmp, dirfs_node_t dnp,
729 		 uid_t uid, uid_t gid, mode_t mode)
730 {
731 	char *tmp;
732 	char *pathfree;
733 	int error = 0;
734 
735 	tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
736 	KKASSERT(tmp);
737 	if (lchown(tmp, uid, gid) < 0)
738 		error = errno;
739 	if (mode != dnp->dn_mode)
740 		lchmod(tmp, mode);
741 	dirfs_node_stat(DIRFS_NOFD, tmp, dnp);
742 	dirfs_dropfd(dmp, NULL, pathfree);
743 
744 	return error;
745 }
746 
747 
748 int
749 dirfs_node_chsize(dirfs_node_t dnp, off_t nsize)
750 {
751 	dirfs_mount_t dmp;
752 	struct vnode *vp;
753 	int error = 0;
754 	char *tmp;
755 	char *pathfree;
756 	off_t osize;
757 	int biosize;
758 
759 	KKASSERT(dnp);
760 
761 	vp = NODE_TO_VP(dnp);
762 	dmp = VFS_TO_DIRFS(vp->v_mount);
763 	biosize = BSIZE;
764 	osize = dnp->dn_size;
765 
766 	KKASSERT(vn_islocked(vp));
767 
768 	switch (vp->v_type) {
769 	case VDIR:
770 		return (EISDIR);
771 	case VREG:
772 		break;
773 	default:
774 		return (EOPNOTSUPP);
775 
776 	}
777 
778 	tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
779 	if (nsize < osize) {
780 		error = nvtruncbuf(vp, nsize, biosize, -1, 0);
781 	} else {
782 		error = nvextendbuf(vp, osize, nsize,
783 				    biosize, biosize,
784 				    -1, -1, 0);
785 	}
786 	if (error == 0 && truncate(tmp, nsize) < 0)
787 		error = errno;
788 	if (error == 0)
789 		dnp->dn_size = nsize;
790 	dbg(9, "TRUNCATE %016jx %016jx\n", (intmax_t)nsize, (intmax_t)dnp->dn_size);
791 	/*dirfs_node_stat(DIRFS_NOFD, tmp, dnp); don't need to do this*/
792 
793 	dirfs_dropfd(dmp, NULL, pathfree);
794 
795 
796 	KKASSERT(vn_islocked(vp));
797 
798 	return error;
799 }
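
/*
 * Ordering note for the resize above: the vnode's buffer cache is resized
 * first (nvtruncbuf()/nvextendbuf()) and only then is truncate(2) issued on
 * the host file; dn_size is updated only if both steps succeed, so a host
 * side failure leaves the recorded size unchanged.
 */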
800 
801 void
802 dirfs_node_setpassive(dirfs_mount_t dmp, dirfs_node_t dnp, int state)
803 {
804 	struct vnode *vp;
805 
806 	dbg(5, "dnp=%p state=%d dnp->dn_fd=%d\n", dnp, state, dnp->dn_fd);
807 
808 	if (state && (dnp->dn_state & DIRFS_PASVFD) == 0 &&
809 	    dnp->dn_fd != DIRFS_NOFD) {
810 		dirfs_node_ref(dnp);
811 		dirfs_node_setflags(dnp, DIRFS_PASVFD);
812 		TAILQ_INSERT_TAIL(&dmp->dm_fdlist, dnp, dn_fdentry);
813 		++dirfs_fd_used;
814 		++dmp->dm_fd_used;
815 
816 		/*
817 		 * If we are over our limit remove nodes from the
818 		 * passive fd cache.
819 		 */
820 		while (dmp->dm_fd_used > dirfs_fd_limit) {
821 			dnp = TAILQ_FIRST(&dmp->dm_fdlist);
822 			dirfs_node_setpassive(dmp, dnp, 0);
823 		}
824 	}
825 	if (state == 0 && (dnp->dn_state & DIRFS_PASVFD)) {
826 		dirfs_node_clrflags(dnp, DIRFS_PASVFD);
827 		TAILQ_REMOVE(&dmp->dm_fdlist, dnp, dn_fdentry);
828 		--dirfs_fd_used;
829 		--dmp->dm_fd_used;
830 		dbg(5, "dnp=%p removed from fdlist. %d used refs=%d\n",
831 		    dnp, dirfs_fd_used, dirfs_node_refcnt(dnp));
832 
833 		/*
834 		 * Attempt to close the descriptor.  We can only do this
835 		 * if the related vnode is inactive and has exactly two
836 		 * refs (representing the vp<->dnp and PASVFD).  Otherwise
837 		 * someone might have ref'd the node in order to use the
838 		 * dn_fd.
839 		 *
840 		 * Also, if the vnode is in any way dirty we leave the fd
841 		 * open for the buffer cache code.  The syncer will eventually
842 		 * come along and fsync the vnode, and the next inactive
843 		 * transition will deal with the descriptor.
844 		 *
845 		 * The descriptor for the root node is NEVER closed by
846 		 * this function.
847 		 */
848 		vp = dnp->dn_vnode;
849 		if (dirfs_node_refcnt(dnp) == 2 && vp &&
850 		    dnp->dn_fd != DIRFS_NOFD &&
851 		    !dirfs_node_isroot(dnp) &&
852 		    (vp->v_flag & (VINACTIVE|VOBJDIRTY)) == VINACTIVE &&
853 		    RB_EMPTY(&vp->v_rbdirty_tree)) {
854 			dbg(9, "passive cache: closing %d\n", dnp->dn_fd);
855 			close(dnp->dn_fd);
856 			dnp->dn_fd = DIRFS_NOFD;
857 		} else {
858 			if (dirfs_node_refcnt(dnp) == 1 && dnp->dn_vnode == NULL &&
859 			    dnp->dn_fd != DIRFS_NOFD &&
860 			    dnp != dmp->dm_root) {
861 				dbg(9, "passive cache: closing %d\n", dnp->dn_fd);
862 				close(dnp->dn_fd);
863 				dnp->dn_fd = DIRFS_NOFD;
864 			}
865 		}
866 		dirfs_node_drop(dmp, dnp);
867 	}
868 }
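
/*
 * A sketch of the passive fd cache behaviour implemented above (the number
 * is illustrative, assuming dirfs_fd_limit were 100): a node that still has
 * an open host descriptor when it goes passive is appended to dm_fdlist and
 * gains one extra reference.  Once dm_fd_used exceeds the limit (e.g. on the
 * 101st entry), the oldest entries are cycled out by re-entering this
 * function with state == 0, which closes their descriptors when it is safe
 * to do so (vnode inactive and clean, or no vnode at all, and never the
 * root) and drops the extra reference.
 */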
869 
870 char *
871 dirfs_flag2str(dirfs_node_t dnp)
872 {
873 	const char *txtflg[] = { DIRFS_TXTFLG };
874 	static char str[512] = {0};
875 
876 	if (dnp->dn_state & DIRFS_PASVFD)
877 		ksprintf(str, "%s ", txtflg[0]);
878 
879 	return str;
880 }
881 
882 void
883 debug(int level, const char *fmt, ...)
884 {
885 	__va_list ap;
886 
887 	if (debuglvl >= level) {
888 		__va_start(ap, fmt);
889 		kvprintf(fmt, ap);
890 		__va_end(ap);
891 	}
892 }
893