1 /* 2 * Copyright (c) 2013 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Antonio Huete Jimenez <tuxillo@quantumachine.net> 6 * by Matthew Dillon <dillon@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 *
 */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/spinlock2.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vfscache.h>
#include <sys/vnode.h>

#include "dirfs.h"

/*
 * Allocate and set up everything needed for the dirfs node to hold the
 * filename.  Any previous name is released first.
 * Note: dn_name is NUL terminated.
 */
void
dirfs_node_setname(dirfs_node_t dnp, const char *name, int len)
{
	dbg(5, "called\n");

	if (dnp->dn_name)
		kfree(dnp->dn_name, M_DIRFS_MISC);
	dnp->dn_name = kmalloc(len + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO);
	bcopy(name, dnp->dn_name, len);
	dnp->dn_name[len] = 0;
	dnp->dn_namelen = len;
}

/*
 * Allocate and minimally initialize a dirfs node structure.  The host
 * file descriptor starts out invalid (DIRFS_NOFD).
 * Note: Node name and length aren't handled here; see
 * dirfs_node_setname().
 */
dirfs_node_t
dirfs_node_alloc(struct mount *mp)
{
	dirfs_node_t dnp;

	dbg(5, "called\n");

	dnp = kmalloc(sizeof(*dnp), M_DIRFS_NODE, M_WAITOK | M_ZERO);
	lockinit(&dnp->dn_lock, "dfsnode", 0, LK_CANRECURSE);

	dnp->dn_fd = DIRFS_NOFD;

	return dnp;
}

/*
 * Drop a reference to the node.  The node is freed when the last
 * reference goes away.
 */
void
dirfs_node_drop(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	dbg(5, "called\n");

	if (dirfs_node_unref(dnp))
		dirfs_node_free(dmp, dnp);
}

/*
 * Removes the association with its parent. Before freeing up its resources
 * the node will be removed from the per-mount passive fd cache and its fd
 * will be closed, either normally or forced.
 */
int
dirfs_node_free(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	struct vnode *vp;

	dbg(5, "called\n");

	KKASSERT(dnp != NULL);
	debug_node2(dnp);

	/* Only nodes whose last reference is gone may be freed. */
	KKASSERT(dirfs_node_refcnt(dnp) == 0);

	vp = NODE_TO_VP(dnp);
	/*
	 * Remove the inode from the passive fds list
	 * as we are tearing down the node.
	 * Root inode will be removed on VOP_UNMOUNT()
	 */
	if (dnp->dn_parent) {	/* NULL when children reaped parents */
		dirfs_node_drop(dmp, dnp->dn_parent);
		dnp->dn_parent = NULL;
	}
	dirfs_node_setpassive(dmp, dnp, 0);
	if (dnp->dn_name) {
		kfree(dnp->dn_name, M_DIRFS_MISC);
		dnp->dn_name = NULL;
	}

	/*
	 * The file descriptor should have been closed already by the
	 * previous call to dirfs_node_setpassive().  If not, force a
	 * sync and close it.
	 */
	if (dnp->dn_fd != DIRFS_NOFD) {
		if (dnp->dn_vnode)
			VOP_FSYNC(vp, MNT_WAIT, 0);
		close(dnp->dn_fd);
		dnp->dn_fd = DIRFS_NOFD;
	}

	lockuninit(&dnp->dn_lock);
	kfree(dnp, M_DIRFS_NODE);
	dnp = NULL;	/* NOTE(review): dead store, dnp is local */

	return 0;
}

/*
 * Do all the operations needed to get a resulting inode <--> host file
 * association.  This may or may not include opening the file, which
 * should only be needed when creating it.
 *
 * In the case vap is not NULL and openflags are specified, open the file.
 */
int
dirfs_alloc_file(dirfs_mount_t dmp, dirfs_node_t *dnpp, dirfs_node_t pdnp,
    struct namecache *ncp, struct vnode **vpp, struct vattr *vap,
    int openflags)
{
	dirfs_node_t dnp;
	dirfs_node_t pathnp;
	struct vnode *vp;
	struct mount *mp;
	char *tmp;
	char *pathfree;
	int error;

	dbg(5, "called\n");

	error = 0;
	vp = NULL;
	mp = DIRFS_TO_VFS(dmp);

	/* Sanity check */
	if (pdnp == NULL)
		return EINVAL;

	dnp = dirfs_node_alloc(mp);
	KKASSERT(dnp != NULL);

	/* Name the node and hook it to its parent; children hold a ref. */
	dirfs_node_lock(dnp);
	dirfs_node_setname(dnp, ncp->nc_name, ncp->nc_nlen);
	dnp->dn_parent = pdnp;
	dirfs_node_ref(pdnp);	/* Children ref */
	dirfs_node_unlock(dnp);

	pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree);

	if (openflags && vap != NULL) {
		dnp->dn_fd = openat(pathnp->dn_fd, tmp,
		    openflags, vap->va_mode);
		if (dnp->dn_fd == -1) {
			/*
			 * NOTE(review): dnp is not released on this error
			 * path, which looks like a node leak (compare with
			 * the stat-failure path below) — TODO confirm.
			 */
			dirfs_dropfd(dmp, pathnp, pathfree);
			return errno;
		}
	}

	error = dirfs_node_stat(pathnp->dn_fd, tmp, dnp);
	if (error) {		/* XXX Handle errors */
		error = errno;
		/*
		 * NOTE(review): vp is still NULL at this point (it is only
		 * assigned by dirfs_alloc_vp() below), so this branch is
		 * dead code.
		 */
		if (vp)
			dirfs_free_vp(dmp, dnp);
		dirfs_node_free(dmp, dnp);
		dirfs_dropfd(dmp, pathnp, pathfree);
		return error;
	}

	dirfs_alloc_vp(mp, &vp, LK_CANRECURSE, dnp);
	*vpp = vp;
	*dnpp = dnp;

	dbg(9, "tmp=%s dnp=%p allocated\n", tmp, dnp);
	dirfs_dropfd(dmp, pathnp, pathfree);

	/* We want VOP_INACTIVE() to be called on last ref */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	return error;
}

/*
 * Get (or create) the vnode associated with a dirfs node.  Requires a
 * dirfs_node_t that has already been lstat(2)'d so dn_type is valid
 * for the type comparison.
 */
void
dirfs_alloc_vp(struct mount *mp, struct vnode **vpp, int lkflags,
    dirfs_node_t dnp)
{
	struct vnode *vp;
	dirfs_mount_t dmp = VFS_TO_DIRFS(mp);

	dbg(5, "called\n");

	/*
	 * Handle vnode reclaim/alloc races
	 */
	for (;;) {
		vp = dnp->dn_vnode;
		if (vp) {
			if (vget(vp, LK_EXCLUSIVE) == 0)
				break;	/* success */
			/* vget raced a reclaim, retry */
		} else {
			getnewvnode(VT_UNUSED10, mp, &vp, 0, lkflags);
			/* Re-check: another thread may have attached a vp. */
			if (dnp->dn_vnode == NULL) {
				dnp->dn_vnode = vp;
				vp->v_data = dnp;
				vp->v_type = dnp->dn_type;
				if (dmp->dm_root == dnp)
					vsetflags(vp, VROOT);
				dirfs_node_ref(dnp);	/* ref for dnp<->vp */

				/* Type-specific initialization. */
				switch (dnp->dn_type) {
				case VBLK:
				case VCHR:
				case VSOCK:
					break;
				case VREG:
					/* Regular files need a VM object */
					vinitvmio(vp, dnp->dn_size, BMASK, -1);
					break;
				case VLNK:
					break;
				case VFIFO:
					// vp->v_ops = &mp->mnt_vn_fifo_ops;
					break;
				case VDIR:
					break;
				default:
					panic("dirfs_alloc_vp: dnp=%p vp=%p "
					    "type=%d",
					    dnp, vp, dnp->dn_type);
					/* NOT REACHED */
					break;
				}
				break;	/* success */
			}
			vp->v_type = VBAD;
			vx_put(vp);
			/* multiple dirfs_alloc_vp calls raced, retry */
		}
	}
	KKASSERT(vp != NULL);
	*vpp = vp;
	dbg(9, "dnp=%p vp=%p type=%d\n", dnp, vp, vp->v_type);
}

/*
 * Do not call locked!
 */
void
dirfs_free_vp(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	struct vnode *vp = NODE_TO_VP(dnp);

	dbg(5, "called\n");

	/* Break the vp<->dnp association and drop the ref it held. */
	dnp->dn_vnode = NULL;
	vp->v_data = NULL;
	dirfs_node_drop(dmp, dnp);
}

/*
 * Map a host stat(2) st_mode to the corresponding vnode type,
 * VBAD when the mode matches no known file type.
 */
int
dirfs_nodetype(struct stat *st)
{
	int ret;
	mode_t mode = st->st_mode;

	if (S_ISDIR(mode))
		ret = VDIR;
	else if (S_ISBLK(mode))
		ret = VBLK;
	else if (S_ISCHR(mode))
		ret = VCHR;
	else if (S_ISFIFO(mode))
		ret = VFIFO;
	else if (S_ISSOCK(mode))
		ret = VSOCK;
	else if (S_ISLNK(mode))
		ret = VLNK;
	else if (S_ISREG(mode))
		ret = VREG;
	else
		ret = VBAD;

	return ret;
}

/*
 * Refresh the cached node attributes from the host file.  Uses
 * lstat(2) when no directory fd is available, fstatat(2) relative to
 * fd otherwise; symlinks are never followed.
 * Returns 0 on success or the host errno on failure.
 */
int
dirfs_node_stat(int fd, const char *path, dirfs_node_t dnp)
{
	struct stat st;
	int error;

	dbg(5, "called\n");
	if (fd == DIRFS_NOFD)
		error = lstat(path, &st);
	else
		error = fstatat(fd, path, &st, AT_SYMLINK_NOFOLLOW);

	if (error)
		return errno;

	/* Populate our dirfs node struct with stat data */
	dnp->dn_uid = st.st_uid;
	dnp->dn_gid = st.st_gid;
	dnp->dn_mode = st.st_mode;
	dnp->dn_flags = st.st_flags;
	dnp->dn_links = st.st_nlink;
	dnp->dn_atime = st.st_atime;
	/*
	 * NOTE(review): the *nsec fields below store whole seconds
	 * multiplied by 1e9 rather than the sub-second component of the
	 * host timestamps (e.g. st_atimespec.tv_nsec).  Looks suspicious —
	 * TODO confirm the intended semantics of dn_*timensec.
	 */
	dnp->dn_atimensec = (st.st_atime * 1000000000L);
	dnp->dn_mtime = st.st_mtime;
	dnp->dn_mtimensec = (st.st_mtime * 1000000000L);
	dnp->dn_ctime = st.st_ctime;
	dnp->dn_ctimensec = (st.st_ctime * 1000000000L);
	dnp->dn_gen = st.st_gen;
	dnp->dn_ino = st.st_ino;
	dnp->dn_st_dev = st.st_dev;
	dnp->dn_size = st.st_size;
	dnp->dn_type = dirfs_nodetype(&st);

	return 0;
}

/*
 * Convenience wrapper: absolute host path with no trailing element.
 */
char *
dirfs_node_absolute_path(dirfs_mount_t dmp, dirfs_node_t cur, char **pathfreep)
{
	return(dirfs_node_absolute_path_plus(dmp, cur, NULL, pathfreep));
}

/*
 * Build the absolute host path for a node, optionally with trailing
 * element "last" appended.  The path is composed backwards from the
 * end of a MAXPATHLEN buffer.  On success returns a pointer into the
 * buffer and stores the buffer itself in *pathfreep for the caller to
 * release via dirfs_dropfd(); on failure (path too long, or node was
 * unlinked) returns NULL with *pathfreep set to NULL.
 */
char *
dirfs_node_absolute_path_plus(dirfs_mount_t dmp, dirfs_node_t cur,
    char *last, char **pathfreep)
{
	size_t len;
	dirfs_node_t dnp1;
	char *buf;
	int count;

	dbg(5, "called\n");

	KKASSERT(dmp->dm_root);	/* Sanity check */
	*pathfreep = NULL;
	if (cur == NULL)
		return NULL;
	buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK);

	/*
	 * Passed-in trailing element.
	 */
	count = 0;
	buf[MAXPATHLEN] = 0;
	if (last) {
		len = strlen(last);
		count += len;
		if (count <= MAXPATHLEN)
			bcopy(last, &buf[MAXPATHLEN - count], len);
		++count;
		if (count <= MAXPATHLEN)
			buf[MAXPATHLEN - count] = '/';
	}

	/*
	 * Iterate through the parents until we hit the root.
	 */
	dnp1 = cur;
	while (dirfs_node_isroot(dnp1) == 0) {
		count += dnp1->dn_namelen;
		if (count <= MAXPATHLEN) {
			bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count],
			    dnp1->dn_namelen);
		}
		++count;
		if (count <= MAXPATHLEN)
			buf[MAXPATHLEN - count] = '/';
		dnp1 = dnp1->dn_parent;
		if (dnp1 == NULL)
			break;
	}

	/*
	 * Prefix with the root mount path. If the element was unlinked
	 * dnp1 will be NULL and there is no path.
	 */
	len = strlen(dmp->dm_path);
	count += len;
	if (dnp1 && count <= MAXPATHLEN) {
		bcopy(dmp->dm_path, &buf[MAXPATHLEN - count], len);
		*pathfreep = buf;
		dbg(9, "absolute_path %s\n", &buf[MAXPATHLEN - count]);
		return (&buf[MAXPATHLEN - count]);
	} else {
		kfree(buf, M_DIRFS_MISC);
		*pathfreep = NULL;
		return (NULL);
	}
}

/*
 * Return a dirfs_node with a valid descriptor plus an allocated
 * relative path which can be used in openat(), fstatat(), etc calls
 * to locate the requested inode.
447 */ 448 dirfs_node_t 449 dirfs_findfd(dirfs_mount_t dmp, dirfs_node_t cur, 450 char **pathto, char **pathfreep) 451 { 452 dirfs_node_t dnp1; 453 int count; 454 char *buf; 455 456 dbg(5, "called\n"); 457 458 *pathfreep = NULL; 459 *pathto = NULL; 460 461 if (cur == NULL) 462 return NULL; 463 464 buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO); 465 count = 0; 466 467 dnp1 = cur; 468 while (dnp1 == cur || dnp1->dn_fd == DIRFS_NOFD) { 469 count += dnp1->dn_namelen; 470 if (count <= MAXPATHLEN) { 471 bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count], 472 dnp1->dn_namelen); 473 } 474 ++count; 475 if (count <= MAXPATHLEN) 476 buf[MAXPATHLEN - count] = '/'; 477 dnp1 = dnp1->dn_parent; 478 KKASSERT(dnp1 != NULL); 479 } 480 481 if (dnp1 && count <= MAXPATHLEN) { 482 *pathfreep = buf; 483 *pathto = &buf[MAXPATHLEN - count + 1]; /* skip '/' prefix */ 484 dirfs_node_ref(dnp1); 485 dbg(9, "fd=%d dnp1=%p dnp1->dn_name=%d &buf[off]=%s\n", 486 dnp1->dn_fd, dnp1, dnp1->dn_name, *pathto); 487 } else { 488 dbg(9, "failed too long\n"); 489 kfree(buf, M_DIRFS_MISC); 490 *pathfreep = NULL; 491 *pathto = NULL; 492 dnp1 = NULL; 493 } 494 return (dnp1); 495 } 496 497 void 498 dirfs_dropfd(dirfs_mount_t dmp, dirfs_node_t dnp1, char *pathfree) 499 { 500 if (pathfree) 501 kfree(pathfree, M_DIRFS_MISC); 502 if (dnp1) 503 dirfs_node_drop(dmp, dnp1); 504 } 505 506 int 507 dirfs_node_getperms(dirfs_node_t dnp, int *flags) 508 { 509 dirfs_mount_t dmp; 510 struct vnode *vp = dnp->dn_vnode; 511 int isowner; 512 int isgroup; 513 514 /* 515 * There must be an active vnode anyways since that 516 * would indicate the dirfs node has valid data for 517 * for dnp->dn_mode (via lstat syscall). 
518 */ 519 KKASSERT(vp); 520 dmp = VFS_TO_DIRFS(vp->v_mount); 521 522 isowner = (dmp->dm_uid == dnp->dn_uid); 523 isgroup = (dmp->dm_gid == dnp->dn_gid); 524 525 if (isowner) { 526 if (dnp->dn_mode & S_IRUSR) 527 *flags |= DIRFS_NODE_RD; 528 if (dnp->dn_mode & S_IWUSR) 529 *flags |= DIRFS_NODE_WR; 530 if (dnp->dn_mode & S_IXUSR) 531 *flags |= DIRFS_NODE_EXE; 532 } else if (isgroup) { 533 if (dnp->dn_mode & S_IRGRP) 534 *flags |= DIRFS_NODE_RD; 535 if (dnp->dn_mode & S_IWGRP) 536 *flags |= DIRFS_NODE_WR; 537 if (dnp->dn_mode & S_IXGRP) 538 *flags |= DIRFS_NODE_EXE; 539 } else { 540 if (dnp->dn_mode & S_IROTH) 541 *flags |= DIRFS_NODE_RD; 542 if (dnp->dn_mode & S_IWOTH) 543 *flags |= DIRFS_NODE_WR; 544 if (dnp->dn_mode & S_IXOTH) 545 *flags |= DIRFS_NODE_EXE; 546 } 547 548 return 0; 549 } 550 551 /* 552 * This requires an allocated node and vnode, otherwise it'll panic 553 */ 554 int 555 dirfs_open_helper(dirfs_mount_t dmp, dirfs_node_t dnp, int parentfd, 556 char *relpath) 557 { 558 dirfs_node_t pathnp; 559 char *pathfree; 560 char *tmp; 561 int flags; 562 int perms; 563 int error; 564 565 dbg(5, "called\n"); 566 567 flags = error = perms = 0; 568 tmp = NULL; 569 570 KKASSERT(dnp); 571 KKASSERT(dnp->dn_vnode); 572 573 /* 574 * XXX Besides VDIR and VREG there are other file 575 * types, y'know? 576 * Also, O_RDWR alone might not be the best mode to open 577 * a file with, need to investigate which suits better. 
578 */ 579 dirfs_node_getperms(dnp, &perms); 580 581 if (dnp->dn_type & VDIR) { 582 flags |= O_DIRECTORY; 583 } else { 584 if (perms & DIRFS_NODE_WR) 585 flags |= O_RDWR; 586 else 587 flags |= O_RDONLY; 588 } 589 if (relpath != NULL) { 590 tmp = relpath; 591 pathnp = NULL; 592 KKASSERT(parentfd != DIRFS_NOFD); 593 } else if (parentfd == DIRFS_NOFD) { 594 pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree); 595 parentfd = pathnp->dn_fd; 596 } else { 597 pathnp = NULL; 598 } 599 600 dnp->dn_fd = openat(parentfd, tmp, flags); 601 if (dnp->dn_fd == -1) 602 error = errno; 603 604 dbg(9, "dnp=%p tmp2=%s parentfd=%d flags=%d error=%d " 605 "flags=%08x w=%d x=%d\n", dnp, tmp, parentfd, flags, error, 606 perms); 607 608 if (pathnp) 609 dirfs_dropfd(dmp, pathnp, pathfree); 610 611 return error; 612 } 613 614 int 615 dirfs_close_helper(dirfs_node_t dnp) 616 { 617 int error = 0; 618 619 dbg(5, "called\n"); 620 621 622 if (dnp->dn_fd != DIRFS_NOFD) { 623 dbg(9, "closed fd on dnp=%p\n", dnp); 624 #if 0 625 /* buffer cache buffers may still be present */ 626 error = close(dnp->dn_fd); /* XXX EINTR should be checked */ 627 dnp->dn_fd = DIRFS_NOFD; 628 #endif 629 } 630 631 return error; 632 } 633 634 int 635 dirfs_node_refcnt(dirfs_node_t dnp) 636 { 637 return dnp->dn_refcnt; 638 } 639 640 int 641 dirfs_node_chtimes(dirfs_node_t dnp) 642 { 643 struct vnode *vp; 644 dirfs_mount_t dmp; 645 int error = 0; 646 char *tmp; 647 char *pathfree; 648 649 vp = NODE_TO_VP(dnp); 650 dmp = VFS_TO_DIRFS(vp->v_mount); 651 652 KKASSERT(vn_islocked(vp)); 653 654 if (dnp->dn_flags & (IMMUTABLE | APPEND)) 655 return EPERM; 656 657 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 658 KKASSERT(tmp); 659 if((lutimes(tmp, NULL)) == -1) 660 error = errno; 661 662 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 663 dirfs_dropfd(dmp, NULL, pathfree); 664 665 KKASSERT(vn_islocked(vp)); 666 667 668 return error; 669 } 670 671 int 672 dirfs_node_chflags(dirfs_node_t dnp, int vaflags, struct ucred *cred) 673 { 674 
struct vnode *vp; 675 dirfs_mount_t dmp; 676 int error = 0; 677 int flags; 678 char *tmp; 679 char *pathfree; 680 681 vp = NODE_TO_VP(dnp); 682 dmp = VFS_TO_DIRFS(vp->v_mount); 683 684 KKASSERT(vn_islocked(vp)); 685 686 flags = dnp->dn_flags; 687 688 error = vop_helper_setattr_flags(&flags, vaflags, dnp->dn_uid, cred); 689 /* 690 * When running vkernels with non-root it is not possible to set 691 * certain flags on host files, such as SF* flags. chflags(2) call 692 * will spit an error in that case. 693 */ 694 if (error == 0) { 695 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 696 KKASSERT(tmp); 697 if((lchflags(tmp, flags)) == -1) 698 error = errno; 699 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 700 dirfs_dropfd(dmp, NULL, pathfree); 701 } 702 703 KKASSERT(vn_islocked(vp)); 704 705 return error; 706 } 707 708 int 709 dirfs_node_chmod(dirfs_mount_t dmp, dirfs_node_t dnp, mode_t mode) 710 { 711 char *tmp; 712 char *pathfree; 713 int error = 0; 714 715 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 716 KKASSERT(tmp); 717 if (lchmod(tmp, mode) < 0) 718 error = errno; 719 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 720 dirfs_dropfd(dmp, NULL, pathfree); 721 722 return error; 723 } 724 725 int 726 dirfs_node_chown(dirfs_mount_t dmp, dirfs_node_t dnp, 727 uid_t uid, uid_t gid, mode_t mode) 728 { 729 char *tmp; 730 char *pathfree; 731 int error = 0; 732 733 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 734 KKASSERT(tmp); 735 if (lchown(tmp, uid, gid) < 0) 736 error = errno; 737 if (mode != dnp->dn_mode) 738 lchmod(tmp, mode); 739 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 740 dirfs_dropfd(dmp, NULL, pathfree); 741 742 return error; 743 } 744 745 746 int 747 dirfs_node_chsize(dirfs_node_t dnp, off_t nsize) 748 { 749 dirfs_mount_t dmp; 750 struct vnode *vp; 751 int error = 0; 752 char *tmp; 753 char *pathfree; 754 off_t osize; 755 int biosize; 756 757 KKASSERT(dnp); 758 759 vp = NODE_TO_VP(dnp); 760 dmp = VFS_TO_DIRFS(vp->v_mount); 761 biosize = BSIZE; 762 osize = 
dnp->dn_size; 763 764 KKASSERT(vn_islocked(vp)); 765 766 switch (vp->v_type) { 767 case VDIR: 768 return (EISDIR); 769 case VREG: 770 break; 771 default: 772 return (EOPNOTSUPP); 773 774 } 775 776 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 777 if (nsize < osize) { 778 error = nvtruncbuf(vp, nsize, biosize, -1, 0); 779 } else { 780 error = nvextendbuf(vp, osize, nsize, 781 biosize, biosize, 782 -1, -1, 0); 783 } 784 if (error == 0 && truncate(tmp, nsize) < 0) 785 error = errno; 786 if (error == 0) 787 dnp->dn_size = nsize; 788 dbg(9, "TRUNCATE %016jx %016jx\n", (intmax_t)nsize, dnp->dn_size); 789 /*dirfs_node_stat(DIRFS_NOFD, tmp, dnp); don't need to do this*/ 790 791 dirfs_dropfd(dmp, NULL, pathfree); 792 793 794 KKASSERT(vn_islocked(vp)); 795 796 return error; 797 } 798 799 void 800 dirfs_node_setpassive(dirfs_mount_t dmp, dirfs_node_t dnp, int state) 801 { 802 struct vnode *vp; 803 804 dbg(5, "dnp=%p state=%d dnp->dn_fd=%d\n", dnp, state, dnp->dn_fd); 805 806 if (state && (dnp->dn_state & DIRFS_PASVFD) == 0 && 807 dnp->dn_fd != DIRFS_NOFD) { 808 dirfs_node_ref(dnp); 809 dirfs_node_setflags(dnp, DIRFS_PASVFD); 810 TAILQ_INSERT_TAIL(&dmp->dm_fdlist, dnp, dn_fdentry); 811 ++dirfs_fd_used; 812 ++dmp->dm_fd_used; 813 814 /* 815 * If we are over our limit remove nodes from the 816 * passive fd cache. 817 */ 818 while (dmp->dm_fd_used > dirfs_fd_limit) { 819 dnp = TAILQ_FIRST(&dmp->dm_fdlist); 820 dirfs_node_setpassive(dmp, dnp, 0); 821 } 822 } 823 if (state == 0 && (dnp->dn_state & DIRFS_PASVFD)) { 824 dirfs_node_clrflags(dnp, DIRFS_PASVFD); 825 TAILQ_REMOVE(&dmp->dm_fdlist, dnp, dn_fdentry); 826 --dirfs_fd_used; 827 --dmp->dm_fd_used; 828 dbg(5, "dnp=%p removed from fdlist. %d used refs=%d\n", 829 dnp, dirfs_fd_used, dirfs_node_refcnt(dnp)); 830 831 /* 832 * Attempt to close the descriptor. We can only do this 833 * if the related vnode is inactive and has exactly two 834 * refs (representing the vp<->dnp and PASVFD). 
Otherwise 835 * someone might have ref'd the node in order to use the 836 * dn_fd. 837 * 838 * Also, if the vnode is in any way dirty we leave the fd 839 * open for the buffer cache code. The syncer will eventually 840 * come along and fsync the vnode, and the next inactive 841 * transition will deal with the descriptor. 842 * 843 * The descriptor for the root node is NEVER closed by 844 * this function. 845 */ 846 vp = dnp->dn_vnode; 847 if (dirfs_node_refcnt(dnp) == 2 && vp && 848 dnp->dn_fd != DIRFS_NOFD && 849 !dirfs_node_isroot(dnp) && 850 (vp->v_flag & (VINACTIVE|VOBJDIRTY)) == VINACTIVE && 851 RB_EMPTY(&vp->v_rbdirty_tree)) { 852 dbg(9, "passive cache: closing %d\n", dnp->dn_fd); 853 close(dnp->dn_fd); 854 dnp->dn_fd = DIRFS_NOFD; 855 } else { 856 if (dirfs_node_refcnt(dnp) == 1 && dnp->dn_vnode == NULL && 857 dnp->dn_fd != DIRFS_NOFD && 858 dnp != dmp->dm_root) { 859 dbg(9, "passive cache: closing %d\n", dnp->dn_fd); 860 close(dnp->dn_fd); 861 dnp->dn_fd = DIRFS_NOFD; 862 } 863 } 864 dirfs_node_drop(dmp, dnp); 865 } 866 } 867 868 char * 869 dirfs_flag2str(dirfs_node_t dnp) 870 { 871 const char *txtflg[] = { DIRFS_TXTFLG }; 872 static char str[512] = {0}; 873 874 if (dnp->dn_state & DIRFS_PASVFD) 875 ksprintf(str, "%s ", txtflg[0]); 876 877 return str; 878 } 879 880 void 881 debug(int level, const char *fmt, ...) 882 { 883 __va_list ap; 884 885 if (debuglvl >= level) { 886 __va_start(ap, fmt); 887 kvprintf(fmt, ap); 888 __va_end(ap); 889 } 890 } 891