1 /* 2 * Copyright (c) 2013 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Antonio Huete Jimenez <tuxillo@quantumachine.net> 6 * by Matthew Dillon <dillon@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 *
 */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/spinlock2.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vfscache.h>
#include <sys/vnode.h>

#include "dirfs.h"

/*
 * Allocate and set up everything needed for the dirfs node to hold the
 * filename.  The previous name, if any, is released first.
 *
 * Note: dn_name is always NUL terminated; dn_namelen excludes the NUL.
 */
void
dirfs_node_setname(dirfs_node_t dnp, const char *name, int len)
{
	debug_called();

	if (dnp->dn_name)
		kfree(dnp->dn_name, M_DIRFS_MISC);
	dnp->dn_name = kmalloc(len + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO);
	bcopy(name, dnp->dn_name, len);
	dnp->dn_name[len] = 0;
	dnp->dn_namelen = len;
}

/*
 * Allocate and minimally initialize a dirfs node structure.  The node
 * starts zeroed (refcount 0, no name, no vnode) with no backing host
 * file descriptor (DIRFS_NOFD).
 *
 * Note: the node name and length are not handled here; see
 * dirfs_node_setname().
 */
dirfs_node_t
dirfs_node_alloc(struct mount *mp)
{
	dirfs_node_t dnp;

	debug_called();

	dnp = kmalloc(sizeof(*dnp), M_DIRFS_NODE, M_WAITOK | M_ZERO);
	lockinit(&dnp->dn_lock, "dfsnode", 0, LK_CANRECURSE);

	dnp->dn_fd = DIRFS_NOFD;

	return dnp;
}

/*
 * Drop a reference to the node.  The node is freed when the last
 * reference goes away.
 */
void
dirfs_node_drop(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	if (dirfs_node_unref(dnp))
		dirfs_node_free(dmp, dnp);
}

/*
 * Tear down and free a node whose refcount has reached zero.  Removes
 * the association with its parent (dropping the child's ref on it),
 * removes the node from the per-mount passive fd cache, and closes its
 * host descriptor, syncing first if a vnode is still attached.
 *
 * Always returns 0.
 */
int
dirfs_node_free(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	struct vnode *vp;

	debug_called();

	KKASSERT(dnp != NULL);
	debug_node2(dnp);

	/* Caller must hold the last reference; see dirfs_node_drop(). */
	KKASSERT(dirfs_node_refcnt(dnp) == 0);

	vp = NODE_TO_VP(dnp);
	/*
	 * Remove the inode from the passive fds list
	 * as we are tearing down the node.
	 * Root inode will be removed on VOP_UNMOUNT()
	 */
	dirfs_mount_gettoken(dmp);

	if (dnp->dn_parent) {	/* NULL when children reaped parents */
		dirfs_node_drop(dmp, dnp->dn_parent);
		dnp->dn_parent = NULL;
	}
	dirfs_node_setpassive(dmp, dnp, 0);
	if (dnp->dn_name) {
		kfree(dnp->dn_name, M_DIRFS_MISC);
		dnp->dn_name = NULL;
	}

	/*
	 * The file descriptor should have been closed already by the
	 * previous call to dirfs_node_setpassive().  If not, force a
	 * sync and close it.
	 */
	if (dnp->dn_fd != DIRFS_NOFD) {
		if (dnp->dn_vnode)
			VOP_FSYNC(vp, MNT_WAIT, 0);
		close(dnp->dn_fd);
		dnp->dn_fd = DIRFS_NOFD;
	}

	lockuninit(&dnp->dn_lock);
	kfree(dnp, M_DIRFS_NODE);
	dnp = NULL;

	dirfs_mount_reltoken(dmp);

	return 0;
}

/*
 * Do all the operations needed to get a resulting inode <--> host file
 * association.  This may or may not include opening the file, which
 * should only be needed when creating it.
 *
 * In the case vap is not NULL and openflags are specified, open the file.
160 */ 161 int 162 dirfs_alloc_file(dirfs_mount_t dmp, dirfs_node_t *dnpp, dirfs_node_t pdnp, 163 struct namecache *ncp, struct vnode **vpp, struct vattr *vap, 164 int openflags) 165 { 166 dirfs_node_t dnp; 167 dirfs_node_t pathnp; 168 struct vnode *vp; 169 struct mount *mp; 170 char *tmp; 171 char *pathfree; 172 int error; 173 174 debug_called(); 175 176 error = 0; 177 vp = NULL; 178 mp = DIRFS_TO_VFS(dmp); 179 180 /* Sanity check */ 181 if (pdnp == NULL) 182 return EINVAL; 183 184 dnp = dirfs_node_alloc(mp); 185 KKASSERT(dnp != NULL); 186 187 dirfs_node_lock(dnp); 188 dirfs_node_setname(dnp, ncp->nc_name, ncp->nc_nlen); 189 dnp->dn_parent = pdnp; 190 dirfs_node_ref(pdnp); /* Children ref */ 191 dirfs_node_unlock(dnp); 192 193 pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree); 194 195 if (openflags && vap != NULL) { 196 dnp->dn_fd = openat(pathnp->dn_fd, tmp, 197 openflags, vap->va_mode); 198 if (dnp->dn_fd == -1) { 199 dirfs_dropfd(dmp, pathnp, pathfree); 200 return errno; 201 } 202 } 203 204 error = dirfs_node_stat(pathnp->dn_fd, tmp, dnp); 205 if (error) { /* XXX Handle errors */ 206 error = errno; 207 if (vp) 208 dirfs_free_vp(dmp, dnp); 209 dirfs_node_free(dmp, dnp); 210 dirfs_dropfd(dmp, pathnp, pathfree); 211 return error; 212 } 213 214 dirfs_alloc_vp(mp, &vp, LK_CANRECURSE, dnp); 215 *vpp = vp; 216 *dnpp = dnp; 217 218 dbg(5, "tmp=%s dnp=%p allocated\n", tmp, dnp); 219 dirfs_dropfd(dmp, pathnp, pathfree); 220 221 return error; 222 } 223 224 /* 225 * Requires an already dirfs_node_t that has been already lstat(2) 226 * for the type comparison 227 */ 228 void 229 dirfs_alloc_vp(struct mount *mp, struct vnode **vpp, int lkflags, 230 dirfs_node_t dnp) 231 { 232 struct vnode *vp; 233 dirfs_mount_t dmp = VFS_TO_DIRFS(mp); 234 235 debug_called(); 236 237 /* 238 * Handle vnode reclaim/alloc races 239 */ 240 for (;;) { 241 vp = dnp->dn_vnode; 242 if (vp) { 243 if (vget(vp, LK_EXCLUSIVE) == 0) 244 break; /* success */ 245 /* vget raced a reclaim, retry */ 246 } else 
{ 247 getnewvnode(VT_UNUSED10, mp, &vp, 0, lkflags); 248 if (dnp->dn_vnode == NULL) { 249 dnp->dn_vnode = vp; 250 vp->v_data = dnp; 251 vp->v_type = dnp->dn_type; 252 if (dmp->dm_root == dnp) 253 vsetflags(vp, VROOT); 254 dirfs_node_ref(dnp); /* ref for dnp<->vp */ 255 256 /* Type-specific initialization. */ 257 switch (dnp->dn_type) { 258 case VBLK: 259 case VCHR: 260 case VSOCK: 261 break; 262 case VREG: 263 vinitvmio(vp, dnp->dn_size, BMASK, -1); 264 break; 265 case VLNK: 266 break; 267 case VFIFO: 268 // vp->v_ops = &mp->mnt_vn_fifo_ops; 269 break; 270 case VDIR: 271 break; 272 default: 273 panic("dirfs_alloc_vp: dnp=%p vp=%p " 274 "type=%d", 275 dnp, vp, dnp->dn_type); 276 /* NOT REACHED */ 277 break; 278 } 279 break; /* success */ 280 } 281 vp->v_type = VBAD; 282 vx_put(vp); 283 /* multiple dirfs_alloc_vp calls raced, retry */ 284 } 285 } 286 KKASSERT(vp != NULL); 287 *vpp = vp; 288 dbg(5, "dnp=%p vp=%p type=%d\n", dnp, vp, vp->v_type); 289 } 290 291 /* 292 * Do not call locked! 293 */ 294 void 295 dirfs_free_vp(dirfs_mount_t dmp, dirfs_node_t dnp) 296 { 297 struct vnode *vp = NODE_TO_VP(dnp); 298 299 dnp->dn_vnode = NULL; 300 vp->v_data = NULL; 301 dirfs_node_drop(dmp, dnp); 302 } 303 304 int 305 dirfs_nodetype(struct stat *st) 306 { 307 int ret; 308 mode_t mode = st->st_mode; 309 310 debug_called(); 311 312 if (S_ISDIR(mode)) 313 ret = VDIR; 314 else if (S_ISBLK(mode)) 315 ret = VBLK; 316 else if (S_ISCHR(mode)) 317 ret = VCHR; 318 else if (S_ISFIFO(mode)) 319 ret = VFIFO; 320 else if (S_ISSOCK(mode)) 321 ret = VSOCK; 322 else if (S_ISLNK(mode)) 323 ret = VLNK; 324 else if (S_ISREG(mode)) 325 ret = VREG; 326 else 327 ret = VBAD; 328 329 return ret; 330 } 331 332 int 333 dirfs_node_stat(int fd, const char *path, dirfs_node_t dnp) 334 { 335 struct stat st; 336 int error; 337 338 debug_called(); 339 if (fd == DIRFS_NOFD) 340 error = lstat(path, &st); 341 else 342 error = fstatat(fd, path, &st, AT_SYMLINK_NOFOLLOW); 343 344 if (error) 345 return errno; 346 347 
/* Populate our dirfs node struct with stat data */ 348 dnp->dn_uid = st.st_uid; 349 dnp->dn_gid = st.st_gid; 350 dnp->dn_mode = st.st_mode; 351 dnp->dn_flags = st.st_flags; 352 dnp->dn_links = st.st_nlink; 353 dnp->dn_atime = st.st_atime; 354 dnp->dn_atimensec = (st.st_atime * 1000000000L); 355 dnp->dn_mtime = st.st_mtime; 356 dnp->dn_mtimensec = (st.st_mtime * 1000000000L); 357 dnp->dn_ctime = st.st_ctime; 358 dnp->dn_ctimensec = (st.st_ctime * 1000000000L); 359 dnp->dn_gen = st.st_gen; 360 dnp->dn_ino = st.st_ino; 361 dnp->dn_st_dev = st.st_dev; 362 dnp->dn_size = st.st_size; 363 dnp->dn_type = dirfs_nodetype(&st); 364 365 return 0; 366 } 367 368 char * 369 dirfs_node_absolute_path(dirfs_mount_t dmp, dirfs_node_t cur, char **pathfreep) 370 { 371 return(dirfs_node_absolute_path_plus(dmp, cur, NULL, pathfreep)); 372 } 373 374 char * 375 dirfs_node_absolute_path_plus(dirfs_mount_t dmp, dirfs_node_t cur, 376 char *last, char **pathfreep) 377 { 378 size_t len; 379 dirfs_node_t dnp1; 380 char *buf; 381 int count; 382 383 debug_called(); 384 385 KKASSERT(dmp->dm_root); /* Sanity check */ 386 *pathfreep = NULL; 387 if (cur == NULL) 388 return NULL; 389 buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK); 390 391 /* 392 * Passed-in trailing element. 393 */ 394 count = 0; 395 buf[MAXPATHLEN] = 0; 396 if (last) { 397 len = strlen(last); 398 count += len; 399 if (count <= MAXPATHLEN) 400 bcopy(last, &buf[MAXPATHLEN - count], len); 401 ++count; 402 if (count <= MAXPATHLEN) 403 buf[MAXPATHLEN - count] = '/'; 404 } 405 406 /* 407 * Iterate through the parents until we hit the root. 
408 */ 409 dnp1 = cur; 410 while (dirfs_node_isroot(dnp1) == 0) { 411 count += dnp1->dn_namelen; 412 if (count <= MAXPATHLEN) { 413 bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count], 414 dnp1->dn_namelen); 415 } 416 ++count; 417 if (count <= MAXPATHLEN) 418 buf[MAXPATHLEN - count] = '/'; 419 dnp1 = dnp1->dn_parent; 420 if (dnp1 == NULL) 421 break; 422 } 423 424 /* 425 * Prefix with the root mount path. If the element was unlinked 426 * dnp1 will be NULL and there is no path. 427 */ 428 len = strlen(dmp->dm_path); 429 count += len; 430 if (dnp1 && count <= MAXPATHLEN) { 431 bcopy(dmp->dm_path, &buf[MAXPATHLEN - count], len); 432 *pathfreep = buf; 433 dbg(5, "absolute_path %s\n", &buf[MAXPATHLEN - count]); 434 return (&buf[MAXPATHLEN - count]); 435 } else { 436 kfree(buf, M_DIRFS_MISC); 437 *pathfreep = NULL; 438 return (NULL); 439 } 440 } 441 442 /* 443 * Return a dirfs_node with a valid descriptor plus an allocated 444 * relative path which can be used in openat(), fstatat(), etc calls 445 * to locate the requested inode. 
446 */ 447 dirfs_node_t 448 dirfs_findfd(dirfs_mount_t dmp, dirfs_node_t cur, 449 char **pathto, char **pathfreep) 450 { 451 dirfs_node_t dnp1; 452 int count; 453 char *buf; 454 455 debug_called(); 456 457 *pathfreep = NULL; 458 *pathto = NULL; 459 460 if (cur == NULL) 461 return NULL; 462 463 buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO); 464 count = 0; 465 466 dnp1 = cur; 467 while (dnp1 == cur || dnp1->dn_fd == DIRFS_NOFD) { 468 count += dnp1->dn_namelen; 469 if (count <= MAXPATHLEN) { 470 bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count], 471 dnp1->dn_namelen); 472 } 473 ++count; 474 if (count <= MAXPATHLEN) 475 buf[MAXPATHLEN - count] = '/'; 476 dnp1 = dnp1->dn_parent; 477 KKASSERT(dnp1 != NULL); 478 } 479 480 if (dnp1 && count <= MAXPATHLEN) { 481 *pathfreep = buf; 482 *pathto = &buf[MAXPATHLEN - count + 1]; /* skip '/' prefix */ 483 dirfs_node_ref(dnp1); 484 dbg(5, "fd=%d dnp1=%p dnp1->dn_name=%d &buf[off]=%s\n", 485 dnp1->dn_fd, dnp1, dnp1->dn_name, *pathto); 486 } else { 487 dbg(5, "failed too long\n"); 488 kfree(buf, M_DIRFS_MISC); 489 *pathfreep = NULL; 490 *pathto = NULL; 491 dnp1 = NULL; 492 } 493 return (dnp1); 494 } 495 496 void 497 dirfs_dropfd(dirfs_mount_t dmp, dirfs_node_t dnp1, char *pathfree) 498 { 499 if (pathfree) 500 kfree(pathfree, M_DIRFS_MISC); 501 if (dnp1) 502 dirfs_node_drop(dmp, dnp1); 503 } 504 505 int 506 dirfs_node_getperms(dirfs_node_t dnp, int *flags) 507 { 508 dirfs_mount_t dmp; 509 struct vnode *vp = dnp->dn_vnode; 510 int isowner; 511 int isgroup; 512 513 /* 514 * There must be an active vnode anyways since that 515 * would indicate the dirfs node has valid data for 516 * for dnp->dn_mode (via lstat syscall). 
517 */ 518 KKASSERT(vp); 519 dmp = VFS_TO_DIRFS(vp->v_mount); 520 521 isowner = (dmp->dm_uid == dnp->dn_uid); 522 isgroup = (dmp->dm_gid == dnp->dn_gid); 523 524 if (isowner) { 525 if (dnp->dn_mode & S_IRUSR) 526 *flags |= DIRFS_NODE_RD; 527 if (dnp->dn_mode & S_IWUSR) 528 *flags |= DIRFS_NODE_WR; 529 if (dnp->dn_mode & S_IXUSR) 530 *flags |= DIRFS_NODE_EXE; 531 } else if (isgroup) { 532 if (dnp->dn_mode & S_IRGRP) 533 *flags |= DIRFS_NODE_RD; 534 if (dnp->dn_mode & S_IWGRP) 535 *flags |= DIRFS_NODE_WR; 536 if (dnp->dn_mode & S_IXGRP) 537 *flags |= DIRFS_NODE_EXE; 538 } else { 539 if (dnp->dn_mode & S_IROTH) 540 *flags |= DIRFS_NODE_RD; 541 if (dnp->dn_mode & S_IWOTH) 542 *flags |= DIRFS_NODE_WR; 543 if (dnp->dn_mode & S_IXOTH) 544 *flags |= DIRFS_NODE_EXE; 545 } 546 547 return 0; 548 } 549 550 /* 551 * This requires an allocated node and vnode, otherwise it'll panic 552 */ 553 int 554 dirfs_open_helper(dirfs_mount_t dmp, dirfs_node_t dnp, int parentfd, 555 char *relpath) 556 { 557 dirfs_node_t pathnp; 558 char *pathfree; 559 char *tmp; 560 int flags; 561 int perms; 562 int error; 563 564 debug_called(); 565 566 flags = error = perms = 0; 567 tmp = NULL; 568 569 KKASSERT(dnp); 570 KKASSERT(dnp->dn_vnode); 571 572 /* 573 * XXX Besides VDIR and VREG there are other file 574 * types, y'know? 575 * Also, O_RDWR alone might not be the best mode to open 576 * a file with, need to investigate which suits better. 
577 */ 578 dirfs_node_getperms(dnp, &perms); 579 580 if (dnp->dn_type & VDIR) { 581 flags |= O_DIRECTORY; 582 } else { 583 if (perms & DIRFS_NODE_WR) 584 flags |= O_RDWR; 585 else 586 flags |= O_RDONLY; 587 } 588 if (relpath != NULL) { 589 tmp = relpath; 590 pathnp = NULL; 591 KKASSERT(parentfd != DIRFS_NOFD); 592 } else if (parentfd == DIRFS_NOFD) { 593 pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree); 594 parentfd = pathnp->dn_fd; 595 } else { 596 pathnp = NULL; 597 } 598 599 dnp->dn_fd = openat(parentfd, tmp, flags); 600 if (dnp->dn_fd == -1) 601 error = errno; 602 603 dbg(5, "dnp=%p tmp2=%s parentfd=%d flags=%d error=%d " 604 "flags=%08x w=%d x=%d\n", dnp, tmp, parentfd, flags, error, 605 perms); 606 607 if (pathnp) 608 dirfs_dropfd(dmp, pathnp, pathfree); 609 610 return error; 611 } 612 613 int 614 dirfs_close_helper(dirfs_node_t dnp) 615 { 616 int error = 0; 617 618 debug_called(); 619 620 621 if (dnp->dn_fd != DIRFS_NOFD) { 622 dbg(5, "closed fd on dnp=%p\n", dnp); 623 #if 0 624 /* buffer cache buffers may still be present */ 625 error = close(dnp->dn_fd); /* XXX EINTR should be checked */ 626 dnp->dn_fd = DIRFS_NOFD; 627 #endif 628 } 629 630 return error; 631 } 632 633 int 634 dirfs_node_refcnt(dirfs_node_t dnp) 635 { 636 return dnp->dn_refcnt; 637 } 638 639 int 640 dirfs_node_chtimes(dirfs_node_t dnp) 641 { 642 struct vnode *vp; 643 dirfs_mount_t dmp; 644 int error = 0; 645 char *tmp; 646 char *pathfree; 647 648 debug_called(); 649 650 vp = NODE_TO_VP(dnp); 651 dmp = VFS_TO_DIRFS(vp->v_mount); 652 653 KKASSERT(vn_islocked(vp)); 654 655 if (dnp->dn_flags & (IMMUTABLE | APPEND)) 656 return EPERM; 657 658 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 659 KKASSERT(tmp); 660 if((lutimes(tmp, NULL)) == -1) 661 error = errno; 662 663 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 664 dirfs_dropfd(dmp, NULL, pathfree); 665 666 KKASSERT(vn_islocked(vp)); 667 668 669 return error; 670 } 671 672 int 673 dirfs_node_chflags(dirfs_node_t dnp, int vaflags, struct ucred 
*cred) 674 { 675 struct vnode *vp; 676 dirfs_mount_t dmp; 677 int error = 0; 678 int flags; 679 char *tmp; 680 char *pathfree; 681 682 debug_called(); 683 684 vp = NODE_TO_VP(dnp); 685 dmp = VFS_TO_DIRFS(vp->v_mount); 686 687 KKASSERT(vn_islocked(vp)); 688 689 flags = dnp->dn_flags; 690 691 error = vop_helper_setattr_flags(&flags, vaflags, dnp->dn_uid, cred); 692 /* 693 * When running vkernels with non-root it is not possible to set 694 * certain flags on host files, such as SF* flags. chflags(2) call 695 * will spit an error in that case. 696 */ 697 if (error == 0) { 698 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 699 KKASSERT(tmp); 700 if((lchflags(tmp, flags)) == -1) 701 error = errno; 702 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 703 dirfs_dropfd(dmp, NULL, pathfree); 704 } 705 706 KKASSERT(vn_islocked(vp)); 707 708 return error; 709 } 710 711 int 712 dirfs_node_chmod(dirfs_mount_t dmp, dirfs_node_t dnp, mode_t mode) 713 { 714 char *tmp; 715 char *pathfree; 716 int error = 0; 717 718 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 719 KKASSERT(tmp); 720 if (lchmod(tmp, mode) < 0) 721 error = errno; 722 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 723 dirfs_dropfd(dmp, NULL, pathfree); 724 725 return error; 726 } 727 728 int 729 dirfs_node_chown(dirfs_mount_t dmp, dirfs_node_t dnp, 730 uid_t uid, uid_t gid, mode_t mode) 731 { 732 char *tmp; 733 char *pathfree; 734 int error = 0; 735 736 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 737 KKASSERT(tmp); 738 if (lchown(tmp, uid, gid) < 0) 739 error = errno; 740 if (mode != dnp->dn_mode) 741 lchmod(tmp, mode); 742 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 743 dirfs_dropfd(dmp, NULL, pathfree); 744 745 return error; 746 } 747 748 749 int 750 dirfs_node_chsize(dirfs_node_t dnp, off_t nsize) 751 { 752 dirfs_mount_t dmp; 753 struct vnode *vp; 754 int error = 0; 755 char *tmp; 756 char *pathfree; 757 off_t osize; 758 int biosize; 759 760 debug_called(); 761 762 KKASSERT(dnp); 763 764 vp = NODE_TO_VP(dnp); 765 dmp 
= VFS_TO_DIRFS(vp->v_mount); 766 biosize = BSIZE; 767 osize = dnp->dn_size; 768 769 KKASSERT(vn_islocked(vp)); 770 771 switch (vp->v_type) { 772 case VDIR: 773 return (EISDIR); 774 case VREG: 775 break; 776 default: 777 return (EOPNOTSUPP); 778 779 } 780 781 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 782 if (nsize < osize) { 783 error = nvtruncbuf(vp, nsize, biosize, -1, 0); 784 } else { 785 error = nvextendbuf(vp, osize, nsize, 786 biosize, biosize, 787 -1, -1, 0); 788 } 789 if (error == 0 && truncate(tmp, nsize) < 0) 790 error = errno; 791 if (error == 0) 792 dnp->dn_size = nsize; 793 dbg(5, "TRUNCATE %016jx %016jx\n", (intmax_t)nsize, dnp->dn_size); 794 /*dirfs_node_stat(DIRFS_NOFD, tmp, dnp); don't need to do this*/ 795 796 dirfs_dropfd(dmp, NULL, pathfree); 797 798 799 KKASSERT(vn_islocked(vp)); 800 801 return error; 802 } 803 804 void 805 dirfs_node_setpassive(dirfs_mount_t dmp, dirfs_node_t dnp, int state) 806 { 807 struct vnode *vp; 808 809 if (state && (dnp->dn_state & DIRFS_PASVFD) == 0 && 810 dnp->dn_fd != DIRFS_NOFD) { 811 dirfs_node_ref(dnp); 812 dirfs_node_setflags(dnp, DIRFS_PASVFD); 813 TAILQ_INSERT_TAIL(&dmp->dm_fdlist, dnp, dn_fdentry); 814 ++dirfs_fd_used; 815 ++dmp->dm_fd_used; 816 817 /* 818 * If we are over our limit remove nodes from the 819 * passive fd cache. 820 */ 821 while (dmp->dm_fd_used > dirfs_fd_limit) { 822 dnp = TAILQ_FIRST(&dmp->dm_fdlist); 823 dirfs_node_setpassive(dmp, dnp, 0); 824 } 825 } 826 if (state == 0 && (dnp->dn_state & DIRFS_PASVFD)) { 827 dirfs_node_clrflags(dnp, DIRFS_PASVFD); 828 TAILQ_REMOVE(&dmp->dm_fdlist, dnp, dn_fdentry); 829 --dirfs_fd_used; 830 --dmp->dm_fd_used; 831 dbg(5, "dnp=%p removed from fdlist. %d used\n", 832 dnp, dirfs_fd_used); 833 834 /* 835 * Attempt to close the descriptor. We can only do this 836 * if the related vnode is inactive and has exactly two 837 * refs (representing the vp<->dnp and PASVFD). Otherwise 838 * someone might have ref'd the node in order to use the 839 * dn_fd. 
840 * 841 * Also, if the vnode is in any way dirty we leave the fd 842 * open for the buffer cache code. The syncer will eventually 843 * come along and fsync the vnode, and the next inactive 844 * transition will deal with the descriptor. 845 * 846 * The descriptor for the root node is NEVER closed by 847 * this function. 848 */ 849 vp = dnp->dn_vnode; 850 if (dirfs_node_refcnt(dnp) == 2 && vp && 851 dnp->dn_fd != DIRFS_NOFD && 852 !dirfs_node_isroot(dnp) && 853 (vp->v_flag & (VINACTIVE|VOBJDIRTY)) == VINACTIVE && 854 RB_EMPTY(&vp->v_rbdirty_tree)) { 855 dbg(5, "passive cache: closing %d\n", dnp->dn_fd); 856 close(dnp->dn_fd); 857 dnp->dn_fd = DIRFS_NOFD; 858 } else { 859 if (dirfs_node_refcnt(dnp) == 1 && dnp->dn_vnode == NULL && 860 dnp->dn_fd != DIRFS_NOFD && 861 dnp != dmp->dm_root) { 862 dbg(5, "passive cache: closing %d\n", dnp->dn_fd); 863 close(dnp->dn_fd); 864 dnp->dn_fd = DIRFS_NOFD; 865 } 866 } 867 dirfs_node_drop(dmp, dnp); 868 } 869 } 870 871 char * 872 dirfs_flag2str(dirfs_node_t dnp) 873 { 874 const char *txtflg[] = { DIRFS_TXTFLG }; 875 static char str[512] = {0}; 876 877 if (dnp->dn_state & DIRFS_PASVFD) 878 ksprintf(str, "%s ", txtflg[0]); 879 880 return str; 881 } 882 883 void 884 debug(int level, const char *fmt, ...) 885 { 886 __va_list ap; 887 888 if (debuglvl >= level) { 889 __va_start(ap, fmt); 890 kvprintf(fmt, ap); 891 __va_end(ap); 892 } 893 } 894 895