1 /* 2 * Copyright (c) 2013 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Antonio Huete Jimenez <tuxillo@quantumachine.net> 6 * by Matthew Dillon <dillon@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 *
 */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/spinlock2.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vfscache.h>
#include <sys/vnode.h>

#include "dirfs.h"

/*
 * Allocate and setup all that is needed for the dirfs node to hold the
 * filename.  Any previously stored name is released first.
 * Note: dn_name is NUL terminated.
 */
void
dirfs_node_setname(dirfs_node_t dnp, const char *name, int len)
{
	debug_called();

	if (dnp->dn_name)
		kfree(dnp->dn_name, M_DIRFS_MISC);
	dnp->dn_name = kmalloc(len + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO);
	bcopy(name, dnp->dn_name, len);
	dnp->dn_name[len] = 0;
	dnp->dn_namelen = len;
}

/*
 * Allocate and minimally initialize a dirfs node structure.  The node
 * starts with no backing host descriptor (DIRFS_NOFD) and a zeroed
 * refcount (kmalloc M_ZERO).
 * Note: node name and length are not handled here; see
 * dirfs_node_setname().
 */
dirfs_node_t
dirfs_node_alloc(struct mount *mp)
{
	dirfs_node_t dnp;

	debug_called();

	dnp = kmalloc(sizeof(*dnp), M_DIRFS_NODE, M_WAITOK | M_ZERO);
	lockinit(&dnp->dn_lock, "dfsnode", 0, LK_CANRECURSE);

	dnp->dn_fd = DIRFS_NOFD;

	return dnp;
}

/*
 * Drop a reference to the node.  The node is freed when the last
 * reference goes away.
 */
void
dirfs_node_drop(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	if (dirfs_node_unref(dnp))
		dirfs_node_free(dmp, dnp);
}

/*
 * Tear down a node whose refcount has reached zero: drop the ref held on
 * its parent, remove it from the per-mount passive fd cache, free its
 * name, and close its host descriptor (syncing first if a vnode is still
 * attached).  All list manipulation happens under the mount token.
 */
int
dirfs_node_free(dirfs_mount_t dmp, dirfs_node_t dnp)
{
	struct vnode *vp;

	debug_called();

	KKASSERT(dnp != NULL);
	debug_node2(dnp);

	KKASSERT(dirfs_node_refcnt(dnp) == 0);

	vp = NODE_TO_VP(dnp);
	/*
	 * Remove the inode from the passive fds list
	 * as we are tearing down the node.
	 * Root inode will be removed on VOP_UNMOUNT()
	 */
	dirfs_mount_gettoken(dmp);

	if (dnp->dn_parent) {	/* NULL when children reaped parents */
		dirfs_node_drop(dmp, dnp->dn_parent);
		dnp->dn_parent = NULL;
	}
	dirfs_node_setpassive(dmp, dnp, 0);
	if (dnp->dn_name) {
		kfree(dnp->dn_name, M_DIRFS_MISC);
		dnp->dn_name = NULL;
	}

	/*
	 * The file descriptor should have been closed already by the
	 * previous call to dirfs_node_setpassive().  If not, force a
	 * sync and close it.
	 */
	if (dnp->dn_fd != DIRFS_NOFD) {
		if (dnp->dn_vnode)
			VOP_FSYNC(vp, MNT_WAIT, 0);
		close(dnp->dn_fd);
		dnp->dn_fd = DIRFS_NOFD;
	}

	lockuninit(&dnp->dn_lock);
	kfree(dnp, M_DIRFS_NODE);
	dnp = NULL;	/* dead store on a local; kept as a guard habit */

	dirfs_mount_reltoken(dmp);

	return 0;
}

/*
 * Do all the operations needed to get a resulting inode <--> host file
 * association.  This may or may not include opening the file, which
 * should only be needed when creating it.
 *
 * In the case vap is not NULL and openflags are specified, open the file.
160 */ 161 int 162 dirfs_alloc_file(dirfs_mount_t dmp, dirfs_node_t *dnpp, dirfs_node_t pdnp, 163 struct namecache *ncp, struct vnode **vpp, struct vattr *vap, 164 int openflags) 165 { 166 dirfs_node_t dnp; 167 dirfs_node_t pathnp; 168 struct vnode *vp; 169 struct mount *mp; 170 char *tmp; 171 char *pathfree; 172 int error; 173 174 debug_called(); 175 176 error = 0; 177 vp = NULL; 178 mp = DIRFS_TO_VFS(dmp); 179 180 /* Sanity check */ 181 if (pdnp == NULL) 182 return EINVAL; 183 184 dnp = dirfs_node_alloc(mp); 185 KKASSERT(dnp != NULL); 186 187 dirfs_node_lock(dnp); 188 dirfs_node_setname(dnp, ncp->nc_name, ncp->nc_nlen); 189 dnp->dn_parent = pdnp; 190 dirfs_node_ref(pdnp); /* Children ref */ 191 dirfs_node_unlock(dnp); 192 193 pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree); 194 195 if (openflags && vap != NULL) { 196 dnp->dn_fd = openat(pathnp->dn_fd, tmp, 197 openflags, vap->va_mode); 198 if (dnp->dn_fd == -1) { 199 dirfs_dropfd(dmp, pathnp, pathfree); 200 return errno; 201 } 202 } 203 204 error = dirfs_node_stat(pathnp->dn_fd, tmp, dnp); 205 if (error) { /* XXX Handle errors */ 206 error = errno; 207 if (vp) 208 dirfs_free_vp(dmp, dnp); 209 dirfs_node_free(dmp, dnp); 210 dirfs_dropfd(dmp, pathnp, pathfree); 211 return error; 212 } 213 214 dirfs_alloc_vp(mp, &vp, LK_CANRECURSE, dnp); 215 *vpp = vp; 216 *dnpp = dnp; 217 218 dbg(5, "tmp=%s dnp=%p allocated\n", tmp, dnp); 219 dirfs_dropfd(dmp, pathnp, pathfree); 220 221 return error; 222 } 223 224 /* 225 * Requires an already dirfs_node_t that has been already lstat(2) 226 * for the type comparison 227 */ 228 void 229 dirfs_alloc_vp(struct mount *mp, struct vnode **vpp, int lkflags, 230 dirfs_node_t dnp) 231 { 232 struct vnode *vp; 233 dirfs_mount_t dmp = VFS_TO_DIRFS(mp); 234 235 debug_called(); 236 237 /* 238 * Handle vnode reclaim/alloc races 239 */ 240 for (;;) { 241 vp = dnp->dn_vnode; 242 if (vp) { 243 if (vget(vp, LK_EXCLUSIVE) == 0) 244 break; /* success */ 245 /* vget raced a reclaim, retry */ 246 } else 
{ 247 getnewvnode(VT_UNUSED10, mp, &vp, 0, lkflags); 248 if (dnp->dn_vnode == NULL) { 249 dnp->dn_vnode = vp; 250 vp->v_data = dnp; 251 vp->v_type = dnp->dn_type; 252 if (dmp->dm_root == dnp) 253 vsetflags(vp, VROOT); 254 dirfs_node_ref(dnp); /* ref for dnp<->vp */ 255 256 /* Type-specific initialization. */ 257 switch (dnp->dn_type) { 258 case VBLK: 259 case VCHR: 260 case VSOCK: 261 break; 262 case VREG: 263 vinitvmio(vp, dnp->dn_size, BMASK, -1); 264 break; 265 case VLNK: 266 break; 267 case VFIFO: 268 // vp->v_ops = &mp->mnt_vn_fifo_ops; 269 break; 270 case VDIR: 271 break; 272 default: 273 panic("dirfs_alloc_vp: dnp=%p vp=%p " 274 "type=%d", 275 dnp, vp, dnp->dn_type); 276 /* NOT REACHED */ 277 break; 278 } 279 break; /* success */ 280 } 281 vp->v_type = VBAD; 282 vx_put(vp); 283 /* multiple dirfs_alloc_vp calls raced, retry */ 284 } 285 } 286 KKASSERT(vp != NULL); 287 *vpp = vp; 288 dbg(5, "dnp=%p vp=%p type=%d\n", dnp, vp, vp->v_type); 289 } 290 291 /* 292 * Do not call locked! 293 */ 294 void 295 dirfs_free_vp(dirfs_mount_t dmp, dirfs_node_t dnp) 296 { 297 struct vnode *vp = NODE_TO_VP(dnp); 298 299 dnp->dn_vnode = NULL; 300 vp->v_data = NULL; 301 dirfs_node_drop(dmp, dnp); 302 } 303 304 int 305 dirfs_nodetype(struct stat *st) 306 { 307 int ret; 308 mode_t mode = st->st_mode; 309 310 debug_called(); 311 312 if (S_ISDIR(mode)) 313 ret = VDIR; 314 else if (S_ISBLK(mode)) 315 ret = VBLK; 316 else if (S_ISCHR(mode)) 317 ret = VCHR; 318 else if (S_ISFIFO(mode)) 319 ret = VFIFO; 320 else if (S_ISSOCK(mode)) 321 ret = VSOCK; 322 else if (S_ISLNK(mode)) 323 ret = VLNK; 324 else if (S_ISREG(mode)) 325 ret = VREG; 326 else 327 ret = VBAD; 328 329 return ret; 330 } 331 332 int 333 dirfs_node_stat(int fd, const char *path, dirfs_node_t dnp) 334 { 335 struct stat st; 336 int error; 337 338 debug_called(); 339 if (fd == DIRFS_NOFD) 340 error = lstat(path, &st); 341 else 342 error = fstatat(fd, path, &st, AT_SYMLINK_NOFOLLOW); 343 344 if (error) 345 return errno; 346 347 
	/* Populate our dirfs node struct with stat data */
	dnp->dn_uid = st.st_uid;
	dnp->dn_gid = st.st_gid;
	dnp->dn_mode = st.st_mode;
	dnp->dn_flags = st.st_flags;
	dnp->dn_links = st.st_nlink;
	dnp->dn_atime = st.st_atime;
	/*
	 * NOTE(review): the *nsec fields are filled with whole seconds
	 * scaled to nanoseconds; the host's sub-second
	 * st_*timespec.tv_nsec precision is discarded -- confirm intended.
	 */
	dnp->dn_atimensec = (st.st_atime * 1000000000L);
	dnp->dn_mtime = st.st_mtime;
	dnp->dn_mtimensec = (st.st_mtime * 1000000000L);
	dnp->dn_ctime = st.st_ctime;
	dnp->dn_ctimensec = (st.st_ctime * 1000000000L);
	dnp->dn_gen = st.st_gen;
	dnp->dn_ino = st.st_ino;
	dnp->dn_st_dev = st.st_dev;
	dnp->dn_size = st.st_size;
	dnp->dn_type = dirfs_nodetype(&st);

	return 0;
}

/*
 * Build the absolute host path for 'cur'.  Convenience wrapper around
 * dirfs_node_absolute_path_plus() with no trailing element.
 */
char *
dirfs_node_absolute_path(dirfs_mount_t dmp, dirfs_node_t cur, char **pathfreep)
{
	return(dirfs_node_absolute_path_plus(dmp, cur, NULL, pathfreep));
}

/*
 * Build the absolute host path for 'cur', optionally appending the
 * trailing element 'last'.  The path is composed right-to-left inside a
 * MAXPATHLEN+1 buffer.  On success the buffer is returned via *pathfreep
 * (caller releases it, e.g. via dirfs_dropfd()) and a pointer into it is
 * returned.  Returns NULL with *pathfreep == NULL if cur is NULL, the
 * element was unlinked, or the path does not fit.
 */
char *
dirfs_node_absolute_path_plus(dirfs_mount_t dmp, dirfs_node_t cur,
			      char *last, char **pathfreep)
{
	size_t len;
	dirfs_node_t dnp1;
	char *buf;
	int count;

	debug_called();

	KKASSERT(dmp->dm_root);	/* Sanity check */
	*pathfreep = NULL;
	if (cur == NULL)
		return NULL;
	buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK);

	/*
	 * Passed-in trailing element.
	 */
	count = 0;
	buf[MAXPATHLEN] = 0;
	if (last) {
		len = strlen(last);
		count += len;
		if (count <= MAXPATHLEN)
			bcopy(last, &buf[MAXPATHLEN - count], len);
		++count;
		if (count <= MAXPATHLEN)
			buf[MAXPATHLEN - count] = '/';
	}

	/*
	 * Iterate through the parents until we hit the root, prepending
	 * "name/" for each level.  A NULL parent before reaching the root
	 * means the element was unlinked.
	 */
	dnp1 = cur;
	while (dirfs_node_isroot(dnp1) == 0) {
		count += dnp1->dn_namelen;
		if (count <= MAXPATHLEN) {
			bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count],
			      dnp1->dn_namelen);
		}
		++count;
		if (count <= MAXPATHLEN)
			buf[MAXPATHLEN - count] = '/';
		dnp1 = dnp1->dn_parent;
		if (dnp1 == NULL)
			break;
	}

	/*
	 * Prefix with the root mount path.  If the element was unlinked
	 * dnp1 will be NULL and there is no path.
	 */
	len = strlen(dmp->dm_path);
	count += len;
	if (dnp1 && count <= MAXPATHLEN) {
		bcopy(dmp->dm_path, &buf[MAXPATHLEN - count], len);
		*pathfreep = buf;
		dbg(5, "absolute_path %s\n", &buf[MAXPATHLEN - count]);
		return (&buf[MAXPATHLEN - count]);
	} else {
		kfree(buf, M_DIRFS_MISC);
		*pathfreep = NULL;
		return (NULL);
	}
}

/*
 * Return a dirfs_node with a valid descriptor plus an allocated
 * relative path which can be used in openat(), fstatat(), etc calls
 * to locate the requested inode.
446 */ 447 dirfs_node_t 448 dirfs_findfd(dirfs_mount_t dmp, dirfs_node_t cur, 449 char **pathto, char **pathfreep) 450 { 451 dirfs_node_t dnp1; 452 int count; 453 char *buf; 454 455 debug_called(); 456 457 *pathfreep = NULL; 458 *pathto = NULL; 459 460 if (cur == NULL) 461 return NULL; 462 463 buf = kmalloc(MAXPATHLEN + 1, M_DIRFS_MISC, M_WAITOK | M_ZERO); 464 count = 0; 465 466 dnp1 = cur; 467 while (dnp1 == cur || dnp1->dn_fd == DIRFS_NOFD) { 468 count += dnp1->dn_namelen; 469 if (count <= MAXPATHLEN) { 470 bcopy(dnp1->dn_name, &buf[MAXPATHLEN - count], 471 dnp1->dn_namelen); 472 } 473 ++count; 474 if (count <= MAXPATHLEN) 475 buf[MAXPATHLEN - count] = '/'; 476 dnp1 = dnp1->dn_parent; 477 KKASSERT(dnp1 != NULL); 478 } 479 480 if (dnp1 && count <= MAXPATHLEN) { 481 *pathfreep = buf; 482 *pathto = &buf[MAXPATHLEN - count + 1]; /* skip '/' prefix */ 483 dirfs_node_ref(dnp1); 484 dbg(5, "fd=%d dnp1=%p dnp1->dn_name=%d &buf[off]=%s\n", 485 dnp1->dn_fd, dnp1, dnp1->dn_name, *pathto); 486 } else { 487 dbg(5, "failed too long\n"); 488 kfree(buf, M_DIRFS_MISC); 489 *pathfreep = NULL; 490 *pathto = NULL; 491 dnp1 = NULL; 492 } 493 return (dnp1); 494 } 495 496 void 497 dirfs_dropfd(dirfs_mount_t dmp, dirfs_node_t dnp1, char *pathfree) 498 { 499 if (pathfree) 500 kfree(pathfree, M_DIRFS_MISC); 501 if (dnp1) 502 dirfs_node_drop(dmp, dnp1); 503 } 504 505 int 506 dirfs_node_getperms(dirfs_node_t dnp, int *r, int *w, int *x) 507 { 508 uid_t u; 509 gid_t g; 510 int isowner, isgroup; 511 512 u = getuid(); /* XXX What about EUID? */ 513 g = getgid(); /* XXX What about EGID? 
*/ 514 isowner = (u == dnp->dn_uid); 515 isgroup = (g == dnp->dn_gid); 516 517 if (r) { 518 if (isowner && (dnp->dn_mode & S_IRUSR)) 519 *r = 1; 520 else if (isgroup && (dnp->dn_mode & S_IRGRP)) 521 *r = 1; 522 else if (dnp->dn_mode & S_IROTH) 523 *r = 1; 524 } 525 526 if (w) { 527 if (isowner && (dnp->dn_mode & S_IWUSR)) 528 *w = 1; 529 else if (isgroup && (dnp->dn_mode & S_IWGRP)) 530 *w = 1; 531 else if (dnp->dn_mode & S_IWOTH) 532 *w = 1; 533 } 534 535 if (x) { 536 if (isowner && (dnp->dn_mode & S_IXUSR)) 537 *x = 1; 538 else if (isgroup && (dnp->dn_mode & S_IXGRP)) 539 *x = 1; 540 else if (dnp->dn_mode & S_IXOTH) 541 *x = 1; 542 } 543 544 return 0; 545 } 546 547 /* 548 * This requires an allocated node and vnode, otherwise it'll panic 549 */ 550 int 551 dirfs_open_helper(dirfs_mount_t dmp, dirfs_node_t dnp, int parentfd, 552 char *relpath) 553 { 554 int canread, canwrite, canexec; 555 dirfs_node_t pathnp; 556 char *tmp; 557 char *pathfree; 558 int flags, error; 559 560 debug_called(); 561 562 canread = canwrite = canexec = 0; 563 flags = error = 0; 564 tmp = NULL; 565 566 KKASSERT(dnp); 567 KKASSERT(dnp->dn_vnode); 568 569 /* 570 * XXX Besides VDIR and VREG there are other file 571 * types, y'know? 572 * Also, O_RDWR alone might not be the best mode to open 573 * a file with, need to investigate which suits better. 
574 */ 575 dirfs_node_getperms(dnp, &canread, &canwrite, &canexec); 576 577 if (dnp->dn_type & VDIR) { 578 flags |= O_DIRECTORY; 579 } else { 580 if (canwrite) 581 flags |= O_RDWR; 582 else 583 flags |= O_RDONLY; 584 } 585 if (relpath != NULL) { 586 tmp = relpath; 587 pathnp = NULL; 588 KKASSERT(parentfd != DIRFS_NOFD); 589 } else if (parentfd == DIRFS_NOFD) { 590 pathnp = dirfs_findfd(dmp, dnp, &tmp, &pathfree); 591 parentfd = pathnp->dn_fd; 592 } else { 593 pathnp = NULL; 594 } 595 596 dnp->dn_fd = openat(parentfd, tmp, flags); 597 if (dnp->dn_fd == -1) 598 error = errno; 599 600 dbg(5, "dnp=%p tmp2=%s parentfd=%d flags=%d error=%d " 601 "r=%d w=%d x=%d\n", dnp, tmp, parentfd, flags, error, 602 canread, canwrite, canexec); 603 604 if (pathnp) 605 dirfs_dropfd(dmp, pathnp, pathfree); 606 607 return error; 608 } 609 610 int 611 dirfs_close_helper(dirfs_node_t dnp) 612 { 613 int error = 0; 614 615 debug_called(); 616 617 618 if (dnp->dn_fd != DIRFS_NOFD) { 619 dbg(5, "closed fd on dnp=%p\n", dnp); 620 #if 0 621 /* buffer cache buffers may still be present */ 622 error = close(dnp->dn_fd); /* XXX EINTR should be checked */ 623 dnp->dn_fd = DIRFS_NOFD; 624 #endif 625 } 626 627 return error; 628 } 629 630 int 631 dirfs_node_refcnt(dirfs_node_t dnp) 632 { 633 return dnp->dn_refcnt; 634 } 635 636 int 637 dirfs_node_chtimes(dirfs_node_t dnp) 638 { 639 struct vnode *vp; 640 dirfs_mount_t dmp; 641 int error = 0; 642 char *tmp; 643 char *pathfree; 644 645 debug_called(); 646 647 vp = NODE_TO_VP(dnp); 648 dmp = VFS_TO_DIRFS(vp->v_mount); 649 650 KKASSERT(vn_islocked(vp)); 651 652 if (dnp->dn_flags & (IMMUTABLE | APPEND)) 653 return EPERM; 654 655 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 656 KKASSERT(tmp); 657 if((lutimes(tmp, NULL)) == -1) 658 error = errno; 659 660 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 661 dirfs_dropfd(dmp, NULL, pathfree); 662 663 KKASSERT(vn_islocked(vp)); 664 665 666 return error; 667 } 668 669 int 670 dirfs_node_chflags(dirfs_node_t dnp, int 
vaflags, struct ucred *cred) 671 { 672 struct vnode *vp; 673 dirfs_mount_t dmp; 674 int error = 0; 675 int flags; 676 char *tmp; 677 char *pathfree; 678 679 debug_called(); 680 681 vp = NODE_TO_VP(dnp); 682 dmp = VFS_TO_DIRFS(vp->v_mount); 683 684 KKASSERT(vn_islocked(vp)); 685 686 flags = dnp->dn_flags; 687 688 error = vop_helper_setattr_flags(&flags, vaflags, dnp->dn_uid, cred); 689 /* 690 * When running vkernels with non-root it is not possible to set 691 * certain flags on host files, such as SF* flags. chflags(2) call 692 * will spit an error in that case. 693 */ 694 if (error == 0) { 695 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 696 KKASSERT(tmp); 697 if((lchflags(tmp, flags)) == -1) 698 error = errno; 699 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 700 dirfs_dropfd(dmp, NULL, pathfree); 701 } 702 703 KKASSERT(vn_islocked(vp)); 704 705 return error; 706 } 707 708 int 709 dirfs_node_chmod(dirfs_mount_t dmp, dirfs_node_t dnp, mode_t mode) 710 { 711 char *tmp; 712 char *pathfree; 713 int error = 0; 714 715 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 716 KKASSERT(tmp); 717 if (lchmod(tmp, mode) < 0) 718 error = errno; 719 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 720 dirfs_dropfd(dmp, NULL, pathfree); 721 722 return error; 723 } 724 725 int 726 dirfs_node_chown(dirfs_mount_t dmp, dirfs_node_t dnp, 727 uid_t uid, uid_t gid, mode_t mode) 728 { 729 char *tmp; 730 char *pathfree; 731 int error = 0; 732 733 tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree); 734 KKASSERT(tmp); 735 if (lchown(tmp, uid, gid) < 0) 736 error = errno; 737 if (mode != dnp->dn_mode) 738 lchmod(tmp, mode); 739 dirfs_node_stat(DIRFS_NOFD, tmp, dnp); 740 dirfs_dropfd(dmp, NULL, pathfree); 741 742 return error; 743 } 744 745 746 int 747 dirfs_node_chsize(dirfs_node_t dnp, off_t nsize) 748 { 749 dirfs_mount_t dmp; 750 struct vnode *vp; 751 int error = 0; 752 char *tmp; 753 char *pathfree; 754 off_t osize; 755 int biosize; 756 757 debug_called(); 758 759 KKASSERT(dnp); 760 761 vp = 
NODE_TO_VP(dnp);
	dmp = VFS_TO_DIRFS(vp->v_mount);
	biosize = BSIZE;
	osize = dnp->dn_size;

	KKASSERT(vn_islocked(vp));

	/* Only regular files can be resized */
	switch (vp->v_type) {
	case VDIR:
		return (EISDIR);
	case VREG:
		break;
	default:
		return (EOPNOTSUPP);

	}

	tmp = dirfs_node_absolute_path(dmp, dnp, &pathfree);
	/*
	 * Adjust the buffer cache first: truncate dirty buffers on
	 * shrink, extend/zero-fill on grow, then apply the size change
	 * to the host file.
	 */
	if (nsize < osize) {
		error = nvtruncbuf(vp, nsize, biosize, -1, 0);
	} else {
		error = nvextendbuf(vp, osize, nsize,
				    biosize, biosize,
				    -1, -1, 0);
	}
	if (error == 0 && truncate(tmp, nsize) < 0)
		error = errno;
	if (error == 0)
		dnp->dn_size = nsize;
	dbg(5, "TRUNCATE %016jx %016jx\n", (intmax_t)nsize, dnp->dn_size);
	/*dirfs_node_stat(DIRFS_NOFD, tmp, dnp); don't need to do this*/

	dirfs_dropfd(dmp, NULL, pathfree);


	KKASSERT(vn_islocked(vp));

	return error;
}

/*
 * Insert (state != 0) or remove (state == 0) the node in the per-mount
 * passive fd cache, which keeps host descriptors open across vnode
 * deactivations.  Insertion takes a PASVFD reference and may recursively
 * evict the oldest entries when dirfs_fd_limit is exceeded; removal
 * drops the PASVFD reference and opportunistically closes the
 * descriptor when it is safe to do so (see comments below).
 */
void
dirfs_node_setpassive(dirfs_mount_t dmp, dirfs_node_t dnp, int state)
{
	struct vnode *vp;

	if (state && (dnp->dn_state & DIRFS_PASVFD) == 0 &&
	    dnp->dn_fd != DIRFS_NOFD) {
		dirfs_node_ref(dnp);	/* PASVFD list reference */
		dirfs_node_setflags(dnp, DIRFS_PASVFD);
		TAILQ_INSERT_TAIL(&dmp->dm_fdlist, dnp, dn_fdentry);
		++dirfs_fd_used;
		++dmp->dm_fd_used;

		/*
		 * If we are over our limit remove nodes from the
		 * passive fd cache.
		 */
		while (dmp->dm_fd_used > dirfs_fd_limit) {
			dnp = TAILQ_FIRST(&dmp->dm_fdlist);
			dirfs_node_setpassive(dmp, dnp, 0);
		}
	}
	if (state == 0 && (dnp->dn_state & DIRFS_PASVFD)) {
		dirfs_node_clrflags(dnp, DIRFS_PASVFD);
		TAILQ_REMOVE(&dmp->dm_fdlist, dnp, dn_fdentry);
		--dirfs_fd_used;
		--dmp->dm_fd_used;
		dbg(5, "dnp=%p removed from fdlist. %d used\n",
		    dnp, dirfs_fd_used);

		/*
		 * Attempt to close the descriptor. We can only do this
		 * if the related vnode is inactive and has exactly two
		 * refs (representing the vp<->dnp and PASVFD). Otherwise
		 * someone might have ref'd the node in order to use the
		 * dn_fd.
		 *
		 * Also, if the vnode is in any way dirty we leave the fd
		 * open for the buffer cache code. The syncer will eventually
		 * come along and fsync the vnode, and the next inactive
		 * transition will deal with the descriptor.
		 *
		 * The descriptor for the root node is NEVER closed by
		 * this function.
		 */
		vp = dnp->dn_vnode;
		if (dirfs_node_refcnt(dnp) == 2 && vp &&
		    dnp->dn_fd != DIRFS_NOFD &&
		    !dirfs_node_isroot(dnp) &&
		    (vp->v_flag & (VINACTIVE|VOBJDIRTY)) == VINACTIVE &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			dbg(5, "passive cache: closing %d\n", dnp->dn_fd);
			close(dnp->dn_fd);
			dnp->dn_fd = DIRFS_NOFD;
		} else {
			/* vnode already gone: only the PASVFD ref remains */
			if (dirfs_node_refcnt(dnp) == 1 && dnp->dn_vnode == NULL &&
			    dnp->dn_fd != DIRFS_NOFD &&
			    dnp != dmp->dm_root) {
				dbg(5, "passive cache: closing %d\n", dnp->dn_fd);
				close(dnp->dn_fd);
				dnp->dn_fd = DIRFS_NOFD;
			}
		}
		dirfs_node_drop(dmp, dnp);	/* drop the PASVFD reference */
	}
}

/*
 * Render the node state flags as text.
 * NOTE(review): uses a static buffer (not reentrant) which is never
 * cleared when PASVFD is unset, so stale text from a previous call may
 * be returned -- confirm callers tolerate this.
 */
char *
dirfs_flag2str(dirfs_node_t dnp)
{
	const char *txtflg[] = { DIRFS_TXTFLG };
	static char str[512] = {0};

	if (dnp->dn_state & DIRFS_PASVFD)
		ksprintf(str, "%s ", txtflg[0]);

	return str;
}

/*
 * Conditional debug printf, emitted only when the global debuglvl is at
 * least 'level'.
 */
void
debug(int level, const char *fmt, ...)
{
	__va_list ap;

	if (debuglvl >= level) {
		__va_start(ap, fmt);
		kvprintf(fmt, ap);
		__va_end(ap);
	}
}
