/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		LOCKSTOP;
		return (0);
	}

	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.  Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * Note that deleting the file block chains under the inode chain
	 * would just be a waste of energy, so don't do it.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	}
	LOCKSTOP;
	return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL) {
		LOCKSTOP;
		return(0);
	}
	pmp = ip->pmp;

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * This occurs if the inode was unlinked while open.  Reclamation of
	 * these inodes requires processing we cannot safely do here so add
	 * the inode to the sideq in that situation.
	 *
	 * A modified inode may require chain synchronization which will no
	 * longer be driven by a sync or fsync without the vnode, also use
	 * the sideq for that.
	 *
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list and
	 * handled later on.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
			  HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED)) &&
	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
		hammer2_inode_sideq_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
		ipul->ip = ip;

		hammer2_spin_ex(&pmp->list_spin);
		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
			/* ref -> sideq */
			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
			hammer2_spin_unex(&pmp->list_spin);
		} else {
			hammer2_spin_unex(&pmp->list_spin);
			kfree(ipul, pmp->minode);
			hammer2_inode_drop(ip);	/* vp ref */
		}
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_drop(ip);	/* vp ref */
	}

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	LOCKSTOP;
	return (0);
}
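
/*
 * NOTE: The sideq entry takes over the vp's inode reference.  Entries
 *	 are drained later from transaction context (see the
 *	 hammer2_inode_run_sideq() calls in nremove/nrmdir/nrename below),
 *	 where deferred chain synchronization and destruction of unlinked
 *	 inodes can be performed safely.
 */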

static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_MODIFIED)
		hammer2_inode_chain_sync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	LOCKSTOP;
	return (0);
}

static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	LOCKSTART;
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
	hammer2_inode_unlock(ip);

	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	hammer2_chain_t *chain;
	int i;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes < chain->bref.data_count)
					vap->va_bytes = chain->bref.data_count;
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip);

	LOCKSTOP;
	return (0);
}
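
/*
 * NOTE: HAMMER2 does not track atime on media (see the disabled atime
 *	 handling in hammer2_vop_setattr() below), which is why getattr
 *	 reports va_atime from the stored mtime above.
 */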

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				      hammer2_to_unix_xid(&ip->meta.uid),
				      ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
			} else {
				hammer2_extend_file(ip, vap->va_size);
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
	}

done:
	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_fsync now in order to prepare the inode's indirect
	 * block table.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
	hammer2_knote(ip->vp, kflags);

	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int dtype;
	int r;

	LOCKSTART;
	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, hammer2_xop_readdir);

	for (;;) {
		const hammer2_inode_data_t *ripdata;

		error = hammer2_xop_collect(&xop->head, 0);
		if (error)
			break;
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
		hammer2_cluster_bref(&xop->head.cluster, &bref);
		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			dtype = hammer2_get_dtype(ripdata);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	LOCKSTOP;
	return (error);
}
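
/*
 * Worked example of the readdir offset encoding above (illustrative):
 * offsets 0 and 1 are the synthetic "." and ".." entries.  Real entries
 * resume at (saveoff | HAMMER2_DIRHASH_VISIBLE) and each returned entry
 * records (bref.key & HAMMER2_DIRHASH_USERMSK) as its cookie, so a
 * userland seekdir()/telldir() cookie is just the masked directory hash
 * key.  Bit 63 (the VISIBLE bit) is stripped before the offset is handed
 * back to userland.
 */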

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly) {
		return (EROFS);
	}

	seqcount = ap->a_ioflag >> 16;

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 */
	hammer2_trans_init(ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
	hammer2_trans_done(ip->pmp);

	return (error);
}
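
/*
 * Example of the RLIMIT_FSIZE check above (illustrative): with a file
 * size limit of 1MiB, a write of 4096 bytes at offset 1048575 computes
 * uio_offset + uio_resid = 1052671 > 1048576, so the process is sent
 * SIGXFSZ and the write fails with EFBIG before any transaction is
 * started.
 */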

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * MAXBSIZE,
				     &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;
					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						if (m)
							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}
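
/*
 * Worked example of the logical block math above (illustrative, assuming
 * hammer2_calc_logical() returns a 64KiB logical block for a large file):
 * a read of 10000 bytes at uio_offset 70000 yields lbase = 65536,
 * loff = 70000 - 65536 = 4464 and n = min(65536 - 4464, uio_resid,
 * size - uio_offset) = 10000, so the transfer is satisfied from a single
 * buffer.  Smaller files use proportionally smaller logical blocks.
 */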

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
			bdwrite(bp);
		} else {
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through,
	 * the entire write is a failure and we have to back up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);
		hammer2_mtx_ex(&ip->truncate_lock);
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		hammer2_update_time(&ip->meta.mtime);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}
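
/*
 * Example of the trivial/endofblk classification above (illustrative,
 * assuming a 64KiB logical block): writing 64KiB at loff 0 fully covers
 * the buffer (trivial = 1, endofblk = 1, so the buffer need not be
 * pre-read); writing 100 bytes at loff 512 is a partial overwrite
 * (trivial = 0, endofblk = 0) and the buffer is bread() in first; any
 * buffer lying entirely at or beyond the new EOF is also trivial since
 * its old contents are irrelevant.
 */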

/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	LOCKSTART;
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
	LOCKSTOP;
}

/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	LOCKSTART;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
		hammer2_inode_chain_sync(ip);
	}

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);

	LOCKSTOP;
}
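
/*
 * Example of the EMBEDDED_BYTES transition above (illustrative; the
 * embedded data area is HAMMER2_EMBEDDED_BYTES, nominally 512 bytes):
 * growing a file from 200 to 300 bytes stays in DIRECTDATA mode and no
 * chain sync is needed, while growing from 200 to 4096 bytes crosses the
 * boundary, so INODE_RESIZED is set and hammer2_inode_chain_sync() is
 * called immediately to move the embedded data out into a real data
 * block before the strategy code can race against it.
 */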

static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

	error = hammer2_xop_collect(&xop->head, 0);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	LOCKSTOP;

	return error;
}

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	LOCKSTOP;
	return error;
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry as a hardlink to it.  The creation
	 * of the actual inode sets its nlinks to 1 which is the value
	 * we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);

	LOCKSTART;
	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_lock(ip, 0);

	/*
	 * Create the hardlink target and bump nlinks.
	 */
	if (error == 0) {
		hammer2_inode_create(tdip, tdip, NULL, NULL,
				     name, name_len, 0,
				     ip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, ip->meta.type,
				     0, &error);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
	}
	if (error == 0) {
		/*
		 * Update dip's mtime
		 */
		uint64_t mtime;

		hammer2_update_time(&mtime);
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = mtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp);

	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry as a hardlink to it.  The creation
	 * of the actual inode sets its nlinks to 1 which is the value
	 * we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);

	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}
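
/*
 * NOTE: ncreate above and nmkdir earlier share the same two-step
 *	 creation sequence: the real inode is allocated under the PFS
 *	 iroot keyed by its inode number, and the visible directory
 *	 entry is then created as an OBJTYPE_HARDLINK pointing at that
 *	 inode number.  Keeping every inode indexed by inode number is
 *	 what allows lookups by inum alone, e.g. the '..' resolution in
 *	 hammer2_vop_nlookupdotdot() via hammer2_vfs_vget().
 */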

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * The device node is entered as the directory entry itself and not
	 * as a hardlink to an inode.  Since one cannot obtain a
	 * file handle on the filesystem entry representing the device, we
	 * do not have to worry about indexing its inode.
	 */
	inum = hammer2_trans_newinum(dip->pmp);
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * The softlink is entered into the directory itself and not
	 * as a hardlink to an inode.  Since one cannot obtain a
	 * file handle on the softlink itself we do not have to worry
	 * about indexing its inode.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		hammer2_trans_done(dip->pmp);
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
	}
	return error;
}

/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int error;
	int isopen;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	ncp = ap->a_nch->ncp;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}
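
/*
 * NOTE: The isopen flag passed to hammer2_inode_unlink_finisher() above
 *	 is what defers destruction of a still-open file: the inode is
 *	 only marked unlinked and the actual recycle happens on the last
 *	 close via hammer2_vop_inactive() (see the ISUNLINKED handling at
 *	 the top of this file).  nrmdir below is the same sequence with
 *	 xop->isdir = 1 so the backend operates on a directory entry.
 */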

/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int isopen;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;
	hammer2_inode_t *tdip;
	hammer2_inode_t *ip;
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;
	int tnch_error;
	int update_tdip;
	int update_fdip;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return(EROFS);

	LOCKSTART;
	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	/*
	 * ip is the inode being renamed.  If this is a hardlink then
	 * ip represents the actual file and not the hardlink marker.
	 */
	ip = VTOI(fncp->nc_vp);

	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	error = 0;
	hammer2_inode_lock(fdip, 0);
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_ref(ip);		/* extra ref */

	hammer2_inode_lock(ip, 0);

	/*
	 * Delete the target namespace.
	 */
	{
		hammer2_xop_unlink_t *xop2;
		hammer2_inode_t *tip;
		int isopen;

		/*
		 * The unlink XOP unlinks the path from the directory and
		 * locates and returns the cluster associated with the real
		 * inode.  We have to handle nlinks here on the frontend.
		 */
		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setname(&xop2->head, tname, tname_len);
		isopen = cache_isopen(ap->a_tnch);
		xop2->isdir = -1;
		xop2->dopermanent = 0;
		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

		/*
		 * Collect the real inode and adjust nlinks, destroy the real
		 * inode if nlinks transitions to 0 and it was the real inode
		 * (else it has already been removed).
		 */
		tnch_error = hammer2_xop_collect(&xop2->head, 0);
		/* hammer2_inode_unlock(tdip); */

		if (tnch_error == 0) {
			tip = hammer2_inode_get(tdip->pmp, NULL,
						&xop2->head.cluster, -1);
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
			if (tip) {
				hammer2_inode_unlink_finisher(tip, isopen);
				hammer2_inode_unlock(tip);
			}
		} else {
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
		}
		/* hammer2_inode_lock(tdip, 0); */

		if (tnch_error && tnch_error != ENOENT) {
			error = tnch_error;
			goto done2;
		}
		update_tdip = 1;
	}

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}
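
	/*
	 * Illustration of the collision probe above: tlhc starts at
	 * hammer2_dirhash(tname), and each directory key found occupied
	 * at exactly tlhc bumps it by one.  The scan ends on the first
	 * free key (ENOENT from the scanlhc XOP also counts, after one
	 * final increment).  If probing ever carries out of the
	 * HAMMER2_DIRHASH_LOMASK window the hash space for this name is
	 * exhausted and the rename fails with ENOSPC.
	 */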

	/*
	 * Everything is setup, do the rename.
	 *
	 * We have to synchronize ip->meta to the underlying operation.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *	 unlinking elements from their directories.  Locking
	 *	 the nlinks field does not lock the whole inode.
	 */
	/* hammer2_inode_lock(ip, 0); */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

		error = hammer2_xop_collect(&xop4->head, 0);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		update_fdip = 1;
		update_tdip = 1;
	}

done2:
	/*
	 * Update directory mtimes to represent the fact that something
	 * changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_inode_run_sideq(fdip->pmp);

	hammer2_trans_done(tdip->pmp);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer structures, otherwise we might deadlock.
	 */
	if (tnch_error == 0) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	LOCKSTART;
	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	LOCKSTART;
	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	LOCKSTOP;
	return (rc);
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}

static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return(EROFS);
	return(0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
}

/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
#if 0
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
#endif
	.vop_access	= hammer2_vop_access,
#if 0
	.vop_close	= hammer2_vop_fifoclose,
#endif
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};
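
/*
 * NOTE (assumption): these vop_ops templates are registered at mount
 * time from hammer2_vfsops.c, e.g. via vfs_add_vnodeops(), the standard
 * DragonFly mechanism for hooking per-filesystem vnode operations
 * vectors into a mount.
 */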