/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
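/*
 * NOTE: Callers accumulate NOTE_* flags (NOTE_WRITE, NOTE_EXTEND,
 *	 NOTE_ATTRIB) in a local kflags variable and post them once via
 *	 hammer2_knote() when the operation completes.
 */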
/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		LOCKSTOP;
		return (0);
	}

	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.  Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * Note that deleting the file block chains under the inode chain
	 * would just be a waste of energy, so don't do it.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	}
	LOCKSTOP;
	return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL) {
		LOCKSTOP;
		return(0);
	}
	pmp = ip->pmp;

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * This occurs if the inode was unlinked while open.  Reclamation of
	 * these inodes requires processing we cannot safely do here so add
	 * the inode to the sideq in that situation.
	 *
	 * A modified inode may require chain synchronization which will no
	 * longer be driven by a sync or fsync without the vnode, also use
	 * the sideq for that.
	 *
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list and
	 * handled later on.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
			  HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED)) &&
	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
		hammer2_inode_sideq_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
		ipul->ip = ip;

		hammer2_spin_ex(&pmp->list_spin);
		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
			/* ref -> sideq */
			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
			hammer2_spin_unex(&pmp->list_spin);
		} else {
			hammer2_spin_unex(&pmp->list_spin);
			kfree(ipul, pmp->minode);
			hammer2_inode_drop(ip);	/* vp ref */
		}
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_drop(ip);	/* vp ref */
	}

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	LOCKSTOP;
	return (0);
}
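/*
 * hammer2_vop_fsync { vp, waitfor, flags }
 *
 * Flush dirty logical buffers via vfsync(), then synchronize the inode's
 * meta-data to its backing chain if the inode was modified.
 */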
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_MODIFIED)
		hammer2_inode_chain_sync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	LOCKSTOP;
	return (0);
}

static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	LOCKSTART;
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
	hammer2_inode_unlock(ip);

	LOCKSTOP;
	return (error);
}
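/*
 * hammer2_vop_getattr { vp, vap }
 *
 * Fill in the vattr from the inode's cached meta-data.  A shared inode
 * lock is sufficient because only ip->meta is read.
 */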
static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	hammer2_chain_t *chain;
	int i;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes < chain->bref.data_count)
					vap->va_bytes = chain->bref.data_count;
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip);

	LOCKSTOP;
	return (0);
}
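/*
 * hammer2_vop_setattr { vp, vap, cred }
 *
 * Apply chflags/chown/chmod/truncate/utimes style changes to the inode
 * meta-data inside a transaction.
 */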
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				hammer2_to_unix_xid(&ip->meta.uid),
				ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
			} else {
				hammer2_extend_file(ip, vap->va_size);
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
	}

done:
	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_fsync now in order to prepare the inode's indirect
	 * block table.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
	hammer2_knote(ip->vp, kflags);

	LOCKSTOP;
	return (error);
}
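/*
 * hammer2_vop_readdir { vp, uio, cred, eofflag, ncookies, cookies }
 *
 * Directory scan using the XOP infrastructure.  The artificial entries
 * for "." and ".." are synthesized here before the backend scan begins.
 */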
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int dtype;
	int r;

	LOCKSTART;
	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;

	/*
	 * Setup directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, hammer2_xop_readdir);

	for (;;) {
		const hammer2_inode_data_t *ripdata;

		error = hammer2_xop_collect(&xop->head, 0);
		if (error)
			break;
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
		hammer2_cluster_bref(&xop->head.cluster, &bref);
		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			dtype = hammer2_get_dtype(ripdata);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}
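/*
 * hammer2_vop_read { vp, uio, ioflag, cred }
 */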
static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}
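/*
 * hammer2_vop_write { vp, uio, ioflag, cred }
 */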
static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly) {
		return (EROFS);
	}

	seqcount = ap->a_ioflag >> 16;

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 *
	 * To avoid deadlocking against the VM system, we must flag any
	 * transaction related to the buffer cache or other direct
	 * VM page manipulation.
	 */
	if (uio->uio_segflg == UIO_NOCOPY)
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	else
		hammer2_trans_init(ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
	hammer2_trans_done(ip->pmp);

	return (error);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * MAXBSIZE,
				     &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;
					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						if (m)
							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}
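/*
 * NOTE: hammer2_write_file() below is also used by hammer2_vop_nsymlink()
 *	 to store the softlink target via a UIO_SYSSPACE write.
 */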
/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
			bdwrite(bp);
		} else {
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through
	 * the entire write is a failure and we have to back-up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);
		hammer2_mtx_ex(&ip->truncate_lock);
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		hammer2_update_time(&ip->meta.mtime);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}
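/*
 * NOTE: The truncate and extend paths below use hammer2_calc_logical()
 *	 to obtain the logical block base and block size for a given file
 *	 offset, which determines the buffer cache geometry passed to
 *	 nvtruncbuf()/nvextendbuf().
 */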
/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	LOCKSTART;
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
	LOCKSTOP;
}

/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	LOCKSTART;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
		hammer2_inode_chain_sync(ip);
	}

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);

	LOCKSTOP;
}
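/*
 * hammer2_vop_nresolve { nch, dvp, cred }
 *
 * Resolve a filename in the directory (dvp) using the XOP subsystem and
 * associate the resulting vnode with the namecache entry.
 */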
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

	error = hammer2_xop_collect(&xop->head, 0);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	LOCKSTOP;

	return error;
}

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	LOCKSTOP;
	return error;
}
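/*
 * hammer2_vop_nmkdir { nch, dvp, vpp, cred, vap }
 */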
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry as a hardlink to it.  The creation
	 * of the actual inode sets its nlinks to 1 which is the value
	 * we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}
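/*
 * hammer2_vop_open { vp, mode, cred, fp }
 */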
static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);

	LOCKSTART;
	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_lock(ip, 0);

	/*
	 * Create the hardlink target and bump nlinks.
	 */
	if (error == 0) {
		hammer2_inode_create(tdip, tdip, NULL, NULL,
				     name, name_len, 0,
				     ip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, ip->meta.type,
				     0, &error);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
	}
	if (error == 0) {
		/*
		 * Update dip's mtime
		 */
		uint64_t mtime;

		hammer2_update_time(&mtime);
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = mtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp);

	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry as a hardlink to it.  The creation
	 * of the actual inode sets its nlinks to 1 which is the value
	 * we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);

	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}
/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * The device node is entered as the directory entry itself and not
	 * as a hardlink to an inode.  Since one cannot obtain a
	 * file handle on the filesystem entry representing the device, we
	 * do not have to worry about indexing its inode.
	 */
	inum = hammer2_trans_newinum(dip->pmp);
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * The softlink is entered into the directory itself and not
	 * as a hardlink to an inode.  Since one cannot obtain a
	 * file handle on the softlink itself we do not have to worry
	 * about indexing its inode.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		hammer2_trans_done(dip->pmp);
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
	}
	return error;
}
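/*
 * NOTE: The unlink paths below pass cache_isopen() to the inode unlink
 *	 finisher so it knows whether the target might still be open.
 *	 An open file can be removed from the namespace but cannot be
 *	 physically destroyed until its last reference goes away.
 */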
/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int error;
	int isopen;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	ncp = ap->a_nch->ncp;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int isopen;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}
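/*
 * NOTE: The rename below runs in three steps: delete the target
 *	 namespace, locate a non-colliding directory hash (tlhc) in the
 *	 target directory, then issue the XOP rename to move the entry.
 */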
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;
	hammer2_inode_t *tdip;
	hammer2_inode_t *ip;
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;
	int tnch_error;
	int update_tdip;
	int update_fdip;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return(EROFS);

	LOCKSTART;
	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	/*
	 * ip is the inode being renamed.  If this is a hardlink then
	 * ip represents the actual file and not the hardlink marker.
	 */
	ip = VTOI(fncp->nc_vp);

	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	error = 0;
	hammer2_inode_lock(fdip, 0);
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_ref(ip);		/* extra ref */

	hammer2_inode_lock(ip, 0);

	/*
	 * Delete the target namespace.
	 */
	{
		hammer2_xop_unlink_t *xop2;
		hammer2_inode_t *tip;
		int isopen;

		/*
		 * The unlink XOP unlinks the path from the directory and
		 * locates and returns the cluster associated with the real
		 * inode.  We have to handle nlinks here on the frontend.
		 */
		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setname(&xop2->head, tname, tname_len);
		isopen = cache_isopen(ap->a_tnch);
		xop2->isdir = -1;
		xop2->dopermanent = 0;
		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

		/*
		 * Collect the real inode and adjust nlinks, destroy the real
		 * inode if nlinks transitions to 0 and it was the real inode
		 * (else it has already been removed).
		 */
		tnch_error = hammer2_xop_collect(&xop2->head, 0);
		/* hammer2_inode_unlock(tdip); */

		if (tnch_error == 0) {
			tip = hammer2_inode_get(tdip->pmp, NULL,
						&xop2->head.cluster, -1);
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
			if (tip) {
				hammer2_inode_unlink_finisher(tip, isopen);
				hammer2_inode_unlock(tip);
			}
		} else {
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
		}
		/* hammer2_inode_lock(tdip, 0); */

		if (tnch_error && tnch_error != ENOENT) {
			error = tnch_error;
			goto done2;
		}
		update_tdip = 1;
	}

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}

	/*
	 * Everything is setup, do the rename.
	 *
	 * We have to synchronize ip->meta to the underlying operation.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *	 unlinking elements from their directories.  Locking
	 *	 the nlinks field does not lock the whole inode.
	 */
	/* hammer2_inode_lock(ip, 0); */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

		error = hammer2_xop_collect(&xop4->head, 0);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		update_fdip = 1;
	}

done2:
	/*
	 * Update directory mtimes to reflect that something changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_inode_run_sideq(fdip->pmp);

	hammer2_trans_done(tdip->pmp);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer structures, otherwise we might deadlock.
	 */
	if (tnch_error == 0) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	LOCKSTART;
	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	LOCKSTOP;
	return (error);
}
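/*
 * hammer2_vop_mountctl - handle mount control operations.
 *
 * Only MOUNTCTL_SET_EXPORT (NFS export configuration) is handled here;
 * all other operations fall through to vop_stdmountctl().
 */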
static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	LOCKSTART;
	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	LOCKSTOP;
	return (rc);
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}
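/*
 * The detach/read/write/vnode filters below implement the EVFILT_*
 * event tests.  NOTE_REVOKE forces EOF so that pollers wake up when
 * the vnode is revoked.
 */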
static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return(EROFS);
	return(0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
}

/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_getpages = vop_stdgetpages,
	.vop_putpages = vop_stdputpages,
	.vop_access = hammer2_vop_access,
	.vop_advlock = hammer2_vop_advlock,
	.vop_close = hammer2_vop_close,
	.vop_nlink = hammer2_vop_nlink,
	.vop_ncreate = hammer2_vop_ncreate,
	.vop_nsymlink = hammer2_vop_nsymlink,
	.vop_nremove = hammer2_vop_nremove,
	.vop_nrmdir = hammer2_vop_nrmdir,
	.vop_nrename = hammer2_vop_nrename,
	.vop_getattr = hammer2_vop_getattr,
	.vop_setattr = hammer2_vop_setattr,
	.vop_readdir = hammer2_vop_readdir,
	.vop_readlink = hammer2_vop_readlink,
	.vop_read = hammer2_vop_read,
	.vop_write = hammer2_vop_write,
	.vop_open = hammer2_vop_open,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_nresolve = hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir = hammer2_vop_nmkdir,
	.vop_nmknod = hammer2_vop_nmknod,
	.vop_ioctl = hammer2_vop_ioctl,
	.vop_mountctl = hammer2_vop_mountctl,
	.vop_bmap = hammer2_vop_bmap,
	.vop_strategy = hammer2_vop_strategy,
	.vop_kqfilter = hammer2_vop_kqfilter
};
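/*
 * Device special files and FIFOs share the attribute and reclaim code
 * above but route their I/O through the standard spec/fifo vectors.
 */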
struct vop_ops hammer2_spec_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_read = vop_stdnoread,
	.vop_write = vop_stdnowrite,
	.vop_access = hammer2_vop_access,
	.vop_close = hammer2_vop_close,
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default = fifo_vnoperate,
	.vop_fsync = hammer2_vop_fsync,
#if 0
	.vop_read = hammer2_vop_fiforead,
	.vop_write = hammer2_vop_fifowrite,
#endif
	.vop_access = hammer2_vop_access,
#if 0
	.vop_close = hammer2_vop_fifoclose,
#endif
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr,
	.vop_kqfilter = hammer2_vop_fifokqfilter
};