1 /* 2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression) 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 */ 36 /* 37 * Kernel Filesystem interface 38 * 39 * NOTE! local ipdata pointers must be reloaded on any modifying operation 40 * to the inode as its underlying chain may have changed. 41 */ 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/fcntl.h> 47 #include <sys/buf.h> 48 #include <sys/proc.h> 49 #include <sys/namei.h> 50 #include <sys/mount.h> 51 #include <sys/vnode.h> 52 #include <sys/mountctl.h> 53 #include <sys/dirent.h> 54 #include <sys/uio.h> 55 #include <sys/objcache.h> 56 #include <sys/event.h> 57 #include <sys/file.h> 58 #include <vfs/fifofs/fifo.h> 59 60 #include "hammer2.h" 61 62 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, 63 int seqcount); 64 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, 65 int ioflag, int seqcount); 66 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize); 67 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize); 68 69 struct objcache *cache_xops; 70 71 static __inline 72 void 73 hammer2_knote(struct vnode *vp, int flags) 74 { 75 if (flags) 76 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags); 77 } 78 79 /* 80 * Last reference to a vnode is going away but it is still cached. 81 */ 82 static 83 int 84 hammer2_vop_inactive(struct vop_inactive_args *ap) 85 { 86 hammer2_inode_t *ip; 87 struct vnode *vp; 88 89 LOCKSTART; 90 vp = ap->a_vp; 91 ip = VTOI(vp); 92 93 /* 94 * Degenerate case 95 */ 96 if (ip == NULL) { 97 vrecycle(vp); 98 LOCKSTOP; 99 return (0); 100 } 101 102 /* 103 * Check for deleted inodes and recycle immediately on the last 104 * release. Be sure to destroy any left-over buffer cache buffers 105 * so we do not waste time trying to flush them. 106 * 107 * Note that deleting the file block chains under the inode chain 108 * would just be a waste of time. 109 * 110 * WARNING: nvtruncbuf() can only be safely called without the inode 111 * lock held due to the way our write thread works. 112 */ 113 if (ip->flags & HAMMER2_INODE_ISUNLINKED) { 114 hammer2_key_t lbase; 115 int nblksize; 116 117 /* 118 * Detect updates to the embedded data which may be 119 * synchronized by the strategy code. Simply mark the 120 * inode modified so it gets picked up by our normal flush. 121 */ 122 nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL); 123 nvtruncbuf(vp, 0, nblksize, 0, 0); 124 vrecycle(vp); 125 } 126 LOCKSTOP; 127 return (0); 128 } 129 130 /* 131 * Reclaim a vnode so that it can be reused; after the inode is 132 * disassociated, the filesystem must manage it alone. 133 */ 134 static 135 int 136 hammer2_vop_reclaim(struct vop_reclaim_args *ap) 137 { 138 hammer2_inode_t *ip; 139 hammer2_pfs_t *pmp; 140 struct vnode *vp; 141 142 LOCKSTART; 143 vp = ap->a_vp; 144 ip = VTOI(vp); 145 if (ip == NULL) { 146 LOCKSTOP; 147 return(0); 148 } 149 pmp = ip->pmp; 150 151 /* 152 * The final close of a deleted file or directory marks it for 153 * destruction. The DELETED flag allows the flusher to shortcut 154 * any modified blocks still unflushed (that is, just ignore them). 155 * 156 * HAMMER2 usually does not try to optimize the freemap by returning 157 * deleted blocks to it as it does not usually know how many snapshots 158 * might be referencing portions of the file/dir. 159 */ 160 vp->v_data = NULL; 161 ip->vp = NULL; 162 163 /* 164 * NOTE! We do not attempt to flush chains here, flushing is 165 * really fragile and could also deadlock. 166 */ 167 vclrisdirty(vp); 168 169 /* 170 * An unlinked inode may have been relinked to the ihidden directory. 171 * This occurs if the inode was unlinked while open. Reclamation of 172 * these inodes requires processing we cannot safely do here so add 173 * the inode to the unlinkq in that situation. 174 * 175 * A reclaim can occur at any time so we cannot safely start a 176 * transaction to handle reclamation of unlinked files. Instead, 177 * the ip is left with a reference and placed on a linked list and 178 * handled later on. 179 */ 180 if ((ip->flags & HAMMER2_INODE_ISUNLINKED) && 181 (ip->flags & HAMMER2_INODE_ISDELETED) == 0) { 182 hammer2_inode_unlink_t *ipul; 183 184 ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO); 185 ipul->ip = ip; 186 187 hammer2_spin_ex(&pmp->list_spin); 188 TAILQ_INSERT_TAIL(&pmp->unlinkq, ipul, entry); 189 hammer2_spin_unex(&pmp->list_spin); 190 /* retain ref from vp for ipul */ 191 } else { 192 hammer2_inode_drop(ip); /* vp ref */ 193 } 194 195 /* 196 * XXX handle background sync when ip dirty, kernel will no longer 197 * notify us regarding this inode because there is no longer a 198 * vnode attached to it. 199 */ 200 201 LOCKSTOP; 202 return (0); 203 } 204 205 static 206 int 207 hammer2_vop_fsync(struct vop_fsync_args *ap) 208 { 209 hammer2_inode_t *ip; 210 struct vnode *vp; 211 212 LOCKSTART; 213 vp = ap->a_vp; 214 ip = VTOI(vp); 215 216 #if 0 217 /* XXX can't do this yet */ 218 hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH); 219 vfsync(vp, ap->a_waitfor, 1, NULL, NULL); 220 #endif 221 hammer2_trans_init(ip->pmp, 0); 222 vfsync(vp, ap->a_waitfor, 1, NULL, NULL); 223 224 /* 225 * Calling chain_flush here creates a lot of duplicative 226 * COW operations due to non-optimal vnode ordering. 227 * 228 * Only do it for an actual fsync() syscall. The other forms 229 * which call this function will eventually call chain_flush 230 * on the volume root as a catch-all, which is far more optimal. 231 */ 232 hammer2_inode_lock(ip, 0); 233 if (ip->flags & HAMMER2_INODE_MODIFIED) 234 hammer2_inode_chain_sync(ip); 235 hammer2_inode_unlock(ip); 236 hammer2_trans_done(ip->pmp); 237 238 LOCKSTOP; 239 return (0); 240 } 241 242 static 243 int 244 hammer2_vop_access(struct vop_access_args *ap) 245 { 246 hammer2_inode_t *ip = VTOI(ap->a_vp); 247 uid_t uid; 248 gid_t gid; 249 int error; 250 251 LOCKSTART; 252 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED); 253 uid = hammer2_to_unix_xid(&ip->meta.uid); 254 gid = hammer2_to_unix_xid(&ip->meta.gid); 255 error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags); 256 hammer2_inode_unlock(ip); 257 258 LOCKSTOP; 259 return (error); 260 } 261 262 static 263 int 264 hammer2_vop_getattr(struct vop_getattr_args *ap) 265 { 266 hammer2_pfs_t *pmp; 267 hammer2_inode_t *ip; 268 struct vnode *vp; 269 struct vattr *vap; 270 hammer2_chain_t *chain; 271 int i; 272 273 LOCKSTART; 274 vp = ap->a_vp; 275 vap = ap->a_vap; 276 277 ip = VTOI(vp); 278 pmp = ip->pmp; 279 280 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED); 281 282 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0]; 283 vap->va_fileid = ip->meta.inum; 284 vap->va_mode = ip->meta.mode; 285 vap->va_nlink = ip->meta.nlinks; 286 vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid); 287 vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid); 288 vap->va_rmajor = 0; 289 vap->va_rminor = 0; 290 vap->va_size = ip->meta.size; /* protected by shared lock */ 291 vap->va_blocksize = HAMMER2_PBUFSIZE; 292 vap->va_flags = ip->meta.uflags; 293 hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime); 294 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime); 295 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime); 296 vap->va_gen = 1; 297 vap->va_bytes = 0; 298 if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) { 299 /* 300 * Can't really calculate directory use sans the files under 301 * it, just assume one block for now. 302 */ 303 vap->va_bytes += HAMMER2_INODE_BYTES; 304 } else { 305 for (i = 0; i < ip->cluster.nchains; ++i) { 306 if ((chain = ip->cluster.array[i].chain) != NULL) { 307 if (vap->va_bytes < chain->bref.data_count) 308 vap->va_bytes = chain->bref.data_count; 309 } 310 } 311 } 312 vap->va_type = hammer2_get_vtype(ip->meta.type); 313 vap->va_filerev = 0; 314 vap->va_uid_uuid = ip->meta.uid; 315 vap->va_gid_uuid = ip->meta.gid; 316 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID | 317 VA_FSID_UUID_VALID; 318 319 hammer2_inode_unlock(ip); 320 321 LOCKSTOP; 322 return (0); 323 } 324 325 static 326 int 327 hammer2_vop_setattr(struct vop_setattr_args *ap) 328 { 329 hammer2_inode_t *ip; 330 struct vnode *vp; 331 struct vattr *vap; 332 int error; 333 int kflags = 0; 334 uint64_t ctime; 335 336 LOCKSTART; 337 vp = ap->a_vp; 338 vap = ap->a_vap; 339 hammer2_update_time(&ctime); 340 341 ip = VTOI(vp); 342 343 if (ip->pmp->ronly) { 344 LOCKSTOP; 345 return(EROFS); 346 } 347 348 hammer2_pfs_memory_wait(ip->pmp); 349 hammer2_trans_init(ip->pmp, 0); 350 hammer2_inode_lock(ip, 0); 351 error = 0; 352 353 if (vap->va_flags != VNOVAL) { 354 u_int32_t flags; 355 356 flags = ip->meta.uflags; 357 error = vop_helper_setattr_flags(&flags, vap->va_flags, 358 hammer2_to_unix_xid(&ip->meta.uid), 359 ap->a_cred); 360 if (error == 0) { 361 if (ip->meta.uflags != flags) { 362 hammer2_inode_modify(ip); 363 ip->meta.uflags = flags; 364 ip->meta.ctime = ctime; 365 kflags |= NOTE_ATTRIB; 366 } 367 if (ip->meta.uflags & (IMMUTABLE | APPEND)) { 368 error = 0; 369 goto done; 370 } 371 } 372 goto done; 373 } 374 if (ip->meta.uflags & (IMMUTABLE | APPEND)) { 375 error = EPERM; 376 goto done; 377 } 378 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { 379 mode_t cur_mode = ip->meta.mode; 380 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid); 381 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid); 382 uuid_t uuid_uid; 383 uuid_t uuid_gid; 384 385 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid, 386 ap->a_cred, 387 &cur_uid, &cur_gid, &cur_mode); 388 if (error == 0) { 389 hammer2_guid_to_uuid(&uuid_uid, cur_uid); 390 hammer2_guid_to_uuid(&uuid_gid, cur_gid); 391 if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) || 392 bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) || 393 ip->meta.mode != cur_mode 394 ) { 395 hammer2_inode_modify(ip); 396 ip->meta.uid = uuid_uid; 397 ip->meta.gid = uuid_gid; 398 ip->meta.mode = cur_mode; 399 ip->meta.ctime = ctime; 400 } 401 kflags |= NOTE_ATTRIB; 402 } 403 } 404 405 /* 406 * Resize the file 407 */ 408 if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) { 409 switch(vp->v_type) { 410 case VREG: 411 if (vap->va_size == ip->meta.size) 412 break; 413 if (vap->va_size < ip->meta.size) { 414 hammer2_truncate_file(ip, vap->va_size); 415 } else { 416 hammer2_extend_file(ip, vap->va_size); 417 } 418 hammer2_inode_modify(ip); 419 ip->meta.mtime = ctime; 420 break; 421 default: 422 error = EINVAL; 423 goto done; 424 } 425 } 426 #if 0 427 /* atime not supported */ 428 if (vap->va_atime.tv_sec != VNOVAL) { 429 hammer2_inode_modify(ip); 430 ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime); 431 kflags |= NOTE_ATTRIB; 432 } 433 #endif 434 if (vap->va_mode != (mode_t)VNOVAL) { 435 mode_t cur_mode = ip->meta.mode; 436 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid); 437 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid); 438 439 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred, 440 cur_uid, cur_gid, &cur_mode); 441 if (error == 0 && ip->meta.mode != cur_mode) { 442 hammer2_inode_modify(ip); 443 ip->meta.mode = cur_mode; 444 ip->meta.ctime = ctime; 445 kflags |= NOTE_ATTRIB; 446 } 447 } 448 449 if (vap->va_mtime.tv_sec != VNOVAL) { 450 hammer2_inode_modify(ip); 451 ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime); 452 kflags |= NOTE_ATTRIB; 453 } 454 455 done: 456 /* 457 * If a truncation occurred we must call inode_fsync() now in order 458 * to trim the related data chains, otherwise a later expansion can 459 * cause havoc. 460 * 461 * If an extend occured that changed the DIRECTDATA state, we must 462 * call inode_fsync now in order to prepare the inode's indirect 463 * block table. 464 */ 465 if (ip->flags & HAMMER2_INODE_RESIZED) 466 hammer2_inode_chain_sync(ip); 467 468 /* 469 * Cleanup. 470 */ 471 hammer2_inode_unlock(ip); 472 hammer2_trans_done(ip->pmp); 473 hammer2_knote(ip->vp, kflags); 474 475 LOCKSTOP; 476 return (error); 477 } 478 479 static 480 int 481 hammer2_vop_readdir(struct vop_readdir_args *ap) 482 { 483 hammer2_xop_readdir_t *xop; 484 hammer2_blockref_t bref; 485 hammer2_inode_t *ip; 486 hammer2_tid_t inum; 487 hammer2_key_t lkey; 488 struct uio *uio; 489 off_t *cookies; 490 off_t saveoff; 491 int cookie_index; 492 int ncookies; 493 int error; 494 int eofflag; 495 int dtype; 496 int r; 497 498 LOCKSTART; 499 ip = VTOI(ap->a_vp); 500 uio = ap->a_uio; 501 saveoff = uio->uio_offset; 502 eofflag = 0; 503 error = 0; 504 505 /* 506 * Setup cookies directory entry cookies if requested 507 */ 508 if (ap->a_ncookies) { 509 ncookies = uio->uio_resid / 16 + 1; 510 if (ncookies > 1024) 511 ncookies = 1024; 512 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK); 513 } else { 514 ncookies = -1; 515 cookies = NULL; 516 } 517 cookie_index = 0; 518 519 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED); 520 521 /* 522 * Handle artificial entries. To ensure that only positive 64 bit 523 * quantities are returned to userland we always strip off bit 63. 524 * The hash code is designed such that codes 0x0000-0x7FFF are not 525 * used, allowing us to use these codes for articial entries. 526 * 527 * Entry 0 is used for '.' and entry 1 is used for '..'. Do not 528 * allow '..' to cross the mount point into (e.g.) the super-root. 529 */ 530 if (saveoff == 0) { 531 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK; 532 r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, "."); 533 if (r) 534 goto done; 535 if (cookies) 536 cookies[cookie_index] = saveoff; 537 ++saveoff; 538 ++cookie_index; 539 if (cookie_index == ncookies) 540 goto done; 541 } 542 543 if (saveoff == 1) { 544 /* 545 * Be careful with lockorder when accessing ".." 546 * 547 * (ip is the current dir. xip is the parent dir). 548 */ 549 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK; 550 if (ip->pip && ip != ip->pmp->iroot) 551 inum = ip->pip->meta.inum & HAMMER2_DIRHASH_USERMSK; 552 r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, ".."); 553 if (r) 554 goto done; 555 if (cookies) 556 cookies[cookie_index] = saveoff; 557 ++saveoff; 558 ++cookie_index; 559 if (cookie_index == ncookies) 560 goto done; 561 } 562 563 lkey = saveoff | HAMMER2_DIRHASH_VISIBLE; 564 if (hammer2_debug & 0x0020) 565 kprintf("readdir: lkey %016jx\n", lkey); 566 if (error) 567 goto done; 568 569 /* 570 * Use XOP for cluster scan. 571 * 572 * parent is the inode cluster, already locked for us. Don't 573 * double lock shared locks as this will screw up upgrades. 574 */ 575 xop = hammer2_xop_alloc(ip, 0); 576 xop->lkey = lkey; 577 hammer2_xop_start(&xop->head, hammer2_xop_readdir); 578 579 for (;;) { 580 const hammer2_inode_data_t *ripdata; 581 582 error = hammer2_xop_collect(&xop->head, 0); 583 if (error) 584 break; 585 if (cookie_index == ncookies) 586 break; 587 if (hammer2_debug & 0x0020) 588 kprintf("cluster chain %p %p\n", 589 xop->head.cluster.focus, 590 (xop->head.cluster.focus ? 591 xop->head.cluster.focus->data : (void *)-1)); 592 ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata; 593 hammer2_cluster_bref(&xop->head.cluster, &bref); 594 if (bref.type == HAMMER2_BREF_TYPE_INODE) { 595 dtype = hammer2_get_dtype(ripdata); 596 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 597 r = vop_write_dirent(&error, uio, 598 ripdata->meta.inum & 599 HAMMER2_DIRHASH_USERMSK, 600 dtype, 601 ripdata->meta.name_len, 602 ripdata->filename); 603 if (r) 604 break; 605 if (cookies) 606 cookies[cookie_index] = saveoff; 607 ++cookie_index; 608 } else { 609 /* XXX chain error */ 610 kprintf("bad chain type readdir %d\n", bref.type); 611 } 612 } 613 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 614 if (error == ENOENT) { 615 error = 0; 616 eofflag = 1; 617 saveoff = (hammer2_key_t)-1; 618 } else { 619 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 620 } 621 done: 622 hammer2_inode_unlock(ip); 623 if (ap->a_eofflag) 624 *ap->a_eofflag = eofflag; 625 if (hammer2_debug & 0x0020) 626 kprintf("readdir: done at %016jx\n", saveoff); 627 uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE; 628 if (error && cookie_index == 0) { 629 if (cookies) { 630 kfree(cookies, M_TEMP); 631 *ap->a_ncookies = 0; 632 *ap->a_cookies = NULL; 633 } 634 } else { 635 if (cookies) { 636 *ap->a_ncookies = cookie_index; 637 *ap->a_cookies = cookies; 638 } 639 } 640 LOCKSTOP; 641 return (error); 642 } 643 644 /* 645 * hammer2_vop_readlink { vp, uio, cred } 646 */ 647 static 648 int 649 hammer2_vop_readlink(struct vop_readlink_args *ap) 650 { 651 struct vnode *vp; 652 hammer2_inode_t *ip; 653 int error; 654 655 vp = ap->a_vp; 656 if (vp->v_type != VLNK) 657 return (EINVAL); 658 ip = VTOI(vp); 659 660 error = hammer2_read_file(ip, ap->a_uio, 0); 661 return (error); 662 } 663 664 static 665 int 666 hammer2_vop_read(struct vop_read_args *ap) 667 { 668 struct vnode *vp; 669 hammer2_inode_t *ip; 670 struct uio *uio; 671 int error; 672 int seqcount; 673 int bigread; 674 675 /* 676 * Read operations supported on this vnode? 677 */ 678 vp = ap->a_vp; 679 if (vp->v_type != VREG) 680 return (EINVAL); 681 682 /* 683 * Misc 684 */ 685 ip = VTOI(vp); 686 uio = ap->a_uio; 687 error = 0; 688 689 seqcount = ap->a_ioflag >> 16; 690 bigread = (uio->uio_resid > 100 * 1024 * 1024); 691 692 error = hammer2_read_file(ip, uio, seqcount); 693 return (error); 694 } 695 696 static 697 int 698 hammer2_vop_write(struct vop_write_args *ap) 699 { 700 hammer2_inode_t *ip; 701 thread_t td; 702 struct vnode *vp; 703 struct uio *uio; 704 int error; 705 int seqcount; 706 707 /* 708 * Read operations supported on this vnode? 709 */ 710 vp = ap->a_vp; 711 if (vp->v_type != VREG) 712 return (EINVAL); 713 714 /* 715 * Misc 716 */ 717 ip = VTOI(vp); 718 uio = ap->a_uio; 719 error = 0; 720 if (ip->pmp->ronly) { 721 return (EROFS); 722 } 723 724 seqcount = ap->a_ioflag >> 16; 725 726 /* 727 * Check resource limit 728 */ 729 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc && 730 uio->uio_offset + uio->uio_resid > 731 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) { 732 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ); 733 return (EFBIG); 734 } 735 736 /* 737 * The transaction interlocks against flushes initiations 738 * (note: but will run concurrently with the actual flush). 739 */ 740 hammer2_trans_init(ip->pmp, 0); 741 error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount); 742 hammer2_trans_done(ip->pmp); 743 744 return (error); 745 } 746 747 /* 748 * Perform read operations on a file or symlink given an UNLOCKED 749 * inode and uio. 750 * 751 * The passed ip is not locked. 752 */ 753 static 754 int 755 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount) 756 { 757 hammer2_off_t size; 758 struct buf *bp; 759 int error; 760 761 error = 0; 762 763 /* 764 * UIO read loop. 765 * 766 * WARNING! Assumes that the kernel interlocks size changes at the 767 * vnode level. 768 */ 769 hammer2_mtx_sh(&ip->lock); 770 size = ip->meta.size; 771 hammer2_mtx_unlock(&ip->lock); 772 773 while (uio->uio_resid > 0 && uio->uio_offset < size) { 774 hammer2_key_t lbase; 775 hammer2_key_t leof; 776 int lblksize; 777 int loff; 778 int n; 779 780 lblksize = hammer2_calc_logical(ip, uio->uio_offset, 781 &lbase, &leof); 782 783 error = cluster_read(ip->vp, leof, lbase, lblksize, 784 uio->uio_resid, seqcount * BKVASIZE, 785 &bp); 786 787 if (error) 788 break; 789 loff = (int)(uio->uio_offset - lbase); 790 n = lblksize - loff; 791 if (n > uio->uio_resid) 792 n = uio->uio_resid; 793 if (n > size - uio->uio_offset) 794 n = (int)(size - uio->uio_offset); 795 bp->b_flags |= B_AGE; 796 uiomove((char *)bp->b_data + loff, n, uio); 797 bqrelse(bp); 798 } 799 return (error); 800 } 801 802 /* 803 * Write to the file represented by the inode via the logical buffer cache. 804 * The inode may represent a regular file or a symlink. 805 * 806 * The inode must not be locked. 807 */ 808 static 809 int 810 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, 811 int ioflag, int seqcount) 812 { 813 hammer2_key_t old_eof; 814 hammer2_key_t new_eof; 815 struct buf *bp; 816 int kflags; 817 int error; 818 int modified; 819 820 /* 821 * Setup if append 822 * 823 * WARNING! Assumes that the kernel interlocks size changes at the 824 * vnode level. 825 */ 826 hammer2_mtx_ex(&ip->lock); 827 if (ioflag & IO_APPEND) 828 uio->uio_offset = ip->meta.size; 829 old_eof = ip->meta.size; 830 831 /* 832 * Extend the file if necessary. If the write fails at some point 833 * we will truncate it back down to cover as much as we were able 834 * to write. 835 * 836 * Doing this now makes it easier to calculate buffer sizes in 837 * the loop. 838 */ 839 kflags = 0; 840 error = 0; 841 modified = 0; 842 843 if (uio->uio_offset + uio->uio_resid > old_eof) { 844 new_eof = uio->uio_offset + uio->uio_resid; 845 modified = 1; 846 hammer2_extend_file(ip, new_eof); 847 kflags |= NOTE_EXTEND; 848 } else { 849 new_eof = old_eof; 850 } 851 hammer2_mtx_unlock(&ip->lock); 852 853 /* 854 * UIO write loop 855 */ 856 while (uio->uio_resid > 0) { 857 hammer2_key_t lbase; 858 int trivial; 859 int endofblk; 860 int lblksize; 861 int loff; 862 int n; 863 864 /* 865 * Don't allow the buffer build to blow out the buffer 866 * cache. 867 */ 868 if ((ioflag & IO_RECURSE) == 0) 869 bwillwrite(HAMMER2_PBUFSIZE); 870 871 /* 872 * This nominally tells us how much we can cluster and 873 * what the logical buffer size needs to be. Currently 874 * we don't try to cluster the write and just handle one 875 * block at a time. 876 */ 877 lblksize = hammer2_calc_logical(ip, uio->uio_offset, 878 &lbase, NULL); 879 loff = (int)(uio->uio_offset - lbase); 880 881 KKASSERT(lblksize <= 65536); 882 883 /* 884 * Calculate bytes to copy this transfer and whether the 885 * copy completely covers the buffer or not. 886 */ 887 trivial = 0; 888 n = lblksize - loff; 889 if (n > uio->uio_resid) { 890 n = uio->uio_resid; 891 if (loff == lbase && uio->uio_offset + n == new_eof) 892 trivial = 1; 893 endofblk = 0; 894 } else { 895 if (loff == 0) 896 trivial = 1; 897 endofblk = 1; 898 } 899 900 /* 901 * Get the buffer 902 */ 903 if (uio->uio_segflg == UIO_NOCOPY) { 904 /* 905 * Issuing a write with the same data backing the 906 * buffer. Instantiate the buffer to collect the 907 * backing vm pages, then read-in any missing bits. 908 * 909 * This case is used by vop_stdputpages(). 910 */ 911 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0); 912 if ((bp->b_flags & B_CACHE) == 0) { 913 bqrelse(bp); 914 error = bread(ip->vp, lbase, lblksize, &bp); 915 } 916 } else if (trivial) { 917 /* 918 * Even though we are entirely overwriting the buffer 919 * we may still have to zero it out to avoid a 920 * mmap/write visibility issue. 921 */ 922 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0); 923 if ((bp->b_flags & B_CACHE) == 0) 924 vfs_bio_clrbuf(bp); 925 } else { 926 /* 927 * Partial overwrite, read in any missing bits then 928 * replace the portion being written. 929 * 930 * (The strategy code will detect zero-fill physical 931 * blocks for this case). 932 */ 933 error = bread(ip->vp, lbase, lblksize, &bp); 934 if (error == 0) 935 bheavy(bp); 936 } 937 938 if (error) { 939 brelse(bp); 940 break; 941 } 942 943 /* 944 * Ok, copy the data in 945 */ 946 error = uiomove(bp->b_data + loff, n, uio); 947 kflags |= NOTE_WRITE; 948 modified = 1; 949 if (error) { 950 brelse(bp); 951 break; 952 } 953 954 /* 955 * WARNING: Pageout daemon will issue UIO_NOCOPY writes 956 * with IO_SYNC or IO_ASYNC set. These writes 957 * must be handled as the pageout daemon expects. 958 */ 959 if (ioflag & IO_SYNC) { 960 bwrite(bp); 961 } else if ((ioflag & IO_DIRECT) && endofblk) { 962 bawrite(bp); 963 } else if (ioflag & IO_ASYNC) { 964 bawrite(bp); 965 } else { 966 bdwrite(bp); 967 } 968 } 969 970 /* 971 * Cleanup. If we extended the file EOF but failed to write through 972 * the entire write is a failure and we have to back-up. 973 */ 974 if (error && new_eof != old_eof) { 975 hammer2_mtx_ex(&ip->lock); 976 hammer2_truncate_file(ip, old_eof); 977 if (ip->flags & HAMMER2_INODE_MODIFIED) 978 hammer2_inode_chain_sync(ip); 979 hammer2_mtx_unlock(&ip->lock); 980 } else if (modified) { 981 hammer2_mtx_ex(&ip->lock); 982 hammer2_inode_modify(ip); 983 hammer2_update_time(&ip->meta.mtime); 984 if (ip->flags & HAMMER2_INODE_MODIFIED) 985 hammer2_inode_chain_sync(ip); 986 hammer2_mtx_unlock(&ip->lock); 987 hammer2_knote(ip->vp, kflags); 988 } 989 hammer2_trans_assert_strategy(ip->pmp); 990 991 return error; 992 } 993 994 /* 995 * Truncate the size of a file. The inode must not be locked. 996 * 997 * We must unconditionally set HAMMER2_INODE_RESIZED to properly 998 * ensure that any on-media data beyond the new file EOF has been destroyed. 999 * 1000 * WARNING: nvtruncbuf() can only be safely called without the inode lock 1001 * held due to the way our write thread works. If the truncation 1002 * occurs in the middle of a buffer, nvtruncbuf() is responsible 1003 * for dirtying that buffer and zeroing out trailing bytes. 1004 * 1005 * WARNING! Assumes that the kernel interlocks size changes at the 1006 * vnode level. 1007 * 1008 * WARNING! Caller assumes responsibility for removing dead blocks 1009 * if INODE_RESIZED is set. 1010 */ 1011 static 1012 void 1013 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize) 1014 { 1015 hammer2_key_t lbase; 1016 int nblksize; 1017 1018 LOCKSTART; 1019 hammer2_mtx_unlock(&ip->lock); 1020 if (ip->vp) { 1021 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL); 1022 nvtruncbuf(ip->vp, nsize, 1023 nblksize, (int)nsize & (nblksize - 1), 1024 0); 1025 } 1026 hammer2_mtx_ex(&ip->lock); 1027 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0); 1028 ip->osize = ip->meta.size; 1029 ip->meta.size = nsize; 1030 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED | 1031 HAMMER2_INODE_RESIZED); 1032 LOCKSTOP; 1033 } 1034 1035 /* 1036 * Extend the size of a file. The inode must not be locked. 1037 * 1038 * Even though the file size is changing, we do not have to set the 1039 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES 1040 * boundary. When this occurs a hammer2_inode_chain_sync() is required 1041 * to prepare the inode cluster's indirect block table, otherwise 1042 * async execution of the strategy code will implode on us. 1043 * 1044 * WARNING! Assumes that the kernel interlocks size changes at the 1045 * vnode level. 1046 * 1047 * WARNING! Caller assumes responsibility for transitioning out 1048 * of the inode DIRECTDATA mode if INODE_RESIZED is set. 1049 */ 1050 static 1051 void 1052 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize) 1053 { 1054 hammer2_key_t lbase; 1055 hammer2_key_t osize; 1056 int oblksize; 1057 int nblksize; 1058 1059 LOCKSTART; 1060 1061 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0); 1062 osize = ip->meta.size; 1063 ip->osize = osize; 1064 ip->meta.size = nsize; 1065 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED); 1066 1067 if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) { 1068 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED); 1069 hammer2_inode_chain_sync(ip); 1070 } 1071 1072 hammer2_mtx_unlock(&ip->lock); 1073 if (ip->vp) { 1074 oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL); 1075 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL); 1076 nvextendbuf(ip->vp, 1077 osize, nsize, 1078 oblksize, nblksize, 1079 -1, -1, 0); 1080 } 1081 hammer2_mtx_ex(&ip->lock); 1082 1083 LOCKSTOP; 1084 } 1085 1086 static 1087 int 1088 hammer2_vop_nresolve(struct vop_nresolve_args *ap) 1089 { 1090 hammer2_xop_nresolve_t *xop; 1091 hammer2_inode_t *ip; 1092 hammer2_inode_t *dip; 1093 struct namecache *ncp; 1094 struct vnode *vp; 1095 int error; 1096 1097 LOCKSTART; 1098 dip = VTOI(ap->a_dvp); 1099 xop = hammer2_xop_alloc(dip, 0); 1100 1101 ncp = ap->a_nch->ncp; 1102 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen); 1103 1104 /* 1105 * Note: In DragonFly the kernel handles '.' and '..'. 1106 */ 1107 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED); 1108 hammer2_xop_start(&xop->head, hammer2_xop_nresolve); 1109 1110 error = hammer2_xop_collect(&xop->head, 0); 1111 if (error) { 1112 ip = NULL; 1113 } else { 1114 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1); 1115 } 1116 hammer2_inode_unlock(dip); 1117 1118 /* 1119 * Acquire the related vnode 1120 * 1121 * NOTE: For error processing, only ENOENT resolves the namecache 1122 * entry to NULL, otherwise we just return the error and 1123 * leave the namecache unresolved. 1124 * 1125 * NOTE: multiple hammer2_inode structures can be aliased to the 1126 * same chain element, for example for hardlinks. This 1127 * use case does not 'reattach' inode associations that 1128 * might already exist, but always allocates a new one. 1129 * 1130 * WARNING: inode structure is locked exclusively via inode_get 1131 * but chain was locked shared. inode_unlock() 1132 * will handle it properly. 1133 */ 1134 if (ip) { 1135 vp = hammer2_igetv(ip, &error); 1136 if (error == 0) { 1137 vn_unlock(vp); 1138 cache_setvp(ap->a_nch, vp); 1139 } else if (error == ENOENT) { 1140 cache_setvp(ap->a_nch, NULL); 1141 } 1142 hammer2_inode_unlock(ip); 1143 1144 /* 1145 * The vp should not be released until after we've disposed 1146 * of our locks, because it might cause vop_inactive() to 1147 * be called. 1148 */ 1149 if (vp) 1150 vrele(vp); 1151 } else { 1152 error = ENOENT; 1153 cache_setvp(ap->a_nch, NULL); 1154 } 1155 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1156 KASSERT(error || ap->a_nch->ncp->nc_vp != NULL, 1157 ("resolve error %d/%p ap %p\n", 1158 error, ap->a_nch->ncp->nc_vp, ap)); 1159 LOCKSTOP; 1160 1161 return error; 1162 } 1163 1164 static 1165 int 1166 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap) 1167 { 1168 hammer2_inode_t *dip; 1169 hammer2_inode_t *ip; 1170 int error; 1171 1172 LOCKSTART; 1173 dip = VTOI(ap->a_dvp); 1174 1175 if ((ip = dip->pip) == NULL) { 1176 *ap->a_vpp = NULL; 1177 LOCKSTOP; 1178 return ENOENT; 1179 } 1180 hammer2_inode_lock(ip, 0); 1181 *ap->a_vpp = hammer2_igetv(ip, &error); 1182 hammer2_inode_unlock(ip); 1183 1184 LOCKSTOP; 1185 return error; 1186 } 1187 1188 static 1189 int 1190 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap) 1191 { 1192 hammer2_inode_t *dip; 1193 hammer2_inode_t *nip; 1194 struct namecache *ncp; 1195 const uint8_t *name; 1196 size_t name_len; 1197 int error; 1198 1199 LOCKSTART; 1200 dip = VTOI(ap->a_dvp); 1201 if (dip->pmp->ronly) { 1202 LOCKSTOP; 1203 return (EROFS); 1204 } 1205 1206 ncp = ap->a_nch->ncp; 1207 name = ncp->nc_name; 1208 name_len = ncp->nc_nlen; 1209 1210 hammer2_pfs_memory_wait(dip->pmp); 1211 hammer2_trans_init(dip->pmp, 0); 1212 nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred, 1213 name, name_len, 0, 1214 hammer2_trans_newinum(dip->pmp), 0, 0, 1215 0, &error); 1216 if (error) { 1217 KKASSERT(nip == NULL); 1218 *ap->a_vpp = NULL; 1219 } else { 1220 *ap->a_vpp = hammer2_igetv(nip, &error); 1221 hammer2_inode_unlock(nip); 1222 } 1223 hammer2_trans_done(dip->pmp); 1224 1225 if (error == 0) { 1226 cache_setunresolved(ap->a_nch); 1227 cache_setvp(ap->a_nch, *ap->a_vpp); 1228 } 1229 LOCKSTOP; 1230 return error; 1231 } 1232 1233 static 1234 int 1235 hammer2_vop_open(struct vop_open_args *ap) 1236 { 1237 return vop_stdopen(ap); 1238 } 1239 1240 /* 1241 * hammer2_vop_advlock { vp, id, op, fl, flags } 1242 */ 1243 static 1244 int 1245 hammer2_vop_advlock(struct vop_advlock_args *ap) 1246 { 1247 hammer2_inode_t *ip = VTOI(ap->a_vp); 1248 hammer2_off_t size; 1249 1250 size = ip->meta.size; 1251 return (lf_advlock(ap, &ip->advlock, size)); 1252 } 1253 1254 static 1255 int 1256 hammer2_vop_close(struct vop_close_args *ap) 1257 { 1258 return vop_stdclose(ap); 1259 } 1260 1261 /* 1262 * hammer2_vop_nlink { nch, dvp, vp, cred } 1263 * 1264 * Create a hardlink from (vp) to {dvp, nch}. 1265 */ 1266 static 1267 int 1268 hammer2_vop_nlink(struct vop_nlink_args *ap) 1269 { 1270 hammer2_xop_nlink_t *xop1; 1271 hammer2_inode_t *fdip; /* target directory to create link in */ 1272 hammer2_inode_t *tdip; /* target directory to create link in */ 1273 hammer2_inode_t *cdip; /* common parent directory */ 1274 hammer2_inode_t *ip; /* inode we are hardlinking to */ 1275 struct namecache *ncp; 1276 const uint8_t *name; 1277 size_t name_len; 1278 int error; 1279 1280 LOCKSTART; 1281 tdip = VTOI(ap->a_dvp); 1282 if (tdip->pmp->ronly) { 1283 LOCKSTOP; 1284 return (EROFS); 1285 } 1286 1287 ncp = ap->a_nch->ncp; 1288 name = ncp->nc_name; 1289 name_len = ncp->nc_nlen; 1290 1291 /* 1292 * ip represents the file being hardlinked. The file could be a 1293 * normal file or a hardlink target if it has already been hardlinked. 1294 * If ip is a hardlinked target then ip->pip represents the location 1295 * of the hardlinked target, NOT the location of the hardlink pointer. 1296 * 1297 * Bump nlinks and potentially also create or move the hardlink 1298 * target in the parent directory common to (ip) and (tdip). The 1299 * consolidation code can modify ip->cluster and ip->pip. The 1300 * returned cluster is locked. 1301 */ 1302 ip = VTOI(ap->a_vp); 1303 hammer2_pfs_memory_wait(ip->pmp); 1304 hammer2_trans_init(ip->pmp, 0); 1305 1306 /* 1307 * The common parent directory must be locked first to avoid deadlocks. 1308 * Also note that fdip and/or tdip might match cdip. 1309 */ 1310 fdip = ip->pip; 1311 cdip = hammer2_inode_common_parent(fdip, tdip); 1312 hammer2_inode_lock(cdip, 0); 1313 hammer2_inode_lock(fdip, 0); 1314 hammer2_inode_lock(tdip, 0); 1315 hammer2_inode_lock(ip, 0); 1316 error = 0; 1317 1318 /* 1319 * If ip is not a hardlink target we must convert it to a hardlink. 1320 * If fdip != cdip we must shift the inode to cdip. 1321 */ 1322 if (fdip != cdip || (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) { 1323 xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING); 1324 hammer2_xop_setip2(&xop1->head, ip); 1325 hammer2_xop_setip3(&xop1->head, cdip); 1326 1327 hammer2_xop_start(&xop1->head, hammer2_xop_nlink); 1328 error = hammer2_xop_collect(&xop1->head, 0); 1329 hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP); 1330 if (error == ENOENT) 1331 error = 0; 1332 } 1333 1334 /* 1335 * Must synchronize original inode whos chains are now a hardlink 1336 * target. We must match what the backend XOP did to the 1337 * chains. 1338 */ 1339 if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) { 1340 hammer2_inode_modify(ip); 1341 ip->meta.name_key = ip->meta.inum; 1342 ip->meta.name_len = 18; /* "0x%016jx" */ 1343 } 1344 1345 /* 1346 * Create the hardlink target and bump nlinks. 1347 */ 1348 if (error == 0) { 1349 hammer2_inode_create(tdip, NULL, NULL, 1350 name, name_len, 0, 1351 ip->meta.inum, 1352 HAMMER2_OBJTYPE_HARDLINK, ip->meta.type, 1353 0, &error); 1354 hammer2_inode_modify(ip); 1355 ++ip->meta.nlinks; 1356 } 1357 if (error == 0) { 1358 cache_setunresolved(ap->a_nch); 1359 cache_setvp(ap->a_nch, ap->a_vp); 1360 } 1361 hammer2_inode_unlock(ip); 1362 hammer2_inode_unlock(tdip); 1363 hammer2_inode_unlock(fdip); 1364 hammer2_inode_unlock(cdip); 1365 hammer2_inode_drop(cdip); 1366 hammer2_trans_done(ip->pmp); 1367 1368 LOCKSTOP; 1369 return error; 1370 } 1371 1372 /* 1373 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap } 1374 * 1375 * The operating system has already ensured that the directory entry 1376 * does not exist and done all appropriate namespace locking. 1377 */ 1378 static 1379 int 1380 hammer2_vop_ncreate(struct vop_ncreate_args *ap) 1381 { 1382 hammer2_inode_t *dip; 1383 hammer2_inode_t *nip; 1384 struct namecache *ncp; 1385 const uint8_t *name; 1386 size_t name_len; 1387 int error; 1388 1389 LOCKSTART; 1390 dip = VTOI(ap->a_dvp); 1391 if (dip->pmp->ronly) { 1392 LOCKSTOP; 1393 return (EROFS); 1394 } 1395 1396 ncp = ap->a_nch->ncp; 1397 name = ncp->nc_name; 1398 name_len = ncp->nc_nlen; 1399 hammer2_pfs_memory_wait(dip->pmp); 1400 hammer2_trans_init(dip->pmp, 0); 1401 1402 nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred, 1403 name, name_len, 0, 1404 hammer2_trans_newinum(dip->pmp), 0, 0, 1405 0, &error); 1406 if (error) { 1407 KKASSERT(nip == NULL); 1408 *ap->a_vpp = NULL; 1409 } else { 1410 *ap->a_vpp = hammer2_igetv(nip, &error); 1411 hammer2_inode_unlock(nip); 1412 } 1413 hammer2_trans_done(dip->pmp); 1414 1415 if (error == 0) { 1416 cache_setunresolved(ap->a_nch); 1417 cache_setvp(ap->a_nch, *ap->a_vpp); 1418 } 1419 LOCKSTOP; 1420 return error; 1421 } 1422 1423 /* 1424 * Make a device node (typically a fifo) 1425 */ 1426 static 1427 int 1428 hammer2_vop_nmknod(struct vop_nmknod_args *ap) 1429 { 1430 hammer2_inode_t *dip; 1431 hammer2_inode_t *nip; 1432 struct namecache *ncp; 1433 const uint8_t *name; 1434 size_t name_len; 1435 int error; 1436 1437 LOCKSTART; 1438 dip = VTOI(ap->a_dvp); 1439 if (dip->pmp->ronly) { 1440 LOCKSTOP; 1441 return (EROFS); 1442 } 1443 1444 ncp = ap->a_nch->ncp; 1445 name = ncp->nc_name; 1446 name_len = ncp->nc_nlen; 1447 hammer2_pfs_memory_wait(dip->pmp); 1448 hammer2_trans_init(dip->pmp, 0); 1449 1450 nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred, 1451 name, name_len, 0, 1452 hammer2_trans_newinum(dip->pmp), 0, 0, 1453 0, &error); 1454 if (error) { 1455 KKASSERT(nip == NULL); 1456 *ap->a_vpp = NULL; 1457 } else { 1458 *ap->a_vpp = hammer2_igetv(nip, &error); 1459 hammer2_inode_unlock(nip); 1460 } 1461 hammer2_trans_done(dip->pmp); 1462 1463 if (error == 0) { 1464 cache_setunresolved(ap->a_nch); 1465 cache_setvp(ap->a_nch, *ap->a_vpp); 1466 } 1467 LOCKSTOP; 1468 return error; 1469 } 1470 1471 /* 1472 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target } 1473 */ 1474 static 1475 int 1476 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap) 1477 { 1478 hammer2_inode_t *dip; 1479 hammer2_inode_t *nip; 1480 struct namecache *ncp; 1481 const uint8_t *name; 1482 size_t name_len; 1483 int error; 1484 1485 dip = VTOI(ap->a_dvp); 1486 if (dip->pmp->ronly) 1487 return (EROFS); 1488 1489 ncp = ap->a_nch->ncp; 1490 name = ncp->nc_name; 1491 name_len = ncp->nc_nlen; 1492 hammer2_pfs_memory_wait(dip->pmp); 1493 hammer2_trans_init(dip->pmp, 0); 1494 1495 ap->a_vap->va_type = VLNK; /* enforce type */ 1496 1497 nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred, 1498 name, name_len, 0, 1499 hammer2_trans_newinum(dip->pmp), 0, 0, 1500 0, &error); 1501 if (error) { 1502 KKASSERT(nip == NULL); 1503 *ap->a_vpp = NULL; 1504 hammer2_trans_done(dip->pmp); 1505 return error; 1506 } 1507 *ap->a_vpp = hammer2_igetv(nip, &error); 1508 1509 /* 1510 * Build the softlink (~like file data) and finalize the namecache. 1511 */ 1512 if (error == 0) { 1513 size_t bytes; 1514 struct uio auio; 1515 struct iovec aiov; 1516 1517 bytes = strlen(ap->a_target); 1518 1519 hammer2_inode_unlock(nip); 1520 bzero(&auio, sizeof(auio)); 1521 bzero(&aiov, sizeof(aiov)); 1522 auio.uio_iov = &aiov; 1523 auio.uio_segflg = UIO_SYSSPACE; 1524 auio.uio_rw = UIO_WRITE; 1525 auio.uio_resid = bytes; 1526 auio.uio_iovcnt = 1; 1527 auio.uio_td = curthread; 1528 aiov.iov_base = ap->a_target; 1529 aiov.iov_len = bytes; 1530 error = hammer2_write_file(nip, &auio, IO_APPEND, 0); 1531 /* XXX handle error */ 1532 error = 0; 1533 } else { 1534 hammer2_inode_unlock(nip); 1535 } 1536 hammer2_trans_done(dip->pmp); 1537 1538 /* 1539 * Finalize namecache 1540 */ 1541 if (error == 0) { 1542 cache_setunresolved(ap->a_nch); 1543 cache_setvp(ap->a_nch, *ap->a_vpp); 1544 /* hammer2_knote(ap->a_dvp, NOTE_WRITE); */ 1545 } 1546 return error; 1547 } 1548 1549 /* 1550 * hammer2_vop_nremove { nch, dvp, cred } 1551 */ 1552 static 1553 int 1554 hammer2_vop_nremove(struct vop_nremove_args *ap) 1555 { 1556 hammer2_xop_unlink_t *xop; 1557 hammer2_inode_t *dip; 1558 hammer2_inode_t *ip; 1559 struct namecache *ncp; 1560 int error; 1561 int isopen; 1562 1563 LOCKSTART; 1564 dip = VTOI(ap->a_dvp); 1565 if (dip->pmp->ronly) { 1566 LOCKSTOP; 1567 return(EROFS); 1568 } 1569 1570 ncp = ap->a_nch->ncp; 1571 1572 hammer2_pfs_memory_wait(dip->pmp); 1573 hammer2_trans_init(dip->pmp, 0); 1574 hammer2_inode_lock(dip, 0); 1575 1576 /* 1577 * The unlink XOP unlinks the path from the directory and 1578 * locates and returns the cluster associated with the real inode. 1579 * We have to handle nlinks here on the frontend. 1580 */ 1581 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING); 1582 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen); 1583 isopen = cache_isopen(ap->a_nch); 1584 xop->isdir = 0; 1585 xop->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT; 1586 hammer2_xop_start(&xop->head, hammer2_xop_unlink); 1587 1588 /* 1589 * Collect the real inode and adjust nlinks, destroy the real 1590 * inode if nlinks transitions to 0 and it was the real inode 1591 * (else it has already been removed). 1592 */ 1593 error = hammer2_xop_collect(&xop->head, 0); 1594 hammer2_inode_unlock(dip); 1595 1596 if (error == 0) { 1597 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1); 1598 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1599 if (ip) { 1600 hammer2_inode_unlink_finisher(ip, isopen); 1601 hammer2_inode_unlock(ip); 1602 } 1603 } else { 1604 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1605 } 1606 1607 hammer2_inode_run_unlinkq(dip->pmp); 1608 hammer2_trans_done(dip->pmp); 1609 if (error == 0) 1610 cache_unlink(ap->a_nch); 1611 LOCKSTOP; 1612 return (error); 1613 } 1614 1615 /* 1616 * hammer2_vop_nrmdir { nch, dvp, cred } 1617 */ 1618 static 1619 int 1620 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap) 1621 { 1622 hammer2_xop_unlink_t *xop; 1623 hammer2_inode_t *dip; 1624 hammer2_inode_t *ip; 1625 struct namecache *ncp; 1626 int isopen; 1627 int error; 1628 1629 LOCKSTART; 1630 dip = VTOI(ap->a_dvp); 1631 if (dip->pmp->ronly) { 1632 LOCKSTOP; 1633 return(EROFS); 1634 } 1635 1636 hammer2_pfs_memory_wait(dip->pmp); 1637 hammer2_trans_init(dip->pmp, 0); 1638 hammer2_inode_lock(dip, 0); 1639 1640 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING); 1641 1642 ncp = ap->a_nch->ncp; 1643 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen); 1644 isopen = cache_isopen(ap->a_nch); 1645 xop->isdir = 1; 1646 xop->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT; 1647 hammer2_xop_start(&xop->head, hammer2_xop_unlink); 1648 1649 /* 1650 * Collect the real inode and adjust nlinks, destroy the real 1651 * inode if nlinks transitions to 0 and it was the real inode 1652 * (else it has already been removed). 1653 */ 1654 error = hammer2_xop_collect(&xop->head, 0); 1655 hammer2_inode_unlock(dip); 1656 1657 if (error == 0) { 1658 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1); 1659 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1660 if (ip) { 1661 hammer2_inode_unlink_finisher(ip, isopen); 1662 hammer2_inode_unlock(ip); 1663 } 1664 } else { 1665 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1666 } 1667 hammer2_inode_run_unlinkq(dip->pmp); 1668 hammer2_trans_done(dip->pmp); 1669 if (error == 0) 1670 cache_unlink(ap->a_nch); 1671 LOCKSTOP; 1672 return (error); 1673 } 1674 1675 /* 1676 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred } 1677 */ 1678 static 1679 int 1680 hammer2_vop_nrename(struct vop_nrename_args *ap) 1681 { 1682 struct namecache *fncp; 1683 struct namecache *tncp; 1684 hammer2_inode_t *cdip; 1685 hammer2_inode_t *fdip; 1686 hammer2_inode_t *tdip; 1687 hammer2_inode_t *ip; 1688 const uint8_t *fname; 1689 size_t fname_len; 1690 const uint8_t *tname; 1691 size_t tname_len; 1692 int error; 1693 int tnch_error; 1694 hammer2_key_t tlhc; 1695 1696 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount) 1697 return(EXDEV); 1698 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount) 1699 return(EXDEV); 1700 1701 fdip = VTOI(ap->a_fdvp); /* source directory */ 1702 tdip = VTOI(ap->a_tdvp); /* target directory */ 1703 1704 if (fdip->pmp->ronly) 1705 return(EROFS); 1706 1707 LOCKSTART; 1708 fncp = ap->a_fnch->ncp; /* entry name in source */ 1709 fname = fncp->nc_name; 1710 fname_len = fncp->nc_nlen; 1711 1712 tncp = ap->a_tnch->ncp; /* entry name in target */ 1713 tname = tncp->nc_name; 1714 tname_len = tncp->nc_nlen; 1715 1716 hammer2_pfs_memory_wait(tdip->pmp); 1717 hammer2_trans_init(tdip->pmp, 0); 1718 1719 /* 1720 * ip is the inode being renamed. If this is a hardlink then 1721 * ip represents the actual file and not the hardlink marker. 1722 */ 1723 ip = VTOI(fncp->nc_vp); 1724 1725 /* 1726 * The common parent directory must be locked first to avoid deadlocks. 1727 * Also note that fdip and/or tdip might match cdip. 1728 */ 1729 cdip = hammer2_inode_common_parent(ip->pip, tdip); 1730 hammer2_inode_lock(cdip, 0); 1731 hammer2_inode_lock(fdip, 0); 1732 hammer2_inode_lock(tdip, 0); 1733 hammer2_inode_ref(ip); /* extra ref */ 1734 error = 0; 1735 1736 /* 1737 * If ip is a hardlink target and fdip != cdip we must shift the 1738 * inode to cdip. 1739 */ 1740 if (fdip != cdip && 1741 (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) { 1742 hammer2_xop_nlink_t *xop1; 1743 1744 xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING); 1745 hammer2_xop_setip2(&xop1->head, ip); 1746 hammer2_xop_setip3(&xop1->head, cdip); 1747 1748 hammer2_xop_start(&xop1->head, hammer2_xop_nlink); 1749 error = hammer2_xop_collect(&xop1->head, 0); 1750 hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP); 1751 } 1752 1753 /* 1754 * Delete the target namespace. 1755 */ 1756 { 1757 hammer2_xop_unlink_t *xop2; 1758 hammer2_inode_t *tip; 1759 int isopen; 1760 1761 /* 1762 * The unlink XOP unlinks the path from the directory and 1763 * locates and returns the cluster associated with the real 1764 * inode. We have to handle nlinks here on the frontend. 1765 */ 1766 xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING); 1767 hammer2_xop_setname(&xop2->head, tname, tname_len); 1768 isopen = cache_isopen(ap->a_tnch); 1769 xop2->isdir = -1; 1770 xop2->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT; 1771 hammer2_xop_start(&xop2->head, hammer2_xop_unlink); 1772 1773 /* 1774 * Collect the real inode and adjust nlinks, destroy the real 1775 * inode if nlinks transitions to 0 and it was the real inode 1776 * (else it has already been removed). 1777 */ 1778 tnch_error = hammer2_xop_collect(&xop2->head, 0); 1779 /* hammer2_inode_unlock(tdip); */ 1780 1781 if (tnch_error == 0) { 1782 tip = hammer2_inode_get(tdip->pmp, NULL, 1783 &xop2->head.cluster, -1); 1784 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP); 1785 if (tip) { 1786 hammer2_inode_unlink_finisher(tip, isopen); 1787 hammer2_inode_unlock(tip); 1788 } 1789 } else { 1790 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP); 1791 } 1792 /* hammer2_inode_lock(tdip, 0); */ 1793 1794 if (tnch_error && tnch_error != ENOENT) { 1795 error = tnch_error; 1796 goto done2; 1797 } 1798 } 1799 1800 /* 1801 * Resolve the collision space for (tdip, tname, tname_len) 1802 * 1803 * tdip must be held exclusively locked to prevent races. 1804 */ 1805 { 1806 hammer2_xop_scanlhc_t *sxop; 1807 hammer2_tid_t lhcbase; 1808 1809 tlhc = hammer2_dirhash(tname, tname_len); 1810 lhcbase = tlhc; 1811 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING); 1812 sxop->lhc = tlhc; 1813 hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc); 1814 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) { 1815 if (tlhc != sxop->head.cluster.focus->bref.key) 1816 break; 1817 ++tlhc; 1818 } 1819 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP); 1820 1821 if (error) { 1822 if (error != ENOENT) 1823 goto done2; 1824 ++tlhc; 1825 error = 0; 1826 } 1827 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) { 1828 error = ENOSPC; 1829 goto done2; 1830 } 1831 } 1832 1833 /* 1834 * Everything is setup, do the rename. 1835 * 1836 * We have to synchronize ip->meta to the underlying operation. 1837 * 1838 * NOTE: To avoid deadlocks we cannot lock (ip) while we are 1839 * unlinking elements from their directories. Locking 1840 * the nlinks field does not lock the whole inode. 1841 */ 1842 hammer2_inode_lock(ip, 0); 1843 if (error == 0) { 1844 hammer2_xop_nrename_t *xop4; 1845 1846 xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING); 1847 xop4->lhc = tlhc; 1848 xop4->ip_key = ip->meta.name_key; 1849 hammer2_xop_setip2(&xop4->head, ip); 1850 hammer2_xop_setip3(&xop4->head, tdip); 1851 hammer2_xop_setname(&xop4->head, fname, fname_len); 1852 hammer2_xop_setname2(&xop4->head, tname, tname_len); 1853 hammer2_xop_start(&xop4->head, hammer2_xop_nrename); 1854 1855 error = hammer2_xop_collect(&xop4->head, 0); 1856 hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP); 1857 1858 if (error == ENOENT) 1859 error = 0; 1860 if (error == 0 && 1861 (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) { 1862 hammer2_inode_modify(ip); 1863 ip->meta.name_len = tname_len; 1864 ip->meta.name_key = tlhc; 1865 1866 } 1867 } 1868 1869 /* 1870 * Fixup ip->pip if we were renaming the actual file and not a 1871 * hardlink pointer. 1872 */ 1873 if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) { 1874 hammer2_inode_t *opip; 1875 1876 if (ip->pip != tdip) { 1877 hammer2_inode_ref(tdip); 1878 opip = ip->pip; 1879 ip->pip = tdip; 1880 if (opip) 1881 hammer2_inode_drop(opip); 1882 } 1883 } 1884 hammer2_inode_unlock(ip); 1885 done2: 1886 hammer2_inode_unlock(tdip); 1887 hammer2_inode_unlock(fdip); 1888 hammer2_inode_unlock(cdip); 1889 hammer2_inode_drop(ip); 1890 hammer2_inode_drop(cdip); 1891 hammer2_inode_run_unlinkq(fdip->pmp); 1892 hammer2_trans_done(tdip->pmp); 1893 1894 /* 1895 * Issue the namecache update after unlocking all the internal 1896 * hammer structures, otherwise we might deadlock. 1897 */ 1898 if (tnch_error == 0) { 1899 cache_unlink(ap->a_tnch); 1900 cache_setunresolved(ap->a_tnch); 1901 } 1902 if (error == 0) 1903 cache_rename(ap->a_fnch, ap->a_tnch); 1904 1905 LOCKSTOP; 1906 return (error); 1907 } 1908 1909 /* 1910 * hammer2_vop_ioctl { vp, command, data, fflag, cred } 1911 */ 1912 static 1913 int 1914 hammer2_vop_ioctl(struct vop_ioctl_args *ap) 1915 { 1916 hammer2_inode_t *ip; 1917 int error; 1918 1919 LOCKSTART; 1920 ip = VTOI(ap->a_vp); 1921 1922 error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data, 1923 ap->a_fflag, ap->a_cred); 1924 LOCKSTOP; 1925 return (error); 1926 } 1927 1928 static 1929 int 1930 hammer2_vop_mountctl(struct vop_mountctl_args *ap) 1931 { 1932 struct mount *mp; 1933 hammer2_pfs_t *pmp; 1934 int rc; 1935 1936 LOCKSTART; 1937 switch (ap->a_op) { 1938 case (MOUNTCTL_SET_EXPORT): 1939 mp = ap->a_head.a_ops->head.vv_mount; 1940 pmp = MPTOPMP(mp); 1941 1942 if (ap->a_ctllen != sizeof(struct export_args)) 1943 rc = (EINVAL); 1944 else 1945 rc = vfs_export(mp, &pmp->export, 1946 (const struct export_args *)ap->a_ctl); 1947 break; 1948 default: 1949 rc = vop_stdmountctl(ap); 1950 break; 1951 } 1952 LOCKSTOP; 1953 return (rc); 1954 } 1955 1956 /* 1957 * KQFILTER 1958 */ 1959 static void filt_hammer2detach(struct knote *kn); 1960 static int filt_hammer2read(struct knote *kn, long hint); 1961 static int filt_hammer2write(struct knote *kn, long hint); 1962 static int filt_hammer2vnode(struct knote *kn, long hint); 1963 1964 static struct filterops hammer2read_filtops = 1965 { FILTEROP_ISFD | FILTEROP_MPSAFE, 1966 NULL, filt_hammer2detach, filt_hammer2read }; 1967 static struct filterops hammer2write_filtops = 1968 { FILTEROP_ISFD | FILTEROP_MPSAFE, 1969 NULL, filt_hammer2detach, filt_hammer2write }; 1970 static struct filterops hammer2vnode_filtops = 1971 { FILTEROP_ISFD | FILTEROP_MPSAFE, 1972 NULL, filt_hammer2detach, filt_hammer2vnode }; 1973 1974 static 1975 int 1976 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap) 1977 { 1978 struct vnode *vp = ap->a_vp; 1979 struct knote *kn = ap->a_kn; 1980 1981 switch (kn->kn_filter) { 1982 case EVFILT_READ: 1983 kn->kn_fop = &hammer2read_filtops; 1984 break; 1985 case EVFILT_WRITE: 1986 kn->kn_fop = &hammer2write_filtops; 1987 break; 1988 case EVFILT_VNODE: 1989 kn->kn_fop = &hammer2vnode_filtops; 1990 break; 1991 default: 1992 return (EOPNOTSUPP); 1993 } 1994 1995 kn->kn_hook = (caddr_t)vp; 1996 1997 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 1998 1999 return(0); 2000 } 2001 2002 static void 2003 filt_hammer2detach(struct knote *kn) 2004 { 2005 struct vnode *vp = (void *)kn->kn_hook; 2006 2007 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 2008 } 2009 2010 static int 2011 filt_hammer2read(struct knote *kn, long hint) 2012 { 2013 struct vnode *vp = (void *)kn->kn_hook; 2014 hammer2_inode_t *ip = VTOI(vp); 2015 off_t off; 2016 2017 if (hint == NOTE_REVOKE) { 2018 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 2019 return(1); 2020 } 2021 off = ip->meta.size - kn->kn_fp->f_offset; 2022 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX; 2023 if (kn->kn_sfflags & NOTE_OLDAPI) 2024 return(1); 2025 return (kn->kn_data != 0); 2026 } 2027 2028 2029 static int 2030 filt_hammer2write(struct knote *kn, long hint) 2031 { 2032 if (hint == NOTE_REVOKE) 2033 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 2034 kn->kn_data = 0; 2035 return (1); 2036 } 2037 2038 static int 2039 filt_hammer2vnode(struct knote *kn, long hint) 2040 { 2041 if (kn->kn_sfflags & hint) 2042 kn->kn_fflags |= hint; 2043 if (hint == NOTE_REVOKE) { 2044 kn->kn_flags |= (EV_EOF | EV_NODATA); 2045 return (1); 2046 } 2047 return (kn->kn_fflags != 0); 2048 } 2049 2050 /* 2051 * FIFO VOPS 2052 */ 2053 static 2054 int 2055 hammer2_vop_markatime(struct vop_markatime_args *ap) 2056 { 2057 hammer2_inode_t *ip; 2058 struct vnode *vp; 2059 2060 vp = ap->a_vp; 2061 ip = VTOI(vp); 2062 2063 if (ip->pmp->ronly) 2064 return(EROFS); 2065 return(0); 2066 } 2067 2068 static 2069 int 2070 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap) 2071 { 2072 int error; 2073 2074 error = VOCALL(&fifo_vnode_vops, &ap->a_head); 2075 if (error) 2076 error = hammer2_vop_kqfilter(ap); 2077 return(error); 2078 } 2079 2080 /* 2081 * VOPS vector 2082 */ 2083 struct vop_ops hammer2_vnode_vops = { 2084 .vop_default = vop_defaultop, 2085 .vop_fsync = hammer2_vop_fsync, 2086 .vop_getpages = vop_stdgetpages, 2087 .vop_putpages = vop_stdputpages, 2088 .vop_access = hammer2_vop_access, 2089 .vop_advlock = hammer2_vop_advlock, 2090 .vop_close = hammer2_vop_close, 2091 .vop_nlink = hammer2_vop_nlink, 2092 .vop_ncreate = hammer2_vop_ncreate, 2093 .vop_nsymlink = hammer2_vop_nsymlink, 2094 .vop_nremove = hammer2_vop_nremove, 2095 .vop_nrmdir = hammer2_vop_nrmdir, 2096 .vop_nrename = hammer2_vop_nrename, 2097 .vop_getattr = hammer2_vop_getattr, 2098 .vop_setattr = hammer2_vop_setattr, 2099 .vop_readdir = hammer2_vop_readdir, 2100 .vop_readlink = hammer2_vop_readlink, 2101 .vop_getpages = vop_stdgetpages, 2102 .vop_putpages = vop_stdputpages, 2103 .vop_read = hammer2_vop_read, 2104 .vop_write = hammer2_vop_write, 2105 .vop_open = hammer2_vop_open, 2106 .vop_inactive = hammer2_vop_inactive, 2107 .vop_reclaim = hammer2_vop_reclaim, 2108 .vop_nresolve = hammer2_vop_nresolve, 2109 .vop_nlookupdotdot = hammer2_vop_nlookupdotdot, 2110 .vop_nmkdir = hammer2_vop_nmkdir, 2111 .vop_nmknod = hammer2_vop_nmknod, 2112 .vop_ioctl = hammer2_vop_ioctl, 2113 .vop_mountctl = hammer2_vop_mountctl, 2114 .vop_bmap = hammer2_vop_bmap, 2115 .vop_strategy = hammer2_vop_strategy, 2116 .vop_kqfilter = hammer2_vop_kqfilter 2117 }; 2118 2119 struct vop_ops hammer2_spec_vops = { 2120 .vop_default = vop_defaultop, 2121 .vop_fsync = hammer2_vop_fsync, 2122 .vop_read = vop_stdnoread, 2123 .vop_write = vop_stdnowrite, 2124 .vop_access = hammer2_vop_access, 2125 .vop_close = hammer2_vop_close, 2126 .vop_markatime = hammer2_vop_markatime, 2127 .vop_getattr = hammer2_vop_getattr, 2128 .vop_inactive = hammer2_vop_inactive, 2129 .vop_reclaim = hammer2_vop_reclaim, 2130 .vop_setattr = hammer2_vop_setattr 2131 }; 2132 2133 struct vop_ops hammer2_fifo_vops = { 2134 .vop_default = fifo_vnoperate, 2135 .vop_fsync = hammer2_vop_fsync, 2136 #if 0 2137 .vop_read = hammer2_vop_fiforead, 2138 .vop_write = hammer2_vop_fifowrite, 2139 #endif 2140 .vop_access = hammer2_vop_access, 2141 #if 0 2142 .vop_close = hammer2_vop_fifoclose, 2143 #endif 2144 .vop_markatime = hammer2_vop_markatime, 2145 .vop_getattr = hammer2_vop_getattr, 2146 .vop_inactive = hammer2_vop_inactive, 2147 .vop_reclaim = hammer2_vop_reclaim, 2148 .vop_setattr = hammer2_vop_setattr, 2149 .vop_kqfilter = hammer2_vop_fifokqfilter 2150 }; 2151 2152