/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

/*
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>
*/

#include "hammer2.h"

/*
static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
			int seqcount);
*/
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
			int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		return (0);
	}

	/*
	 * Acquire the inode lock to interlock against vp updates via
	 * the inode path and file deletions and such (which can be
	 * namespace-only operations that might not hold the vnode).
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * If the inode has been unlinked we can throw away all
		 * buffers (dirty or not) and clean the file out.
		 *
		 * Because vrecycle() calls are not guaranteed, try to
		 * dispose of the inode as much as possible right here.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);

		/*
		 * Delete the file on-media.
		 */
		if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
			atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
			hammer2_inode_delayed_sideq(ip);
		}
		hammer2_inode_unlock(ip);

		/*
		 * Recycle immediately if possible
		 */
		vrecycle(vp);
	} else {
		hammer2_inode_unlock(ip);
	}
	return (0);
#endif
	return (EOPNOTSUPP);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL)
		return(0);

	pmp = ip->pmp;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * The inode lock is required to disconnect it.
	 */
	hammer2_inode_lock(ip, 0);
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * Delete the file on-media.  This should have been handled by the
	 * inactivation.  The operation is likely still queued on the inode
	 * though so only complain if the stars don't align.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED | HAMMER2_INODE_DELETING)) ==
	    HAMMER2_INODE_ISUNLINKED)
	{
		assert(0);
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);
		kprintf("hammer2: vp=%p ip=%p unlinked but not disposed\n",
			vp, ip);
	}
	hammer2_inode_unlock(ip);

	/*
	 * Modified inodes will already be on SIDEQ or SYNCQ, no further
	 * action is needed.
	 *
	 * We cannot safely synchronize the inode from inside the reclaim
	 * due to potentially deep locks held as-of when the reclaim occurs.
	 * Interactions and potential deadlocks abound.  We also can't do it
	 * here without desynchronizing from the related directory entries.
	 */
	hammer2_inode_drop(ip);			/* vp ref */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	return (0);
}

int
hammer2_reclaim(struct vnode *vp)
{
	struct vop_reclaim_args ap = {
		.a_vp = vp,
	};

	return hammer2_vop_reclaim(&ap);
}
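
/*
 * makefs note (sketch): hammer2_reclaim() is the front-end wrapper the
 * makefs code uses to detach a finished inode from its pseudo vnode.  A
 * typical caller-side use, assuming the caller owns vp, would be:
 *
 *	error = hammer2_reclaim(vp);
 *	if (error)
 *		... report and abort the image build ...
 */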

/*
 * Currently this function synchronizes the front-end inode state to the
 * backend chain topology, then flushes the inode's chain and sub-topology
 * to backend media.  This function does not flush the root topology down to
 * the inode.
 */
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct vnode *vp;
	int error1;
	int error2;

	vp = ap->a_vp;
	ip = VTOI(vp);
	error1 = 0;

	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Flush dirty buffers in the file's logical buffer cache.
	 * It is best to wait for the strategy code to commit the
	 * buffers to the device's backing buffer cache before
	 * then trying to flush the inode.
	 *
	 * This should be quick, but certain inode modifications cached
	 * entirely in the hammer2_inode structure may not trigger a
	 * buffer read until the flush so the fsync can wind up also
	 * doing scattered reads.
	 */
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Flush any inode changes
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
		error1 = hammer2_inode_chain_sync(ip);

	/*
	 * Flush dirty chains related to the inode.
	 *
	 * NOTE! We are not in a flush transaction.  The inode remains on
	 *	 the sideq so the filesystem syncer can synchronize it to
	 *	 the volume root.
	 */
	error2 = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
	if (error2)
		error1 = error2;

	/*
	 * We may be able to clear the vnode dirty flag.
	 */
	if ((ip->flags & (HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED |
			  HAMMER2_INODE_DIRTYDATA)) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree) &&
	    !bio_track_active(&vp->v_track_write)) {
		vclrisdirty(vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, 0);

	return (error1);
#endif
	return (EOPNOTSUPP);
}

/*
 * No lock needed, just handle ip->update
 */
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
#if 0
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	mode_t mode;
	uint32_t uflags;
	int error;
	int update;

retry:
	update = spin_access_start(&ip->cluster_spin);

	/*hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);*/
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	mode = ip->meta.mode;
	uflags = ip->meta.uflags;
	/*hammer2_inode_unlock(ip);*/

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	error = vop_helper_access(ap, uid, gid, mode, uflags);

	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
#if 0
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int update;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

retry:
	update = spin_access_start(&ip->cluster_spin);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
					/* atime not supported, report mtime */
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		vap->va_bytes = hammer2_inode_data_count(ip);
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	return (0);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_getattr_lite(struct vop_getattr_lite_args *ap)
{
#if 0
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr_lite *lvap;
	int update;

	vp = ap->a_vp;
	lvap = ap->a_lvap;

	ip = VTOI(vp);
	pmp = ip->pmp;

retry:
	update = spin_access_start(&ip->cluster_spin);

#if 0
	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
#endif
	lvap->va_mode = ip->meta.mode;
	lvap->va_nlink = ip->meta.nlinks;
	lvap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	lvap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
#if 0
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
#endif
	lvap->va_size = ip->meta.size;
#if 0
	vap->va_blocksize = HAMMER2_PBUFSIZE;
#endif
	lvap->va_flags = ip->meta.uflags;
	lvap->va_type = hammer2_get_vtype(ip->meta.type);
#if 0
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;
#endif

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	return (0);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);

	/*
	 * Normally disallow setattr if there is no space, unless we
	 * are in emergency mode (might be needed to chflags -R noschg
	 * files prior to removal).
	 */
	if ((ip->pmp->flags & HAMMER2_PMPF_EMERG) == 0 &&
	    hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1) {
		return (ENOSPC);
	}

	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					hammer2_to_unix_xid(&ip->meta.uid),
					ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				hammer2_spin_lock_update(&ip->cluster_spin);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				hammer2_spin_unlock_update(&ip->cluster_spin);
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				hammer2_spin_lock_update(&ip->cluster_spin);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
				hammer2_spin_unlock_update(&ip->cluster_spin);
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
				kflags |= NOTE_WRITE;
			} else {
				hammer2_extend_file(ip, vap->va_size);
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			vclrflags(vp, VLASTWRITETS);
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_inode_modify(ip);
			hammer2_spin_lock_update(&ip->cluster_spin);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			hammer2_spin_unlock_update(&ip->cluster_spin);
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
		vclrflags(vp, VLASTWRITETS);
	}

done:
	/*
	 * If a truncation occurred we must call
	 * chain_sync() now in order to trim the related data chains,
	 * otherwise a later expansion can cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_chain_sync now in order to prepare the inode's indirect
	 * block table.
	 *
	 * WARNING! This means we are making an adjustment to the inode's
	 * chain outside of sync/fsync, and not just to inode->meta, which
	 * may result in some consistency issues if a crash were to occur
	 * at just the wrong time.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ip->vp, kflags);

	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
#if 0
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, &hammer2_readdir_desc);

	for (;;) {
		const hammer2_inode_data_t *ripdata;
		const char *dname;
		int dtype;

		error = hammer2_xop_collect(&xop->head, 0);
		error = hammer2_error_to_errno(error);
		if (error) {
			break;
		}
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		hammer2_cluster_bref(&xop->head.cluster, &bref);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
			dtype = hammer2_get_dtype(ripdata->meta.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			hammer2_xop_pdata(&xop->head);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
			uint16_t namlen;

			dtype = hammer2_get_dtype(bref.embed.dirent.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			namlen = bref.embed.dirent.namlen;
			if (namlen <= sizeof(bref.check.buf)) {
				dname = bref.check.buf;
			} else {
				dname = hammer2_xop_gdata(&xop->head)->buf;
			}
			r = vop_write_dirent(&error, uio,
					     bref.embed.dirent.inum, dtype,
					     namlen, dname);
			if (namlen > sizeof(bref.check.buf))
				hammer2_xop_pdata(&xop->head);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
#if 0
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
#if 0
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type == VDIR)
		return (EISDIR);
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	//thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int ioflag;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
	case 2:
		return (ENOSPC);
	case 1:
		ioflag |= IO_DIRECT;	/* semi-synchronous */
		/* fall through */
	default:
		break;
	}

	seqcount = ioflag >> IO_SEQSHIFT;

	/*
	 * Check resource limit
	 */
	/*
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}
	*/

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 *
	 * To avoid deadlocking against the VM system, we must flag any
	 * transaction related to the buffer cache or other direct
	 * VM page manipulation.
	 */
	if (uio->uio_segflg == UIO_NOCOPY) {
		assert(0); /* no UIO_NOCOPY in makefs */
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	} else {
		hammer2_trans_init(ip->pmp, 0);
	}
	error = hammer2_write_file(ip, uio, ioflag, seqcount);
	if (uio->uio_segflg == UIO_NOCOPY) {
		assert(0); /* no UIO_NOCOPY in makefs */
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE |
					    HAMMER2_TRANS_SIDEQ);
	} else
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);

	return (error);
}

int
hammer2_write(struct vnode *vp, void *buf, size_t size, off_t offset)
{
	assert(buf);
	assert(size > 0);
	assert(size <= HAMMER2_PBUFSIZE);

	struct iovec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct uio uio = {
		.uio_iov = &iov,
		.uio_iovcnt = 1,
		.uio_offset = offset,
		.uio_resid = size,
		.uio_segflg = UIO_USERSPACE,
		.uio_rw = UIO_WRITE,
		.uio_td = NULL,
	};
	struct vop_write_args ap = {
		.a_vp = vp,
		.a_uio = &uio,
		.a_ioflag = 0,
		.a_cred = NULL,
	};

	return hammer2_vop_write(&ap);
}
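
/*
 * makefs note (sketch): hammer2_write() is the simple front end used when
 * populating the image.  Each call writes at most one physical buffer
 * (HAMMER2_PBUFSIZE bytes, per the asserts above), so a file is written as
 * a sequence of calls.  Caller-side bookkeeping here is hypothetical:
 *
 *	off_t off = 0;
 *	ssize_t n;
 *	while ((n = read(fd, buf, HAMMER2_PBUFSIZE)) > 0) {
 *		error = hammer2_write(vp, buf, n, off);
 *		if (error)
 *			break;
 *		off += n;
 *	}
 */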

#if 0
/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		bp = NULL;
		error = cluster_readx(ip->vp, leof, lbase, lblksize,
				      B_NOTMETA | B_KVABIO,
				      uio->uio_resid,
				      seqcount * MAXBSIZE,
				      &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;
					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						if (m)
							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		bkvasync(bp);
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}
#endif

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= MAXBSIZE);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;
		trivial = 1; /* force trivial for makefs */

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			assert(0); /* no UIO_NOCOPY in makefs */
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblkx(ip->vp, lbase, lblksize,
				     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			/*
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread_kvabio(ip->vp, lbase,
						     lblksize, &bp);
			}
			*/
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblkx(ip->vp, lbase, lblksize,
				     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			/*
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
			*/
		} else {
			assert(0); /* no partial write in makefs */
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		bkvasync(bp);
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 */
		if (1) {
			bp->b_cmd = BUF_CMD_WRITE;

			struct bio bio;
			bio.bio_buf = bp;
			bio.bio_offset = lbase;

			struct vop_strategy_args ap;
			ap.a_vp = ip->vp;
			ap.a_bio = &bio;

			error = hammer2_vop_strategy(&ap);
			assert(!error);

			brelse(bp);
		} else if (ioflag & IO_SYNC) {
			assert(0);
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			assert(0);
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			assert(0);
			bawrite(bp);
		} else if (0 /*ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW*/) {
			assert(0);
			bdwrite(bp);
		} else {
			assert(0);
#if 0
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
#endif
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write it all
	 * the way through, the entire write is a failure and we must
	 * back up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);		/* note lock order */
		hammer2_mtx_ex(&ip->truncate_lock);	/* note lock order */
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		struct vnode *vp = ip->vp;

		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		if (uio->uio_segflg == UIO_NOCOPY) {
			assert(0); /* no UIO_NOCOPY in makefs */
			/*
			if (vp->v_flag & VLASTWRITETS) {
				ip->meta.mtime =
				    (unsigned long)vp->v_lastwrite_ts.tv_sec *
				    1000000 +
				    vp->v_lastwrite_ts.tv_nsec / 1000;
			}
			*/
		} else {
			hammer2_update_time(&ip->meta.mtime);
			vclrflags(vp, VLASTWRITETS);
		}

#if 0
		/*
		 * REMOVED - handled by hammer2_extend_file().  Do not issue
		 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
		 * state changes.
		 *
		 * Under normal conditions we only issue a chain_sync if
		 * the inode's DIRECTDATA state changed.
		 */
		if (ip->flags & HAMMER2_INODE_RESIZED)
			hammer2_inode_chain_sync(ip);
#endif
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}
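
/*
 * makefs note: the write loop above always takes the synchronous strategy
 * path and forces the "trivial" (full-buffer) case, which never reads the
 * old buffer contents back in.  This is safe under the assumption that
 * makefs generates file data sequentially from offset 0 and never rewrites
 * a partial block; the assert(0) branches document the kernel-only paths
 * that are unreachable here.
 */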

/*
 * Truncate the size of a file.  The inode must be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}

/*
 * Extend the size of a file.  The inode must be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;
	int error;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	/*
	 * We must issue a chain_sync() when the DIRECTDATA state changes
	 * to prevent confusion between the flush code and the in-memory
	 * state.  This is not perfect because we are doing it outside of
	 * a sync/fsync operation, so it might not be fully synchronized
	 * with the meta-data topology flush.
	 *
	 * We must retain and re-dirty the buffer cache buffer containing
	 * the direct data so it can be written to a real block.  It should
	 * not be possible for a bread error to occur since the original data
	 * is extracted from the inode structure directly.
	 */
	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		if (osize) {
			assert(0); /* no such transition in makefs */
			struct buf *bp;

			oblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
			error = bread_kvabio(ip->vp, 0, oblksize, &bp);
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
			if (error == 0) {
				bheavy(bp);
				bdwrite(bp);
			} else {
				brelse(bp);
			}
		} else {
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
		}
	}
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}

static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);

	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			*ap->a_vpp = vp;
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	*/

	return error;
}

int
hammer2_nresolve(struct vnode *dvp, struct vnode **vpp, char *name, int nlen)
{
	*vpp = NULL;
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nresolve_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
	};

	return hammer2_vop_nresolve(&ap);
}
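
/*
 * makefs note (sketch): hammer2_nresolve() is the front end used to look
 * up an existing name under dvp, e.g. when revisiting a directory that was
 * already created during the image build.  Caller-side variables here are
 * hypothetical:
 *
 *	struct vnode *vp;
 *	error = hammer2_nresolve(dvp, &vp, name, strlen(name));
 *	if (error == 0)
 *		... vp refers to the existing entry ...
 */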

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
#if 0
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	return error;
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
		/* returns UNIX error code */
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		/*
		 * inode_depend() must occur before the igetv() because
		 * the igetv() can temporarily release the inode lock.
		 */
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 *
	 * We can use a shared inode lock and allow the meta.mtime update
	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}

int
hammer2_nmkdir(struct vnode *dvp, struct vnode **vpp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	gid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VDIR,
		.va_mode = 0755,	/* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmkdir_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmkdir(&ap);
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
#if 0
	return vop_stdopen(ap);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
#if 0
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
#if 0
	return vop_stdclose(ap);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;
	uint64_t cmtime;

	/* We know it's the same in makefs */
	/*
	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);
	*/

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly || (tdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock4(tdip, ip, NULL, NULL);

	hammer2_update_time(&cmtime);

	/*
	 * Create the directory entry and bump nlinks.
	 * Also update ip's ctime.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
		ip->meta.ctime = cmtime;
	}
	if (error == 0) {
		/*
		 * Update dip's [cm]time
		 */
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = cmtime;
		tdip->meta.ctime = cmtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

	return error;
}

int
hammer2_nlink(struct vnode *dvp, struct vnode *vp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nlink_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vp = vp,
	};

	return hammer2_vop_nlink(&ap);
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);

	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

int
hammer2_ncreate(struct vnode *dvp, struct vnode **vpp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	gid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VREG,
		.va_mode = 0644,	/* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_ncreate_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_ncreate(&ap);
}
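
/*
 * makefs note (sketch): creating and populating a regular file combines
 * the hammer2_ncreate() and hammer2_write() front ends.  Caller-side
 * variables here are hypothetical:
 *
 *	struct vnode *vp;
 *	error = hammer2_ncreate(dvp, &vp, name, strlen(name));
 *	if (error == 0)
 *		error = hammer2_write(vp, buf, size, 0);
 */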

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

int
hammer2_nmknod(struct vnode *dvp, struct vnode **vpp, char *name, int nlen,
	       int type)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	gid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = type,
		.va_mode = 0644,	/* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmknod_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmknod(&ap);
}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * Create the softlink as an inode and then create the directory
	 * entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
		hammer2_inode_unlock(dip);
		hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
		return error;
	}
	hammer2_inode_depend(dip, nip);	/* before igetv */
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

int
hammer2_nsymlink(struct vnode *dvp, struct vnode **vpp, char *name, int nlen,
		 char *target)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	gid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VLNK,	/* the vop enforces VLNK anyway */
		.va_mode = 0755,	/* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nsymlink_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
		.a_target = target,
	};

	return hammer2_vop_nsymlink(&ap);
}
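
/*
 * makefs note (sketch): a symlink's target path is stored as ordinary file
 * data on the new inode via hammer2_write_file() above.  Caller-side
 * variables here are hypothetical:
 *
 *	struct vnode *vp;
 *	error = hammer2_nsymlink(dvp, &vp, name, strlen(name), target);
 */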
/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
#if 0
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct vnode *vprecycle;
	struct namecache *ncp;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	ncp = ap->a_nch->ncp;

	if (hammer2_debug_inode && dip->meta.inum == hammer2_debug_inode) {
		kprintf("hammer2: attempt to delete inside debug inode: %s\n",
			ncp->nc_name);
		while (hammer2_debug_inode &&
		       dip->meta.inum == hammer2_debug_inode) {
			tsleep(&hammer2_debug_inode, 0, "h2debug", hz*5);
		}
	}

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	vprecycle = NULL;

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			if (hammer2_debug_inode &&
			    ip->meta.inum == hammer2_debug_inode) {
				kprintf("hammer2: attempt to delete debug "
					"inode!\n");
				while (hammer2_debug_inode &&
				       ip->meta.inum == hammer2_debug_inode) {
					tsleep(&hammer2_debug_inode, 0,
					       "h2debug", hz*5);
				}
			}
			hammer2_inode_unlink_finisher(ip, &vprecycle);
			hammer2_inode_depend(dip, ip); /* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);

	return (error);
#endif
	return (EOPNOTSUPP);
}

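/*
 * NOTE: The disabled unlink paths above and below follow the general
 * frontend XOP pattern: allocate an XOP, attach the name, start the
 * backend operation, collect the result, then retire the XOP.  Sketch
 * (illustrative only, not compiled; names mirror the code above and
 * error handling is elided):
 */
#if 0
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, name, name_len);
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);
	error = hammer2_error_to_errno(hammer2_xop_collect(&xop->head, 0));
	/* ...consume results via xop->head / hammer2_inode_get()... */
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif
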
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
#if 0
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	struct vnode *vprecycle;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	vprecycle = NULL;

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, &vprecycle);
			hammer2_inode_depend(dip, ip); /* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);
	return (error);
#endif
	return (EOPNOTSUPP);
}

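/*
 * NOTE: In the unlink/rmdir paths above and the rename path below,
 * hammer2_inode_unlink_finisher() may hand back a vnode via *vprecycle.
 * That vnode is deliberately recycled with hammer2_inode_vprecycle()
 * only after the inode locks have been released and the transaction is
 * done, since recycling while those are still held could deadlock.
 */
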
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
#if 0
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;	/* source directory */
	hammer2_inode_t *tdip;	/* target directory */
	hammer2_inode_t *ip;	/* file being renamed */
	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
	struct vnode *vprecycle;
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;
	int update_tdip;
	int update_fdip;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly || (fdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	ip = VTOI(fncp->nc_vp);
	hammer2_inode_ref(ip);		/* extra ref */

	/*
	 * Lookup the target name to determine if a directory entry
	 * is being overwritten.  We only hold related inode locks
	 * temporarily; the operating system is expected to protect
	 * against rename races.
	 */
	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
	if (tip)
		hammer2_inode_ref(tip);	/* extra ref */

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 *
	 * For now try to avoid deadlocks with a simple pointer address
	 * test.  (tip) can be NULL.
	 */
	error = 0;
	{
		hammer2_inode_t *ip1 = fdip;
		hammer2_inode_t *ip2 = tdip;
		hammer2_inode_t *ip3 = ip;
		hammer2_inode_t *ip4 = tip;	/* may be NULL */

		if (fdip > tdip) {
			ip1 = tdip;
			ip2 = fdip;
		}
		if (tip && ip > tip) {
			ip3 = tip;
			ip4 = ip;
		}
		hammer2_inode_lock4(ip1, ip2, ip3, ip4);
	}

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races since
	 * multiple filenames can end up in the same collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}

	/*
	 * Ready to go, issue the rename to the backend.  Note that meta-data
	 * updates to the related inodes occur separately from the rename
	 * operation.
	 *
	 * NOTE: While it is not necessary to update ip->meta.name*, doing
	 *	 so aids catastrophic recovery and debugging.
	 */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		if (tip && tip->meta.type == HAMMER2_OBJTYPE_DIRECTORY)
			hammer2_xop_setip4(&xop4->head, tip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, &hammer2_nrename_desc);

		error = hammer2_xop_collect(&xop4->head, 0);
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;

		/*
		 * Update inode meta-data.
		 *
		 * WARNING! The in-memory inode (ip) structure does not
		 *	    maintain a copy of the inode's filename buffer.
		 */
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		if (error == 0) {
			hammer2_inode_modify(ip);
			ip->meta.iparent = tdip->meta.inum;
		}
		update_fdip = 1;
		update_tdip = 1;
	}

done2:
	/*
	 * If no error, the backend has replaced the target directory entry.
	 * We must adjust nlinks on the replaced target if it exists.
	 */
	vprecycle = NULL;
	if (error == 0 && tip) {
		hammer2_inode_unlink_finisher(tip, &vprecycle);
	}

	/*
	 * Update directory mtimes to reflect that something changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	if (tip) {
		hammer2_inode_unlock(tip);
		hammer2_inode_drop(tip);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_trans_done(tdip->pmp, HAMMER2_TRANS_SIDEQ);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer2 structures, otherwise we might deadlock.
	 *
	 * WARNING! The target namespace must be updated atomically,
	 *	    and we depend on cache_rename() to handle that for
	 *	    us.  Do not do a separate cache_unlink() because
	 *	    that leaves a small window of opportunity for other
	 *	    threads to allocate the target namespace before we
	 *	    manage to complete our rename.
	 *
	 * WARNING! cache_rename() (and cache_unlink()) will properly
	 *	    set VREF_FINALIZE on any attached vnode.  Do not
	 *	    call cache_setunresolved() manually before-hand as
	 *	    this will prevent the flag from being set later via
	 *	    cache_rename().  If VREF_FINALIZE is not properly set
	 *	    and the inode is no longer in the topology, related
	 *	    chains can remain dirty indefinitely.
	 */
	if (error == 0 && tip) {
		/*cache_unlink(ap->a_tnch); see above */
		/*cache_setunresolved(ap->a_tnch); see above */
	}
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);

	return (error);
#endif
	return (EOPNOTSUPP);
}

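/*
 * NOTE: In the collision-space resolution above, hammer2_dirhash()
 * supplies the base directory-hash key for the target name and the
 * scanlhc XOP iterates the keys already in use so that ++tlhc can step
 * past them.  The low bits of the key (HAMMER2_DIRHASH_LOMASK) form the
 * iteration domain; the (lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK test
 * detects that the iterator stepped outside that domain, meaning the
 * collision space for that filename is full, and the rename fails with
 * ENOSPC.
 */
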
/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	int error;

	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
#if 0
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	return (rc);
#endif
	return (EOPNOTSUPP);
}

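/*
 * NOTE: When the mountctl path above is enabled, MOUNTCTL_SET_EXPORT
 * validates that the caller passed a complete struct export_args and
 * then installs the NFS export configuration on the PFS via
 * vfs_export(); any other operation falls through to vop_stdmountctl().
 * In this port both entry points are stubbed to EOPNOTSUPP.
 */
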
/*
 * KQFILTER
 */
/*
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };
*/

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
#if 0
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
#endif
	return (EOPNOTSUPP);
}

#if 0
static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}
#endif

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	return(0);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
#if 0
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
#endif
	return (EOPNOTSUPP);
}

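/*
 * NOTE: When the kqfilter paths above are enabled, hammer2_vop_kqfilter()
 * selects one of the per-filesystem filterops based on kn->kn_filter and
 * hangs the knote off the vnode's ki_note list; the filt_hammer2*()
 * callbacks then report readiness (e.g. the read filter computes the
 * bytes remaining past f_offset) and filt_hammer2detach() unhooks the
 * knote again.
 */
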
/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_getpages = vop_stdgetpages,
	.vop_putpages = vop_stdputpages,
	.vop_access = hammer2_vop_access,
	.vop_advlock = hammer2_vop_advlock,
	.vop_close = hammer2_vop_close,
	.vop_nlink = hammer2_vop_nlink,
	.vop_ncreate = hammer2_vop_ncreate,
	.vop_nsymlink = hammer2_vop_nsymlink,
	.vop_nremove = hammer2_vop_nremove,
	.vop_nrmdir = hammer2_vop_nrmdir,
	.vop_nrename = hammer2_vop_nrename,
	.vop_getattr = hammer2_vop_getattr,
	.vop_getattr_lite = hammer2_vop_getattr_lite,
	.vop_setattr = hammer2_vop_setattr,
	.vop_readdir = hammer2_vop_readdir,
	.vop_readlink = hammer2_vop_readlink,
	.vop_read = hammer2_vop_read,
	.vop_write = hammer2_vop_write,
	.vop_open = hammer2_vop_open,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_nresolve = hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir = hammer2_vop_nmkdir,
	.vop_nmknod = hammer2_vop_nmknod,
	.vop_ioctl = hammer2_vop_ioctl,
	.vop_mountctl = hammer2_vop_mountctl,
	.vop_bmap = hammer2_vop_bmap,
	.vop_strategy = hammer2_vop_strategy,
	.vop_kqfilter = hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_read = vop_stdnoread,
	.vop_write = vop_stdnowrite,
	.vop_access = hammer2_vop_access,
	.vop_close = hammer2_vop_close,
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default = fifo_vnoperate,
	.vop_fsync = hammer2_vop_fsync,
#if 0
	.vop_read = hammer2_vop_fiforead,
	.vop_write = hammer2_vop_fifowrite,
#endif
	.vop_access = hammer2_vop_access,
#if 0
	.vop_close = hammer2_vop_fifoclose,
#endif
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr,
	.vop_kqfilter = hammer2_vop_fifokqfilter
};