/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE!
local ipdata pointers must be reloaded on any modifying operation 41 * to the inode as its underlying chain may have changed. 42 */ 43 44 /* 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/fcntl.h> 49 #include <sys/buf.h> 50 #include <sys/proc.h> 51 #include <sys/mount.h> 52 #include <sys/vnode.h> 53 #include <sys/mountctl.h> 54 #include <sys/dirent.h> 55 #include <sys/uio.h> 56 #include <sys/objcache.h> 57 #include <sys/event.h> 58 #include <sys/file.h> 59 #include <vfs/fifofs/fifo.h> 60 */ 61 62 #include "hammer2.h" 63 64 /* 65 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, 66 int seqcount); 67 */ 68 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, 69 int ioflag, int seqcount); 70 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize); 71 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize); 72 73 /* 74 * Last reference to a vnode is going away but it is still cached. 75 */ 76 static 77 int 78 hammer2_vop_inactive(struct vop_inactive_args *ap) 79 { 80 #if 0 81 hammer2_inode_t *ip; 82 struct m_vnode *vp; 83 84 vp = ap->a_vp; 85 ip = VTOI(vp); 86 87 /* 88 * Degenerate case 89 */ 90 if (ip == NULL) { 91 vrecycle(vp); 92 return (0); 93 } 94 95 /* 96 * Aquire the inode lock to interlock against vp updates via 97 * the inode path and file deletions and such (which can be 98 * namespace-only operations that might not hold the vnode). 99 */ 100 hammer2_inode_lock(ip, 0); 101 if (ip->flags & HAMMER2_INODE_ISUNLINKED) { 102 hammer2_key_t lbase; 103 int nblksize; 104 105 /* 106 * If the inode has been unlinked we can throw away all 107 * buffers (dirty or not) and clean the file out. 108 * 109 * Because vrecycle() calls are not guaranteed, try to 110 * dispose of the inode as much as possible right here. 
111 */ 112 nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL); 113 nvtruncbuf(vp, 0, nblksize, 0, 0); 114 115 /* 116 * Delete the file on-media. 117 */ 118 if ((ip->flags & HAMMER2_INODE_DELETING) == 0) { 119 atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING); 120 hammer2_inode_delayed_sideq(ip); 121 } 122 hammer2_inode_unlock(ip); 123 124 /* 125 * Recycle immediately if possible 126 */ 127 vrecycle(vp); 128 } else { 129 hammer2_inode_unlock(ip); 130 } 131 return (0); 132 #endif 133 return (EOPNOTSUPP); 134 } 135 136 /* 137 * Reclaim a vnode so that it can be reused; after the inode is 138 * disassociated, the filesystem must manage it alone. 139 */ 140 static 141 int 142 hammer2_vop_reclaim(struct vop_reclaim_args *ap) 143 { 144 hammer2_inode_t *ip; 145 struct m_vnode *vp; 146 147 vp = ap->a_vp; 148 ip = VTOI(vp); 149 if (ip == NULL) 150 return(0); 151 152 /* 153 * NOTE! We do not attempt to flush chains here, flushing is 154 * really fragile and could also deadlock. 155 */ 156 vclrisdirty(vp); 157 158 /* 159 * The inode lock is required to disconnect it. 160 */ 161 hammer2_inode_lock(ip, 0); 162 vp->v_data = NULL; 163 ip->vp = NULL; 164 165 /* 166 * Delete the file on-media. This should have been handled by the 167 * inactivation. The operation is likely still queued on the inode 168 * though so only complain if the stars don't align. 169 */ 170 if ((ip->flags & (HAMMER2_INODE_ISUNLINKED | HAMMER2_INODE_DELETING)) == 171 HAMMER2_INODE_ISUNLINKED) 172 { 173 assert(0); 174 atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING); 175 hammer2_inode_delayed_sideq(ip); 176 kprintf("hammer2: vp=%p ip=%p unlinked but not disposed\n", 177 vp, ip); 178 } 179 hammer2_inode_unlock(ip); 180 181 /* 182 * Modified inodes will already be on SIDEQ or SYNCQ, no further 183 * action is needed. 184 * 185 * We cannot safely synchronize the inode from inside the reclaim 186 * due to potentially deep locks held as-of when the reclaim occurs. 
187 * Interactions and potential deadlocks abound. We also can't do it 188 * here without desynchronizing from the related directory entrie(s). 189 */ 190 hammer2_inode_drop(ip); /* vp ref */ 191 192 /* 193 * XXX handle background sync when ip dirty, kernel will no longer 194 * notify us regarding this inode because there is no longer a 195 * vnode attached to it. 196 */ 197 198 return (0); 199 } 200 201 int 202 hammer2_reclaim(struct m_vnode *vp) 203 { 204 struct vop_reclaim_args ap = { 205 .a_vp = vp, 206 }; 207 208 return hammer2_vop_reclaim(&ap); 209 } 210 211 /* 212 * Currently this function synchronizes the front-end inode state to the 213 * backend chain topology, then flushes the inode's chain and sub-topology 214 * to backend media. This function does not flush the root topology down to 215 * the inode. 216 */ 217 static 218 int 219 hammer2_vop_fsync(struct vop_fsync_args *ap) 220 { 221 #if 0 222 hammer2_inode_t *ip; 223 struct m_vnode *vp; 224 int error1; 225 int error2; 226 227 vp = ap->a_vp; 228 ip = VTOI(vp); 229 error1 = 0; 230 231 hammer2_trans_init(ip->pmp, 0); 232 233 /* 234 * Flush dirty buffers in the file's logical buffer cache. 235 * It is best to wait for the strategy code to commit the 236 * buffers to the device's backing buffer cache before 237 * then trying to flush the inode. 238 * 239 * This should be quick, but certain inode modifications cached 240 * entirely in the hammer2_inode structure may not trigger a 241 * buffer read until the flush so the fsync can wind up also 242 * doing scattered reads. 243 */ 244 vfsync(vp, ap->a_waitfor, 1, NULL, NULL); 245 bio_track_wait(&vp->v_track_write, 0, 0); 246 247 /* 248 * Flush any inode changes 249 */ 250 hammer2_inode_lock(ip, 0); 251 if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED)) 252 error1 = hammer2_inode_chain_sync(ip); 253 254 /* 255 * Flush dirty chains related to the inode. 256 * 257 * NOTE! We are not in a flush transaction. 
The inode remains on 258 * the sideq so the filesystem syncer can synchronize it to 259 * the volume root. 260 */ 261 error2 = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP); 262 if (error2) 263 error1 = error2; 264 265 /* 266 * We may be able to clear the vnode dirty flag. 267 */ 268 if ((ip->flags & (HAMMER2_INODE_MODIFIED | 269 HAMMER2_INODE_RESIZED | 270 HAMMER2_INODE_DIRTYDATA)) == 0 && 271 RB_EMPTY(&vp->v_rbdirty_tree) && 272 !bio_track_active(&vp->v_track_write)) { 273 vclrisdirty(vp); 274 } 275 hammer2_inode_unlock(ip); 276 hammer2_trans_done(ip->pmp, 0); 277 278 return (error1); 279 #endif 280 return (EOPNOTSUPP); 281 } 282 283 /* 284 * No lock needed, just handle ip->update 285 */ 286 static 287 int 288 hammer2_vop_access(struct vop_access_args *ap) 289 { 290 #if 0 291 hammer2_inode_t *ip = VTOI(ap->a_vp); 292 uid_t uid; 293 gid_t gid; 294 mode_t mode; 295 uint32_t uflags; 296 int error; 297 int update; 298 299 retry: 300 update = spin_access_start(&ip->cluster_spin); 301 302 /*hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);*/ 303 uid = hammer2_to_unix_xid(&ip->meta.uid); 304 gid = hammer2_to_unix_xid(&ip->meta.gid); 305 mode = ip->meta.mode; 306 uflags = ip->meta.uflags; 307 /*hammer2_inode_unlock(ip);*/ 308 309 if (__predict_false(spin_access_end(&ip->cluster_spin, update))) 310 goto retry; 311 312 error = vop_helper_access(ap, uid, gid, mode, uflags); 313 314 return (error); 315 #endif 316 return (EOPNOTSUPP); 317 } 318 319 static 320 int 321 hammer2_vop_getattr(struct vop_getattr_args *ap) 322 { 323 #if 0 324 hammer2_pfs_t *pmp; 325 hammer2_inode_t *ip; 326 struct m_vnode *vp; 327 struct vattr *vap; 328 int update; 329 330 vp = ap->a_vp; 331 vap = ap->a_vap; 332 333 ip = VTOI(vp); 334 pmp = ip->pmp; 335 336 retry: 337 update = spin_access_start(&ip->cluster_spin); 338 339 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0]; 340 vap->va_fileid = ip->meta.inum; 341 vap->va_mode = ip->meta.mode; 342 vap->va_nlink = ip->meta.nlinks; 343 vap->va_uid = 
hammer2_to_unix_xid(&ip->meta.uid); 344 vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid); 345 vap->va_rmajor = 0; 346 vap->va_rminor = 0; 347 vap->va_size = ip->meta.size; /* protected by shared lock */ 348 vap->va_blocksize = HAMMER2_PBUFSIZE; 349 vap->va_flags = ip->meta.uflags; 350 hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime); 351 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime); 352 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime); 353 vap->va_gen = 1; 354 vap->va_bytes = 0; 355 if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) { 356 /* 357 * Can't really calculate directory use sans the files under 358 * it, just assume one block for now. 359 */ 360 vap->va_bytes += HAMMER2_INODE_BYTES; 361 } else { 362 vap->va_bytes = hammer2_inode_data_count(ip); 363 } 364 vap->va_type = hammer2_get_vtype(ip->meta.type); 365 vap->va_filerev = 0; 366 vap->va_uid_uuid = ip->meta.uid; 367 vap->va_gid_uuid = ip->meta.gid; 368 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID | 369 VA_FSID_UUID_VALID; 370 371 if (__predict_false(spin_access_end(&ip->cluster_spin, update))) 372 goto retry; 373 374 return (0); 375 #endif 376 return (EOPNOTSUPP); 377 } 378 379 static 380 int 381 hammer2_vop_getattr_lite(struct vop_getattr_lite_args *ap) 382 { 383 #if 0 384 hammer2_pfs_t *pmp; 385 hammer2_inode_t *ip; 386 struct m_vnode *vp; 387 struct vattr_lite *lvap; 388 int update; 389 390 vp = ap->a_vp; 391 lvap = ap->a_lvap; 392 393 ip = VTOI(vp); 394 pmp = ip->pmp; 395 396 retry: 397 update = spin_access_start(&ip->cluster_spin); 398 399 #if 0 400 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0]; 401 vap->va_fileid = ip->meta.inum; 402 #endif 403 lvap->va_mode = ip->meta.mode; 404 lvap->va_nlink = ip->meta.nlinks; 405 lvap->va_uid = hammer2_to_unix_xid(&ip->meta.uid); 406 lvap->va_gid = hammer2_to_unix_xid(&ip->meta.gid); 407 #if 0 408 vap->va_rmajor = 0; 409 vap->va_rminor = 0; 410 #endif 411 lvap->va_size = ip->meta.size; 412 #if 0 413 vap->va_blocksize = 
HAMMER2_PBUFSIZE; 414 #endif 415 lvap->va_flags = ip->meta.uflags; 416 lvap->va_type = hammer2_get_vtype(ip->meta.type); 417 #if 0 418 vap->va_filerev = 0; 419 vap->va_uid_uuid = ip->meta.uid; 420 vap->va_gid_uuid = ip->meta.gid; 421 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID | 422 VA_FSID_UUID_VALID; 423 #endif 424 425 if (__predict_false(spin_access_end(&ip->cluster_spin, update))) 426 goto retry; 427 428 return (0); 429 #endif 430 return (EOPNOTSUPP); 431 } 432 433 static 434 int 435 hammer2_vop_setattr(struct vop_setattr_args *ap) 436 { 437 #if 0 438 hammer2_inode_t *ip; 439 struct m_vnode *vp; 440 struct vattr *vap; 441 int error; 442 int kflags = 0; 443 uint64_t ctime; 444 445 vp = ap->a_vp; 446 vap = ap->a_vap; 447 hammer2_update_time(&ctime); 448 449 ip = VTOI(vp); 450 451 if (ip->pmp->ronly) 452 return (EROFS); 453 454 /* 455 * Normally disallow setattr if there is no space, unless we 456 * are in emergency mode (might be needed to chflags -R noschg 457 * files prior to removal). 
458 */ 459 if ((ip->pmp->flags & HAMMER2_PMPF_EMERG) == 0 && 460 hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1) { 461 return (ENOSPC); 462 } 463 464 hammer2_trans_init(ip->pmp, 0); 465 hammer2_inode_lock(ip, 0); 466 error = 0; 467 468 if (vap->va_flags != VNOVAL) { 469 uint32_t flags; 470 471 flags = ip->meta.uflags; 472 error = vop_helper_setattr_flags(&flags, vap->va_flags, 473 hammer2_to_unix_xid(&ip->meta.uid), 474 ap->a_cred); 475 if (error == 0) { 476 if (ip->meta.uflags != flags) { 477 hammer2_inode_modify(ip); 478 hammer2_spin_lock_update(&ip->cluster_spin); 479 ip->meta.uflags = flags; 480 ip->meta.ctime = ctime; 481 hammer2_spin_unlock_update(&ip->cluster_spin); 482 kflags |= NOTE_ATTRIB; 483 } 484 if (ip->meta.uflags & (IMMUTABLE | APPEND)) { 485 error = 0; 486 goto done; 487 } 488 } 489 goto done; 490 } 491 if (ip->meta.uflags & (IMMUTABLE | APPEND)) { 492 error = EPERM; 493 goto done; 494 } 495 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { 496 mode_t cur_mode = ip->meta.mode; 497 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid); 498 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid); 499 uuid_t uuid_uid; 500 uuid_t uuid_gid; 501 502 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid, 503 ap->a_cred, 504 &cur_uid, &cur_gid, &cur_mode); 505 if (error == 0) { 506 hammer2_guid_to_uuid(&uuid_uid, cur_uid); 507 hammer2_guid_to_uuid(&uuid_gid, cur_gid); 508 if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) || 509 bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) || 510 ip->meta.mode != cur_mode 511 ) { 512 hammer2_inode_modify(ip); 513 hammer2_spin_lock_update(&ip->cluster_spin); 514 ip->meta.uid = uuid_uid; 515 ip->meta.gid = uuid_gid; 516 ip->meta.mode = cur_mode; 517 ip->meta.ctime = ctime; 518 hammer2_spin_unlock_update(&ip->cluster_spin); 519 } 520 kflags |= NOTE_ATTRIB; 521 } 522 } 523 524 /* 525 * Resize the file 526 */ 527 if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) { 528 switch(vp->v_type) { 529 case 
VREG: 530 if (vap->va_size == ip->meta.size) 531 break; 532 if (vap->va_size < ip->meta.size) { 533 hammer2_mtx_ex(&ip->truncate_lock); 534 hammer2_truncate_file(ip, vap->va_size); 535 hammer2_mtx_unlock(&ip->truncate_lock); 536 kflags |= NOTE_WRITE; 537 } else { 538 hammer2_extend_file(ip, vap->va_size); 539 kflags |= NOTE_WRITE | NOTE_EXTEND; 540 } 541 hammer2_inode_modify(ip); 542 ip->meta.mtime = ctime; 543 vclrflags(vp, VLASTWRITETS); 544 break; 545 default: 546 error = EINVAL; 547 goto done; 548 } 549 } 550 #if 0 551 /* atime not supported */ 552 if (vap->va_atime.tv_sec != VNOVAL) { 553 hammer2_inode_modify(ip); 554 ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime); 555 kflags |= NOTE_ATTRIB; 556 } 557 #endif 558 if (vap->va_mode != (mode_t)VNOVAL) { 559 mode_t cur_mode = ip->meta.mode; 560 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid); 561 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid); 562 563 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred, 564 cur_uid, cur_gid, &cur_mode); 565 if (error == 0) { 566 hammer2_inode_modify(ip); 567 hammer2_spin_lock_update(&ip->cluster_spin); 568 ip->meta.mode = cur_mode; 569 ip->meta.ctime = ctime; 570 hammer2_spin_unlock_update(&ip->cluster_spin); 571 kflags |= NOTE_ATTRIB; 572 } 573 } 574 575 if (vap->va_mtime.tv_sec != VNOVAL) { 576 hammer2_inode_modify(ip); 577 ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime); 578 kflags |= NOTE_ATTRIB; 579 vclrflags(vp, VLASTWRITETS); 580 } 581 582 done: 583 /* 584 * If a truncation occurred we must call chain_sync() now in order 585 * to trim the related data chains, otherwise a later expansion can 586 * cause havoc. 587 * 588 * If an extend occured that changed the DIRECTDATA state, we must 589 * call inode_chain_sync now in order to prepare the inode's indirect 590 * block table. 591 * 592 * WARNING! 
This means we are making an adjustment to the inode's 593 * chain outside of sync/fsync, and not just to inode->meta, which 594 * may result in some consistency issues if a crash were to occur 595 * at just the wrong time. 596 */ 597 if (ip->flags & HAMMER2_INODE_RESIZED) 598 hammer2_inode_chain_sync(ip); 599 600 /* 601 * Cleanup. 602 */ 603 hammer2_inode_unlock(ip); 604 hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ); 605 hammer2_knote(ip->vp, kflags); 606 607 return (error); 608 #endif 609 return (EOPNOTSUPP); 610 } 611 612 static 613 int 614 hammer2_vop_readdir(struct vop_readdir_args *ap) 615 { 616 #if 0 617 hammer2_xop_readdir_t *xop; 618 hammer2_blockref_t bref; 619 hammer2_inode_t *ip; 620 hammer2_tid_t inum; 621 hammer2_key_t lkey; 622 struct uio *uio; 623 off_t *cookies; 624 off_t saveoff; 625 int cookie_index; 626 int ncookies; 627 int error; 628 int eofflag; 629 int r; 630 631 ip = VTOI(ap->a_vp); 632 uio = ap->a_uio; 633 saveoff = uio->uio_offset; 634 eofflag = 0; 635 error = 0; 636 637 /* 638 * Setup cookies directory entry cookies if requested 639 */ 640 if (ap->a_ncookies) { 641 ncookies = uio->uio_resid / 16 + 1; 642 if (ncookies > 1024) 643 ncookies = 1024; 644 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK); 645 } else { 646 ncookies = -1; 647 cookies = NULL; 648 } 649 cookie_index = 0; 650 651 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED); 652 653 /* 654 * Handle artificial entries. To ensure that only positive 64 bit 655 * quantities are returned to userland we always strip off bit 63. 656 * The hash code is designed such that codes 0x0000-0x7FFF are not 657 * used, allowing us to use these codes for articial entries. 658 * 659 * Entry 0 is used for '.' and entry 1 is used for '..'. Do not 660 * allow '..' to cross the mount point into (e.g.) the super-root. 
661 */ 662 if (saveoff == 0) { 663 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK; 664 r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, "."); 665 if (r) 666 goto done; 667 if (cookies) 668 cookies[cookie_index] = saveoff; 669 ++saveoff; 670 ++cookie_index; 671 if (cookie_index == ncookies) 672 goto done; 673 } 674 675 if (saveoff == 1) { 676 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK; 677 if (ip != ip->pmp->iroot) 678 inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK; 679 r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, ".."); 680 if (r) 681 goto done; 682 if (cookies) 683 cookies[cookie_index] = saveoff; 684 ++saveoff; 685 ++cookie_index; 686 if (cookie_index == ncookies) 687 goto done; 688 } 689 690 lkey = saveoff | HAMMER2_DIRHASH_VISIBLE; 691 if (hammer2_debug & 0x0020) 692 kprintf("readdir: lkey %016jx\n", lkey); 693 if (error) 694 goto done; 695 696 xop = hammer2_xop_alloc(ip, 0); 697 xop->lkey = lkey; 698 hammer2_xop_start(&xop->head, &hammer2_readdir_desc); 699 700 for (;;) { 701 const hammer2_inode_data_t *ripdata; 702 const char *dname; 703 int dtype; 704 705 error = hammer2_xop_collect(&xop->head, 0); 706 error = hammer2_error_to_errno(error); 707 if (error) { 708 break; 709 } 710 if (cookie_index == ncookies) 711 break; 712 if (hammer2_debug & 0x0020) 713 kprintf("cluster chain %p %p\n", 714 xop->head.cluster.focus, 715 (xop->head.cluster.focus ? 
716 xop->head.cluster.focus->data : (void *)-1)); 717 hammer2_cluster_bref(&xop->head.cluster, &bref); 718 719 if (bref.type == HAMMER2_BREF_TYPE_INODE) { 720 ripdata = &hammer2_xop_gdata(&xop->head)->ipdata; 721 dtype = hammer2_get_dtype(ripdata->meta.type); 722 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 723 r = vop_write_dirent(&error, uio, 724 ripdata->meta.inum & 725 HAMMER2_DIRHASH_USERMSK, 726 dtype, 727 ripdata->meta.name_len, 728 ripdata->filename); 729 hammer2_xop_pdata(&xop->head); 730 if (r) 731 break; 732 if (cookies) 733 cookies[cookie_index] = saveoff; 734 ++cookie_index; 735 } else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) { 736 uint16_t namlen; 737 738 dtype = hammer2_get_dtype(bref.embed.dirent.type); 739 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 740 namlen = bref.embed.dirent.namlen; 741 if (namlen <= sizeof(bref.check.buf)) { 742 dname = bref.check.buf; 743 } else { 744 dname = hammer2_xop_gdata(&xop->head)->buf; 745 } 746 r = vop_write_dirent(&error, uio, 747 bref.embed.dirent.inum, dtype, 748 namlen, dname); 749 if (namlen > sizeof(bref.check.buf)) 750 hammer2_xop_pdata(&xop->head); 751 if (r) 752 break; 753 if (cookies) 754 cookies[cookie_index] = saveoff; 755 ++cookie_index; 756 } else { 757 /* XXX chain error */ 758 kprintf("bad chain type readdir %d\n", bref.type); 759 } 760 } 761 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 762 if (error == ENOENT) { 763 error = 0; 764 eofflag = 1; 765 saveoff = (hammer2_key_t)-1; 766 } else { 767 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 768 } 769 done: 770 hammer2_inode_unlock(ip); 771 if (ap->a_eofflag) 772 *ap->a_eofflag = eofflag; 773 if (hammer2_debug & 0x0020) 774 kprintf("readdir: done at %016jx\n", saveoff); 775 uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE; 776 if (error && cookie_index == 0) { 777 if (cookies) { 778 kfree(cookies, M_TEMP); 779 *ap->a_ncookies = 0; 780 *ap->a_cookies = NULL; 781 } 782 } else { 783 if (cookies) { 784 *ap->a_ncookies = cookie_index; 785 
*ap->a_cookies = cookies; 786 } 787 } 788 return (error); 789 #endif 790 return (EOPNOTSUPP); 791 } 792 793 /* 794 * hammer2_vop_readlink { vp, uio, cred } 795 */ 796 static 797 int 798 hammer2_vop_readlink(struct vop_readlink_args *ap) 799 { 800 #if 0 801 struct m_vnode *vp; 802 hammer2_inode_t *ip; 803 int error; 804 805 vp = ap->a_vp; 806 if (vp->v_type != VLNK) 807 return (EINVAL); 808 ip = VTOI(vp); 809 810 error = hammer2_read_file(ip, ap->a_uio, 0); 811 return (error); 812 #endif 813 return (EOPNOTSUPP); 814 } 815 816 static 817 int 818 hammer2_vop_read(struct vop_read_args *ap) 819 { 820 #if 0 821 struct m_vnode *vp; 822 hammer2_inode_t *ip; 823 struct uio *uio; 824 int error; 825 int seqcount; 826 827 /* 828 * Read operations supported on this vnode? 829 */ 830 vp = ap->a_vp; 831 if (vp->v_type == VDIR) 832 return (EISDIR); 833 if (vp->v_type != VREG) 834 return (EINVAL); 835 836 /* 837 * Misc 838 */ 839 ip = VTOI(vp); 840 uio = ap->a_uio; 841 error = 0; 842 843 seqcount = ap->a_ioflag >> IO_SEQSHIFT; 844 845 error = hammer2_read_file(ip, uio, seqcount); 846 return (error); 847 #endif 848 return (EOPNOTSUPP); 849 } 850 851 static 852 int 853 hammer2_vop_write(struct vop_write_args *ap) 854 { 855 hammer2_inode_t *ip; 856 //thread_t td; 857 struct m_vnode *vp; 858 struct uio *uio; 859 int error; 860 int seqcount; 861 int ioflag; 862 863 /* 864 * Read operations supported on this vnode? 
865 */ 866 vp = ap->a_vp; 867 if (vp->v_type != VREG) 868 return (EINVAL); 869 870 /* 871 * Misc 872 */ 873 ip = VTOI(vp); 874 ioflag = ap->a_ioflag; 875 uio = ap->a_uio; 876 error = 0; 877 if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG)) 878 return (EROFS); 879 switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) { 880 case 2: 881 return (ENOSPC); 882 case 1: 883 ioflag |= IO_DIRECT; /* semi-synchronous */ 884 /* fall through */ 885 default: 886 break; 887 } 888 889 seqcount = ioflag >> IO_SEQSHIFT; 890 891 /* 892 * Check resource limit 893 */ 894 /* 895 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc && 896 uio->uio_offset + uio->uio_resid > 897 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) { 898 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ); 899 return (EFBIG); 900 } 901 */ 902 903 /* 904 * The transaction interlocks against flush initiations 905 * (note: but will run concurrently with the actual flush). 906 * 907 * To avoid deadlocking against the VM system, we must flag any 908 * transaction related to the buffer cache or other direct 909 * VM page manipulation. 
910 */ 911 if (uio->uio_segflg == UIO_NOCOPY) { 912 assert(0); /* no UIO_NOCOPY in makefs */ 913 hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE); 914 } else { 915 hammer2_trans_init(ip->pmp, 0); 916 } 917 error = hammer2_write_file(ip, uio, ioflag, seqcount); 918 if (uio->uio_segflg == UIO_NOCOPY) { 919 assert(0); /* no UIO_NOCOPY in makefs */ 920 hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE | 921 HAMMER2_TRANS_SIDEQ); 922 } else 923 hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ); 924 925 return (error); 926 } 927 928 int 929 hammer2_write(struct m_vnode *vp, void *buf, size_t size, off_t offset) 930 { 931 assert(buf); 932 assert(size > 0); 933 assert(size <= HAMMER2_PBUFSIZE); 934 935 struct iovec iov = { 936 .iov_base = buf, 937 .iov_len = size, 938 }; 939 struct uio uio = { 940 .uio_iov = &iov, 941 .uio_iovcnt = 1, 942 .uio_offset = offset, 943 .uio_resid = size, 944 .uio_segflg = UIO_USERSPACE, 945 .uio_rw = UIO_WRITE, 946 .uio_td = NULL, 947 }; 948 struct vop_write_args ap = { 949 .a_vp = vp, 950 .a_uio = &uio, 951 .a_ioflag = 0, 952 .a_cred = NULL, 953 }; 954 955 return hammer2_vop_write(&ap); 956 } 957 958 #if 0 959 /* 960 * Perform read operations on a file or symlink given an UNLOCKED 961 * inode and uio. 962 * 963 * The passed ip is not locked. 964 */ 965 static 966 int 967 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount) 968 { 969 hammer2_off_t size; 970 struct m_buf *bp; 971 int error; 972 973 error = 0; 974 975 /* 976 * UIO read loop. 977 * 978 * WARNING! Assumes that the kernel interlocks size changes at the 979 * vnode level. 
980 */ 981 hammer2_mtx_sh(&ip->lock); 982 hammer2_mtx_sh(&ip->truncate_lock); 983 size = ip->meta.size; 984 hammer2_mtx_unlock(&ip->lock); 985 986 while (uio->uio_resid > 0 && uio->uio_offset < size) { 987 hammer2_key_t lbase; 988 hammer2_key_t leof; 989 int lblksize; 990 int loff; 991 int n; 992 993 lblksize = hammer2_calc_logical(ip, uio->uio_offset, 994 &lbase, &leof); 995 996 #if 1 997 bp = NULL; 998 error = cluster_readx(ip->vp, leof, lbase, lblksize, 999 B_NOTMETA | B_KVABIO, 1000 uio->uio_resid, 1001 seqcount * MAXBSIZE, 1002 &bp); 1003 #else 1004 if (uio->uio_segflg == UIO_NOCOPY) { 1005 bp = getblk(ip->vp, lbase, lblksize, 1006 GETBLK_BHEAVY | GETBLK_KVABIO, 0); 1007 if (bp->b_flags & B_CACHE) { 1008 int i; 1009 int j = 0; 1010 if (bp->b_xio.xio_npages != 16) 1011 kprintf("NPAGES BAD\n"); 1012 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 1013 vm_page_t m; 1014 m = bp->b_xio.xio_pages[i]; 1015 if (m == NULL || m->valid == 0) { 1016 kprintf("bp %016jx %016jx pg %d inv", 1017 lbase, leof, i); 1018 if (m) 1019 kprintf("m->object %p/%p", m->object, ip->vp->v_object); 1020 kprintf("\n"); 1021 j = 1; 1022 } 1023 } 1024 if (j) 1025 kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error); 1026 } 1027 bqrelse(bp); 1028 } 1029 error = bread_kvabio(ip->vp, lbase, lblksize, &bp); 1030 #endif 1031 if (error) { 1032 brelse(bp); 1033 break; 1034 } 1035 bkvasync(bp); 1036 loff = (int)(uio->uio_offset - lbase); 1037 n = lblksize - loff; 1038 if (n > uio->uio_resid) 1039 n = uio->uio_resid; 1040 if (n > size - uio->uio_offset) 1041 n = (int)(size - uio->uio_offset); 1042 bp->b_flags |= B_AGE; 1043 uiomovebp(bp, (char *)bp->b_data + loff, n, uio); 1044 bqrelse(bp); 1045 } 1046 hammer2_mtx_unlock(&ip->truncate_lock); 1047 1048 return (error); 1049 } 1050 #endif 1051 1052 /* 1053 * Write to the file represented by the inode via the logical buffer cache. 1054 * The inode may represent a regular file or a symlink. 1055 * 1056 * The inode must not be locked. 
1057 */ 1058 static 1059 int 1060 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, 1061 int ioflag, int seqcount) 1062 { 1063 hammer2_key_t old_eof; 1064 hammer2_key_t new_eof; 1065 struct m_buf *bp; 1066 int kflags; 1067 int error; 1068 int modified; 1069 1070 /* 1071 * Setup if append 1072 * 1073 * WARNING! Assumes that the kernel interlocks size changes at the 1074 * vnode level. 1075 */ 1076 hammer2_mtx_ex(&ip->lock); 1077 hammer2_mtx_sh(&ip->truncate_lock); 1078 if (ioflag & IO_APPEND) 1079 uio->uio_offset = ip->meta.size; 1080 old_eof = ip->meta.size; 1081 1082 /* 1083 * Extend the file if necessary. If the write fails at some point 1084 * we will truncate it back down to cover as much as we were able 1085 * to write. 1086 * 1087 * Doing this now makes it easier to calculate buffer sizes in 1088 * the loop. 1089 */ 1090 kflags = 0; 1091 error = 0; 1092 modified = 0; 1093 1094 if (uio->uio_offset + uio->uio_resid > old_eof) { 1095 new_eof = uio->uio_offset + uio->uio_resid; 1096 modified = 1; 1097 hammer2_extend_file(ip, new_eof); 1098 kflags |= NOTE_EXTEND; 1099 } else { 1100 new_eof = old_eof; 1101 } 1102 hammer2_mtx_unlock(&ip->lock); 1103 1104 /* 1105 * UIO write loop 1106 */ 1107 while (uio->uio_resid > 0) { 1108 hammer2_key_t lbase; 1109 int trivial; 1110 int endofblk; 1111 int lblksize; 1112 int loff; 1113 int n; 1114 1115 /* 1116 * Don't allow the buffer build to blow out the buffer 1117 * cache. 1118 */ 1119 if ((ioflag & IO_RECURSE) == 0) 1120 bwillwrite(HAMMER2_PBUFSIZE); 1121 1122 /* 1123 * This nominally tells us how much we can cluster and 1124 * what the logical buffer size needs to be. Currently 1125 * we don't try to cluster the write and just handle one 1126 * block at a time. 
1127 */ 1128 lblksize = hammer2_calc_logical(ip, uio->uio_offset, 1129 &lbase, NULL); 1130 loff = (int)(uio->uio_offset - lbase); 1131 1132 KKASSERT(lblksize <= MAXBSIZE); 1133 1134 /* 1135 * Calculate bytes to copy this transfer and whether the 1136 * copy completely covers the buffer or not. 1137 */ 1138 trivial = 0; 1139 n = lblksize - loff; 1140 if (n > uio->uio_resid) { 1141 n = uio->uio_resid; 1142 if (loff == lbase && uio->uio_offset + n == new_eof) 1143 trivial = 1; 1144 endofblk = 0; 1145 } else { 1146 if (loff == 0) 1147 trivial = 1; 1148 endofblk = 1; 1149 } 1150 if (lbase >= new_eof) 1151 trivial = 1; 1152 trivial = 1; /* force trivial for makefs */ 1153 1154 /* 1155 * Get the buffer 1156 */ 1157 if (uio->uio_segflg == UIO_NOCOPY) { 1158 assert(0); /* no UIO_NOCOPY in makefs */ 1159 /* 1160 * Issuing a write with the same data backing the 1161 * buffer. Instantiate the buffer to collect the 1162 * backing vm pages, then read-in any missing bits. 1163 * 1164 * This case is used by vop_stdputpages(). 1165 */ 1166 bp = getblkx(ip->vp, lbase, lblksize, 1167 GETBLK_BHEAVY | GETBLK_KVABIO, 0); 1168 /* 1169 if ((bp->b_flags & B_CACHE) == 0) { 1170 bqrelse(bp); 1171 error = bread_kvabio(ip->vp, lbase, 1172 lblksize, &bp); 1173 } 1174 */ 1175 } else if (trivial) { 1176 /* 1177 * Even though we are entirely overwriting the buffer 1178 * we may still have to zero it out to avoid a 1179 * mmap/write visibility issue. 1180 */ 1181 bp = getblkx(ip->vp, lbase, lblksize, 1182 GETBLK_BHEAVY | GETBLK_KVABIO, 0); 1183 /* 1184 if ((bp->b_flags & B_CACHE) == 0) 1185 vfs_bio_clrbuf(bp); 1186 */ 1187 } else { 1188 assert(0); /* no partial write in makefs */ 1189 /* 1190 * Partial overwrite, read in any missing bits then 1191 * replace the portion being written. 1192 * 1193 * (The strategy code will detect zero-fill physical 1194 * blocks for this case). 
1195 */ 1196 error = bread_kvabio(ip->vp, lbase, lblksize, &bp); 1197 if (error == 0) 1198 bheavy(bp); 1199 } 1200 1201 if (error) { 1202 brelse(bp); 1203 break; 1204 } 1205 1206 /* 1207 * Ok, copy the data in 1208 */ 1209 bkvasync(bp); 1210 error = uiomovebp(bp, (char *)bp->b_data + loff, n, uio); 1211 kflags |= NOTE_WRITE; 1212 modified = 1; 1213 if (error) { 1214 brelse(bp); 1215 break; 1216 } 1217 1218 /* 1219 * WARNING: Pageout daemon will issue UIO_NOCOPY writes 1220 * with IO_SYNC or IO_ASYNC set. These writes 1221 * must be handled as the pageout daemon expects. 1222 * 1223 * NOTE! H2 relies on cluster_write() here because it 1224 * cannot preallocate disk blocks at the logical 1225 * level due to not knowing what the compression 1226 * size will be at this time. 1227 * 1228 * We must use cluster_write() here and we depend 1229 * on the write-behind feature to flush buffers 1230 * appropriately. If we let the buffer daemons do 1231 * it the block allocations will be all over the 1232 * map. 1233 */ 1234 if (1) { 1235 bp->b_cmd = BUF_CMD_WRITE; 1236 1237 struct bio bio; 1238 bio.bio_buf = bp; 1239 bio.bio_offset = lbase; 1240 1241 struct vop_strategy_args ap; 1242 ap.a_vp = ip->vp; 1243 ap.a_bio = &bio; 1244 1245 error = hammer2_vop_strategy(&ap); 1246 assert(!error); 1247 1248 brelse(bp); 1249 } else if (ioflag & IO_SYNC) { 1250 assert(0); 1251 bwrite(bp); 1252 } else if ((ioflag & IO_DIRECT) && endofblk) { 1253 assert(0); 1254 bawrite(bp); 1255 } else if (ioflag & IO_ASYNC) { 1256 assert(0); 1257 bawrite(bp); 1258 } else if (0 /*ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW*/) { 1259 assert(0); 1260 bdwrite(bp); 1261 } else { 1262 assert(0); 1263 #if 0 1264 #if 1 1265 bp->b_flags |= B_CLUSTEROK; 1266 cluster_write(bp, new_eof, lblksize, seqcount); 1267 #else 1268 bp->b_flags |= B_CLUSTEROK; 1269 bdwrite(bp); 1270 #endif 1271 #endif 1272 } 1273 } 1274 1275 /* 1276 * Cleanup. 
If we extended the file EOF but failed to write through 1277 * the entire write is a failure and we have to back-up. 1278 */ 1279 if (error && new_eof != old_eof) { 1280 hammer2_mtx_unlock(&ip->truncate_lock); 1281 hammer2_mtx_ex(&ip->lock); /* note lock order */ 1282 hammer2_mtx_ex(&ip->truncate_lock); /* note lock order */ 1283 hammer2_truncate_file(ip, old_eof); 1284 if (ip->flags & HAMMER2_INODE_MODIFIED) 1285 hammer2_inode_chain_sync(ip); 1286 hammer2_mtx_unlock(&ip->lock); 1287 } else if (modified) { 1288 struct m_vnode *vp = ip->vp; 1289 1290 hammer2_mtx_ex(&ip->lock); 1291 hammer2_inode_modify(ip); 1292 if (uio->uio_segflg == UIO_NOCOPY) { 1293 assert(0); /* no UIO_NOCOPY in makefs */ 1294 /* 1295 if (vp->v_flag & VLASTWRITETS) { 1296 ip->meta.mtime = 1297 (unsigned long)vp->v_lastwrite_ts.tv_sec * 1298 1000000 + 1299 vp->v_lastwrite_ts.tv_nsec / 1000; 1300 } 1301 */ 1302 } else { 1303 hammer2_update_time(&ip->meta.mtime); 1304 vclrflags(vp, VLASTWRITETS); 1305 } 1306 1307 #if 0 1308 /* 1309 * REMOVED - handled by hammer2_extend_file(). Do not issue 1310 * a chain_sync() outside of a sync/fsync except for DIRECTDATA 1311 * state changes. 1312 * 1313 * Under normal conditions we only issue a chain_sync if 1314 * the inode's DIRECTDATA state changed. 1315 */ 1316 if (ip->flags & HAMMER2_INODE_RESIZED) 1317 hammer2_inode_chain_sync(ip); 1318 #endif 1319 hammer2_mtx_unlock(&ip->lock); 1320 hammer2_knote(ip->vp, kflags); 1321 } 1322 hammer2_trans_assert_strategy(ip->pmp); 1323 hammer2_mtx_unlock(&ip->truncate_lock); 1324 1325 return error; 1326 } 1327 1328 /* 1329 * Truncate the size of a file. The inode must be locked. 1330 * 1331 * We must unconditionally set HAMMER2_INODE_RESIZED to properly 1332 * ensure that any on-media data beyond the new file EOF has been destroyed. 1333 * 1334 * WARNING: nvtruncbuf() can only be safely called without the inode lock 1335 * held due to the way our write thread works. 
 *	    If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	/*
	 * Temporarily release the inode lock across nvtruncbuf() (see the
	 * WARNING above regarding the write thread), then re-acquire it
	 * exclusively before touching the in-memory inode meta-data.
	 */
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		/*
		 * nvtruncbuf() is given the logical block size at nsize and
		 * the intra-block byte offset so it can zero the trailing
		 * portion of a partially truncated buffer.
		 */
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	/*
	 * Save the old size and unconditionally flag RESIZED; the caller
	 * is responsible for destroying any on-media blocks beyond nsize.
	 */
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}

/*
 * Extend the size of a file.  The inode must be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;
	int error;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	/* Record the pre-extension size before bumping meta.size */
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	/*
	 * We must issue a chain_sync() when the DIRECTDATA state changes
	 * to prevent confusion between the flush code and the in-memory
	 * state.  This is not perfect because we are doing it outside of
	 * a sync/fsync operation, so it might not be fully synchronized
	 * with the meta-data topology flush.
	 *
	 * We must retain and re-dirty the buffer cache buffer containing
	 * the direct data so it can be written to a real block.  It should
	 * not be possible for a bread error to occur since the original data
	 * is extracted from the inode structure directly.
	 */
	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		if (osize) {
			/* DIRECTDATA with existing data: not reachable in makefs */
			assert(0); /* no such transition in makefs */
			struct m_buf *bp;

			oblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
			error = bread_kvabio(ip->vp, 0, oblksize, &bp);
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
			if (error == 0) {
				bheavy(bp);
				bdwrite(bp);
			} else {
				brelse(bp);
			}
		} else {
			/* Empty file crossing the embedded boundary */
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
		}
	}
	/*
	 * Drop the inode lock across nvextendbuf() (same constraint as
	 * hammer2_truncate_file()), then re-acquire it before returning
	 * to the caller, which expects the inode locked.
	 */
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}

/*
 * Resolve a name (ap->a_nch) within directory ap->a_dvp to a vnode,
 * returned via ap->a_vpp.
 */
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct m_vnode *vp;
	int error;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);

	/* Collect the backend result; on success obtain the target inode */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			*ap->a_vpp = vp;
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	*/

	return error;
}

/*
 * makefs entry point: resolve (name, nlen) under dvp.  Builds throwaway
 * namecache/nchandle structures on the stack so the VOP code above can
 * be reused unchanged.
 */
int
hammer2_nresolve(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen)
{
	*vpp = NULL;
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nresolve_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
	};

	return hammer2_vop_nresolve(&ap);
}

/*
 * Resolve "..": kernel implementation is compiled out for makefs;
 * always returns EOPNOTSUPP here.
 */
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
#if 0
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	return error;
#endif
	return (EOPNOTSUPP);
}

/*
 * Create a directory (ap->a_nch) in directory ap->a_dvp, returning the
 * new vnode via ap->a_vpp.
 */
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.
	 * The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
		/* returns UNIX error code */
	}
	if (error) {
		/* Back out the partially created inode, if any */
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		/*
		 * inode_depend() must occur before the igetv() because
		 * the igetv() can temporarily release the inode lock.
		 */
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 *
	 * We can use a shared inode lock and allow the meta.mtime update
	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}

/*
 * makefs entry point: mkdir (name, nlen) under dvp with the given mode.
 * uid/gid are left as VNOVAL (getuid()/getgid() deliberately not used).
 */
int
hammer2_nmkdir(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
	       mode_t mode)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VDIR,
		.va_mode = mode & ~S_IFMT,
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmkdir_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmkdir(&ap);
}

/* Kernel implementation compiled out for makefs; unsupported */
static
int
hammer2_vop_open(struct vop_open_args *ap)
{
#if 0
	return vop_stdopen(ap);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 *
 * Kernel implementation compiled out for makefs; unsupported.
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
#if 0
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
#endif
	return (EOPNOTSUPP);
}

/* Kernel implementation compiled out for makefs; unsupported */
static
int
hammer2_vop_close(struct vop_close_args *ap)
{
#if 0
	return vop_stdclose(ap);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	int error;
	uint64_t cmtime;

	/* We know it's the same in makefs */
	/*
	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);
	*/

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly || (tdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock4(tdip, ip, NULL, NULL);

	/* Single timestamp used for both ip's ctime and tdip's [cm]time */
	hammer2_update_time(&cmtime);

	/*
	 * Create the directory entry and bump nlinks.
	 * Also update ip's ctime.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
		ip->meta.ctime = cmtime;
	}
	if (error == 0) {
		/*
		 * Update dip's [cm]time
		 */
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = cmtime;
		tdip->meta.ctime = cmtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

	return error;
}

/*
 * makefs entry point: hardlink vp as (name, nlen) under dvp.
 */
int
hammer2_nlink(struct m_vnode *dvp, struct m_vnode *vp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nlink_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vp = vp,
	};

	return hammer2_vop_nlink(&ap);
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);

	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		/* Back out the partially created inode, if any */
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		/* inode_depend() must precede igetv(); see vop_nmkdir */
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

/*
 * makefs entry point: create regular file (name, nlen) under dvp.
 * uid/gid are left as VNOVAL (getuid()/getgid() deliberately not used).
 */
int
hammer2_ncreate(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
		mode_t mode)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VREG,
		.va_mode = mode & ~S_IFMT,
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_ncreate_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_ncreate(&ap);
}

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	/*
	 * NOTE(review): unlike vop_nmkdir/vop_ncreate, a failure from
	 * hammer2_inode_create_normal() is not converted here via
	 * hammer2_error_to_errno() — presumably benign for makefs, but
	 * confirm against the sibling functions.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		/* Back out the partially created inode, if any */
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

/*
 * makefs entry point: create a device/fifo node (name, nlen) under dvp
 * with vnode type 'type'.  uid/gid are left as VNOVAL.
 */
int
hammer2_nmknod(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
	       int type, mode_t mode)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = type,
		.va_mode = mode & ~S_IFMT,
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmknod_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmknod(&ap);
}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * Create the softlink as an inode and then create the directory
	 * entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
		hammer2_inode_unlock(dip);
		hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
		return error;
	}
	hammer2_inode_depend(dip, nip);	/* before igetv */
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
2108 */ 2109 if (error == 0) { 2110 size_t bytes; 2111 struct uio auio; 2112 struct iovec aiov; 2113 2114 bytes = strlen(ap->a_target); 2115 2116 hammer2_inode_unlock(nip); 2117 bzero(&auio, sizeof(auio)); 2118 bzero(&aiov, sizeof(aiov)); 2119 auio.uio_iov = &aiov; 2120 auio.uio_segflg = UIO_SYSSPACE; 2121 auio.uio_rw = UIO_WRITE; 2122 auio.uio_resid = bytes; 2123 auio.uio_iovcnt = 1; 2124 auio.uio_td = curthread; 2125 aiov.iov_base = ap->a_target; 2126 aiov.iov_len = bytes; 2127 error = hammer2_write_file(nip, &auio, IO_APPEND, 0); 2128 /* XXX handle error */ 2129 error = 0; 2130 } else { 2131 hammer2_inode_unlock(nip); 2132 } 2133 2134 /* 2135 * Update dip's mtime 2136 */ 2137 if (error == 0) { 2138 uint64_t mtime; 2139 2140 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 2141 hammer2_update_time(&mtime); 2142 hammer2_inode_modify(dip); 2143 dip->meta.mtime = mtime; 2144 /*hammer2_inode_unlock(dip);*/ 2145 } 2146 hammer2_inode_unlock(dip); 2147 2148 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 2149 2150 /* 2151 * Finalize namecache 2152 */ 2153 if (error == 0) { 2154 cache_setunresolved(ap->a_nch); 2155 cache_setvp(ap->a_nch, *ap->a_vpp); 2156 hammer2_knote(ap->a_dvp, NOTE_WRITE); 2157 } 2158 return error; 2159 } 2160 2161 int 2162 hammer2_nsymlink(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen, 2163 char *target, mode_t mode) 2164 { 2165 struct namecache nc = { 2166 .nc_name = name, 2167 .nc_nlen = nlen, 2168 }; 2169 struct nchandle nch = { 2170 .ncp = &nc, 2171 }; 2172 uid_t va_uid = VNOVAL; //getuid(); 2173 uid_t va_gid = VNOVAL; //getgid(); 2174 struct vattr va = { 2175 .va_type = VDIR, 2176 .va_mode = mode & ~S_IFMT, 2177 .va_uid = va_uid, 2178 .va_gid = va_gid, 2179 }; 2180 struct vop_nsymlink_args ap = { 2181 .a_nch = &nch, 2182 .a_dvp = dvp, 2183 .a_vpp = vpp, 2184 .a_vap = &va, 2185 .a_target = target, 2186 }; 2187 2188 return hammer2_vop_nsymlink(&ap); 2189 } 2190 2191 /* 2192 * hammer2_vop_nremove { nch, dvp, cred } 2193 */ 
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	/* Kernel implementation compiled out for makefs; unsupported */
#if 0
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct m_vnode *vprecycle;
	struct namecache *ncp;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	ncp = ap->a_nch->ncp;

	if (hammer2_debug_inode && dip->meta.inum == hammer2_debug_inode) {
		kprintf("hammer2: attempt to delete inside debug inode: %s\n",
			ncp->nc_name);
		while (hammer2_debug_inode &&
		       dip->meta.inum == hammer2_debug_inode) {
			tsleep(&hammer2_debug_inode, 0, "h2debug", hz*5);
		}
	}

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	vprecycle = NULL;

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			if (hammer2_debug_inode &&
			    ip->meta.inum == hammer2_debug_inode) {
				kprintf("hammer2: attempt to delete debug "
					"inode!\n");
				while (hammer2_debug_inode &&
				       ip->meta.inum == hammer2_debug_inode) {
					tsleep(&hammer2_debug_inode, 0,
					       "h2debug", hz*5);
				}
			}
			hammer2_inode_unlink_finisher(ip, &vprecycle);
			hammer2_inode_depend(dip, ip); /* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);

	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	/* Kernel implementation compiled out for makefs; unsupported */
#if 0
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	struct m_vnode *vprecycle;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	vprecycle = NULL;

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, &vprecycle);
			hammer2_inode_depend(dip, ip); /* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);
	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 *
 * Kernel implementation compiled out for makefs; unsupported.
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
#if 0
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;	/* source directory */
	hammer2_inode_t *tdip;	/* target directory */
	hammer2_inode_t *ip;	/* file being renamed */
	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
	struct m_vnode *vprecycle;
	const char *fname;
	size_t fname_len;
	const char *tname;
	size_t tname_len;
	int error;
	int update_tdip;
	int update_fdip;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly || (fdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	ip = VTOI(fncp->nc_vp);
	hammer2_inode_ref(ip);		/* extra ref */

	/*
	 * Lookup the target name to determine if a directory entry
	 * is being overwritten.  We only hold related inode locks
	 * temporarily, the operating system is expected to protect
	 * against rename races.
	 */
	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
	if (tip)
		hammer2_inode_ref(tip);	/* extra ref */

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 *
	 * For now try to avoid deadlocks with a simple pointer address
	 * test. (tip) can be NULL.
2450 */ 2451 error = 0; 2452 { 2453 hammer2_inode_t *ip1 = fdip; 2454 hammer2_inode_t *ip2 = tdip; 2455 hammer2_inode_t *ip3 = ip; 2456 hammer2_inode_t *ip4 = tip; /* may be NULL */ 2457 2458 if (fdip > tdip) { 2459 ip1 = tdip; 2460 ip2 = fdip; 2461 } 2462 if (tip && ip > tip) { 2463 ip3 = tip; 2464 ip4 = ip; 2465 } 2466 hammer2_inode_lock4(ip1, ip2, ip3, ip4); 2467 } 2468 2469 /* 2470 * Resolve the collision space for (tdip, tname, tname_len) 2471 * 2472 * tdip must be held exclusively locked to prevent races since 2473 * multiple filenames can end up in the same collision space. 2474 */ 2475 { 2476 hammer2_xop_scanlhc_t *sxop; 2477 hammer2_tid_t lhcbase; 2478 2479 tlhc = hammer2_dirhash(tname, tname_len); 2480 lhcbase = tlhc; 2481 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING); 2482 sxop->lhc = tlhc; 2483 hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc); 2484 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) { 2485 if (tlhc != sxop->head.cluster.focus->bref.key) 2486 break; 2487 ++tlhc; 2488 } 2489 error = hammer2_error_to_errno(error); 2490 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP); 2491 2492 if (error) { 2493 if (error != ENOENT) 2494 goto done2; 2495 ++tlhc; 2496 error = 0; 2497 } 2498 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) { 2499 error = ENOSPC; 2500 goto done2; 2501 } 2502 } 2503 2504 /* 2505 * Ready to go, issue the rename to the backend. Note that meta-data 2506 * updates to the related inodes occur separately from the rename 2507 * operation. 2508 * 2509 * NOTE: While it is not necessary to update ip->meta.name*, doing 2510 * so aids catastrophic recovery and debugging. 
2511 */ 2512 if (error == 0) { 2513 hammer2_xop_nrename_t *xop4; 2514 2515 xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING); 2516 xop4->lhc = tlhc; 2517 xop4->ip_key = ip->meta.name_key; 2518 hammer2_xop_setip2(&xop4->head, ip); 2519 hammer2_xop_setip3(&xop4->head, tdip); 2520 if (tip && tip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) 2521 hammer2_xop_setip4(&xop4->head, tip); 2522 hammer2_xop_setname(&xop4->head, fname, fname_len); 2523 hammer2_xop_setname2(&xop4->head, tname, tname_len); 2524 hammer2_xop_start(&xop4->head, &hammer2_nrename_desc); 2525 2526 error = hammer2_xop_collect(&xop4->head, 0); 2527 error = hammer2_error_to_errno(error); 2528 hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP); 2529 2530 if (error == ENOENT) 2531 error = 0; 2532 2533 /* 2534 * Update inode meta-data. 2535 * 2536 * WARNING! The in-memory inode (ip) structure does not 2537 * maintain a copy of the inode's filename buffer. 2538 */ 2539 if (error == 0 && 2540 (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) { 2541 hammer2_inode_modify(ip); 2542 ip->meta.name_len = tname_len; 2543 ip->meta.name_key = tlhc; 2544 } 2545 if (error == 0) { 2546 hammer2_inode_modify(ip); 2547 ip->meta.iparent = tdip->meta.inum; 2548 } 2549 update_fdip = 1; 2550 update_tdip = 1; 2551 } 2552 2553 done2: 2554 /* 2555 * If no error, the backend has replaced the target directory entry. 2556 * We must adjust nlinks on the original replace target if it exists. 2557 */ 2558 vprecycle = NULL; 2559 if (error == 0 && tip) { 2560 hammer2_inode_unlink_finisher(tip, &vprecycle); 2561 } 2562 2563 /* 2564 * Update directory mtimes to represent the something changed. 
2565 */ 2566 if (update_fdip || update_tdip) { 2567 uint64_t mtime; 2568 2569 hammer2_update_time(&mtime); 2570 if (update_fdip) { 2571 hammer2_inode_modify(fdip); 2572 fdip->meta.mtime = mtime; 2573 } 2574 if (update_tdip) { 2575 hammer2_inode_modify(tdip); 2576 tdip->meta.mtime = mtime; 2577 } 2578 } 2579 if (tip) { 2580 hammer2_inode_unlock(tip); 2581 hammer2_inode_drop(tip); 2582 } 2583 hammer2_inode_unlock(ip); 2584 hammer2_inode_unlock(tdip); 2585 hammer2_inode_unlock(fdip); 2586 hammer2_inode_drop(ip); 2587 hammer2_trans_done(tdip->pmp, HAMMER2_TRANS_SIDEQ); 2588 2589 /* 2590 * Issue the namecache update after unlocking all the internal 2591 * hammer2 structures, otherwise we might deadlock. 2592 * 2593 * WARNING! The target namespace must be updated atomically, 2594 * and we depend on cache_rename() to handle that for 2595 * us. Do not do a separate cache_unlink() because 2596 * that leaves a small window of opportunity for other 2597 * threads to allocate the target namespace before we 2598 * manage to complete our rename. 2599 * 2600 * WARNING! cache_rename() (and cache_unlink()) will properly 2601 * set VREF_FINALIZE on any attached vnode. Do not 2602 * call cache_setunresolved() manually before-hand as 2603 * this will prevent the flag from being set later via 2604 * cache_rename(). If VREF_FINALIZE is not properly set 2605 * and the inode is no longer in the topology, related 2606 * chains can remain dirty indefinitely. 
2607 */ 2608 if (error == 0 && tip) { 2609 /*cache_unlink(ap->a_tnch); see above */ 2610 /*cache_setunresolved(ap->a_tnch); see above */ 2611 } 2612 if (error == 0) { 2613 cache_rename(ap->a_fnch, ap->a_tnch); 2614 hammer2_knote(ap->a_fdvp, NOTE_WRITE); 2615 hammer2_knote(ap->a_tdvp, NOTE_WRITE); 2616 hammer2_knote(fncp->nc_vp, NOTE_RENAME); 2617 } 2618 if (vprecycle) 2619 hammer2_inode_vprecycle(vprecycle); 2620 2621 return (error); 2622 #endif 2623 return (EOPNOTSUPP); 2624 } 2625 2626 /* 2627 * hammer2_vop_ioctl { vp, command, data, fflag, cred } 2628 */ 2629 static 2630 int 2631 hammer2_vop_ioctl(struct vop_ioctl_args *ap) 2632 { 2633 #if 0 2634 hammer2_inode_t *ip; 2635 int error; 2636 2637 ip = VTOI(ap->a_vp); 2638 2639 error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data, 2640 ap->a_fflag, ap->a_cred); 2641 return (error); 2642 #endif 2643 return (EOPNOTSUPP); 2644 } 2645 2646 static 2647 int 2648 hammer2_vop_mountctl(struct vop_mountctl_args *ap) 2649 { 2650 #if 0 2651 struct mount *mp; 2652 hammer2_pfs_t *pmp; 2653 int rc; 2654 2655 switch (ap->a_op) { 2656 case (MOUNTCTL_SET_EXPORT): 2657 mp = ap->a_head.a_ops->head.vv_mount; 2658 pmp = MPTOPMP(mp); 2659 2660 if (ap->a_ctllen != sizeof(struct export_args)) 2661 rc = (EINVAL); 2662 else 2663 rc = vfs_export(mp, &pmp->export, 2664 (const struct export_args *)ap->a_ctl); 2665 break; 2666 default: 2667 rc = vop_stdmountctl(ap); 2668 break; 2669 } 2670 return (rc); 2671 #endif 2672 return (EOPNOTSUPP); 2673 } 2674 2675 /* 2676 * KQFILTER 2677 */ 2678 /* 2679 static void filt_hammer2detach(struct knote *kn); 2680 static int filt_hammer2read(struct knote *kn, long hint); 2681 static int filt_hammer2write(struct knote *kn, long hint); 2682 static int filt_hammer2vnode(struct knote *kn, long hint); 2683 2684 static struct filterops hammer2read_filtops = 2685 { FILTEROP_ISFD | FILTEROP_MPSAFE, 2686 NULL, filt_hammer2detach, filt_hammer2read }; 2687 static struct filterops hammer2write_filtops = 2688 { 
	  FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };
*/

/*
 * hammer2_vop_kqfilter { vp, kn }
 *
 * Attach a knote to the vnode, dispatching on the requested filter
 * (EVFILT_READ/WRITE/VNODE).  Compiled out in this build; the stub
 * returns EOPNOTSUPP.
 */
static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
#if 0
	struct m_vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	/* hook carries the vnode back to the filt_hammer2* callbacks */
	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
#endif
	return (EOPNOTSUPP);
}

#if 0
/*
 * Detach a knote previously attached by hammer2_vop_kqfilter().
 */
static void
filt_hammer2detach(struct knote *kn)
{
	struct m_vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

/*
 * EVFILT_READ: report the number of bytes between the descriptor's
 * current offset and EOF in kn_data, clamped to INTPTR_MAX.  A revoke
 * forces EOF/NODATA/ONESHOT.
 */
static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct m_vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}


/*
 * EVFILT_WRITE: the vnode is always considered writable (kn_data 0).
 */
static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

/*
 * EVFILT_VNODE: accumulate the hinted events the caller asked for and
 * report when any have fired; revoke forces EOF/NODATA.
 */
static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}
#endif

/*
 * FIFO VOPS
 */

/*
 * hammer2_vop_markatime { vp }
 *
 * Only validates writability (EROFS on read-only or emergency-mode
 * PFSs); no atime update is performed here.  Compiled out in this
 * build; the stub returns EOPNOTSUPP.
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct m_vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	return(0);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_fifokqfilter { vp, kn }
 *
 * Try the fifofs kqfilter first; on failure fall back to the regular
 * hammer2 kqfilter.  Compiled out in this build; the stub returns
 * EOPNOTSUPP.
 */
static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
#if 0
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
#endif
	return (EOPNOTSUPP);
}

/*
 * VOPS vector
 */

/* Regular files and directories */
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_getattr_lite = hammer2_vop_getattr_lite,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

/* Special files (devices); reads/writes are rejected */
struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};

/* Fifos; unlisted ops fall through to fifofs via fifo_vnoperate */
struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
#if 0
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
#endif
	.vop_access	= hammer2_vop_access,
#if 0
	.vop_close	= hammer2_vop_fifoclose,
#endif
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};