1 /* 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org> 5 * Copyright (c) 2011-2022 The DragonFly Project. All rights reserved. 6 * 7 * This code is derived from software contributed to The DragonFly Project 8 * by Matthew Dillon <dillon@dragonflybsd.org> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in 18 * the documentation and/or other materials provided with the 19 * distribution. 20 * 3. Neither the name of The DragonFly Project nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific, prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 */ 37 /* 38 * Kernel Filesystem interface 39 * 40 * NOTE! 
local ipdata pointers must be reloaded on any modifying operation 41 * to the inode as its underlying chain may have changed. 42 */ 43 44 /* 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/fcntl.h> 49 #include <sys/buf.h> 50 #include <sys/proc.h> 51 #include <sys/mount.h> 52 #include <sys/vnode.h> 53 #include <sys/mountctl.h> 54 */ 55 #include <sys/dirent.h> 56 /* 57 #include <sys/uio.h> 58 #include <sys/objcache.h> 59 #include <sys/event.h> 60 #include <sys/file.h> 61 #include <vfs/fifofs/fifo.h> 62 */ 63 64 #include "hammer2.h" 65 66 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, 67 int seqcount); 68 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, 69 int ioflag, int seqcount); 70 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize); 71 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize); 72 73 /* 74 * Last reference to a vnode is going away but it is still cached. 75 */ 76 static 77 int 78 hammer2_vop_inactive(struct vop_inactive_args *ap) 79 { 80 #if 0 81 hammer2_inode_t *ip; 82 struct m_vnode *vp; 83 84 vp = ap->a_vp; 85 ip = VTOI(vp); 86 87 /* 88 * Degenerate case 89 */ 90 if (ip == NULL) { 91 vrecycle(vp); 92 return (0); 93 } 94 95 /* 96 * Aquire the inode lock to interlock against vp updates via 97 * the inode path and file deletions and such (which can be 98 * namespace-only operations that might not hold the vnode). 99 */ 100 hammer2_inode_lock(ip, 0); 101 if (ip->flags & HAMMER2_INODE_ISUNLINKED) { 102 hammer2_key_t lbase; 103 int nblksize; 104 105 /* 106 * If the inode has been unlinked we can throw away all 107 * buffers (dirty or not) and clean the file out. 108 * 109 * Because vrecycle() calls are not guaranteed, try to 110 * dispose of the inode as much as possible right here. 
		 */
		/* toss all cached logical buffers for the unlinked file */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);

		/*
		 * Delete the file on-media.
		 */
		if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
			atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
			hammer2_inode_delayed_sideq(ip);
		}
		hammer2_inode_unlock(ip);

		/*
		 * Recycle immediately if possible
		 */
		vrecycle(vp);
	} else {
		hammer2_inode_unlock(ip);
	}
	return (0);
#endif
	return (EOPNOTSUPP);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	struct m_vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL)
		return(0);

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * The inode lock is required to disconnect it.
	 */
	hammer2_inode_lock(ip, 0);
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * Delete the file on-media.  This should have been handled by the
	 * inactivation.  The operation is likely still queued on the inode
	 * though so only complain if the stars don't align.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED | HAMMER2_INODE_DELETING)) ==
	    HAMMER2_INODE_ISUNLINKED)
	{
		/* unlinked inode reached reclaim without DELETING queued */
		assert(0);
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);
		kprintf("hammer2: vp=%p ip=%p unlinked but not disposed\n",
			vp, ip);
	}
	hammer2_inode_unlock(ip);

	/*
	 * Modified inodes will already be on SIDEQ or SYNCQ, no further
	 * action is needed.
	 *
	 * We cannot safely synchronize the inode from inside the reclaim
	 * due to potentially deep locks held as-of when the reclaim occurs.
	 * Interactions and potential deadlocks abound.  We also can't do it
	 * here without desynchronizing from the related directory entrie(s).
	 */
	hammer2_inode_drop(ip);		/* vp ref */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	return (0);
}

/*
 * Front-end for hammer2_vop_reclaim() used by makefs: builds the
 * vop_reclaim_args for the given vnode and forwards the call.
 */
int
hammer2_reclaim(struct m_vnode *vp)
{
	struct vop_reclaim_args ap = {
		.a_vp = vp,
	};

	return hammer2_vop_reclaim(&ap);
}

/*
 * Currently this function synchronizes the front-end inode state to the
 * backend chain topology, then flushes the inode's chain and sub-topology
 * to backend media.  This function does not flush the root topology down to
 * the inode.
 *
 * NOTE: The kernel implementation below is compiled out (#if 0) in the
 *	 makefs port; the operation is reported as unsupported.
 */
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct m_vnode *vp;
	int error1;
	int error2;

	vp = ap->a_vp;
	ip = VTOI(vp);
	error1 = 0;

	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Flush dirty buffers in the file's logical buffer cache.
	 * It is best to wait for the strategy code to commit the
	 * buffers to the device's backing buffer cache before
	 * then trying to flush the inode.
	 *
	 * This should be quick, but certain inode modifications cached
	 * entirely in the hammer2_inode structure may not trigger a
	 * buffer read until the flush so the fsync can wind up also
	 * doing scattered reads.
	 */
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Flush any inode changes
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
		error1 = hammer2_inode_chain_sync(ip);

	/*
	 * Flush dirty chains related to the inode.
	 *
	 * NOTE! We are not in a flush transaction.
The inode remains on 258 * the sideq so the filesystem syncer can synchronize it to 259 * the volume root. 260 */ 261 error2 = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP); 262 if (error2) 263 error1 = error2; 264 265 /* 266 * We may be able to clear the vnode dirty flag. 267 */ 268 if ((ip->flags & (HAMMER2_INODE_MODIFIED | 269 HAMMER2_INODE_RESIZED | 270 HAMMER2_INODE_DIRTYDATA)) == 0 && 271 RB_EMPTY(&vp->v_rbdirty_tree) && 272 !bio_track_active(&vp->v_track_write)) { 273 vclrisdirty(vp); 274 } 275 hammer2_inode_unlock(ip); 276 hammer2_trans_done(ip->pmp, 0); 277 278 return (error1); 279 #endif 280 return (EOPNOTSUPP); 281 } 282 283 /* 284 * No lock needed, just handle ip->update 285 */ 286 static 287 int 288 hammer2_vop_access(struct vop_access_args *ap) 289 { 290 #if 0 291 hammer2_inode_t *ip = VTOI(ap->a_vp); 292 uid_t uid; 293 gid_t gid; 294 mode_t mode; 295 uint32_t uflags; 296 int error; 297 int update; 298 299 retry: 300 update = spin_access_start(&ip->cluster_spin); 301 302 /*hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);*/ 303 uid = hammer2_to_unix_xid(&ip->meta.uid); 304 gid = hammer2_to_unix_xid(&ip->meta.gid); 305 mode = ip->meta.mode; 306 uflags = ip->meta.uflags; 307 /*hammer2_inode_unlock(ip);*/ 308 309 if (__predict_false(spin_access_end(&ip->cluster_spin, update))) 310 goto retry; 311 312 error = vop_helper_access(ap, uid, gid, mode, uflags); 313 314 return (error); 315 #endif 316 return (EOPNOTSUPP); 317 } 318 319 static 320 int 321 hammer2_vop_getattr(struct vop_getattr_args *ap) 322 { 323 #if 0 324 hammer2_pfs_t *pmp; 325 hammer2_inode_t *ip; 326 struct m_vnode *vp; 327 struct vattr *vap; 328 int update; 329 330 vp = ap->a_vp; 331 vap = ap->a_vap; 332 333 ip = VTOI(vp); 334 pmp = ip->pmp; 335 336 retry: 337 update = spin_access_start(&ip->cluster_spin); 338 339 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0]; 340 vap->va_fileid = ip->meta.inum; 341 vap->va_mode = ip->meta.mode; 342 vap->va_nlink = ip->meta.nlinks; 343 vap->va_uid = 
hammer2_to_unix_xid(&ip->meta.uid); 344 vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid); 345 vap->va_rmajor = 0; 346 vap->va_rminor = 0; 347 vap->va_size = ip->meta.size; /* protected by shared lock */ 348 vap->va_blocksize = HAMMER2_PBUFSIZE; 349 vap->va_flags = ip->meta.uflags; 350 hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime); 351 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime); 352 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime); 353 vap->va_gen = 1; 354 vap->va_bytes = 0; 355 if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) { 356 /* 357 * Can't really calculate directory use sans the files under 358 * it, just assume one block for now. 359 */ 360 vap->va_bytes += HAMMER2_INODE_BYTES; 361 } else { 362 vap->va_bytes = hammer2_inode_data_count(ip); 363 } 364 vap->va_type = hammer2_get_vtype(ip->meta.type); 365 vap->va_filerev = 0; 366 vap->va_uid_uuid = ip->meta.uid; 367 vap->va_gid_uuid = ip->meta.gid; 368 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID | 369 VA_FSID_UUID_VALID; 370 371 if (__predict_false(spin_access_end(&ip->cluster_spin, update))) 372 goto retry; 373 374 return (0); 375 #endif 376 return (EOPNOTSUPP); 377 } 378 379 static 380 int 381 hammer2_vop_getattr_lite(struct vop_getattr_lite_args *ap) 382 { 383 #if 0 384 hammer2_pfs_t *pmp; 385 hammer2_inode_t *ip; 386 struct m_vnode *vp; 387 struct vattr_lite *lvap; 388 int update; 389 390 vp = ap->a_vp; 391 lvap = ap->a_lvap; 392 393 ip = VTOI(vp); 394 pmp = ip->pmp; 395 396 retry: 397 update = spin_access_start(&ip->cluster_spin); 398 399 #if 0 400 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0]; 401 vap->va_fileid = ip->meta.inum; 402 #endif 403 lvap->va_mode = ip->meta.mode; 404 lvap->va_nlink = ip->meta.nlinks; 405 lvap->va_uid = hammer2_to_unix_xid(&ip->meta.uid); 406 lvap->va_gid = hammer2_to_unix_xid(&ip->meta.gid); 407 #if 0 408 vap->va_rmajor = 0; 409 vap->va_rminor = 0; 410 #endif 411 lvap->va_size = ip->meta.size; 412 #if 0 413 vap->va_blocksize = 
HAMMER2_PBUFSIZE; 414 #endif 415 lvap->va_flags = ip->meta.uflags; 416 lvap->va_type = hammer2_get_vtype(ip->meta.type); 417 #if 0 418 vap->va_filerev = 0; 419 vap->va_uid_uuid = ip->meta.uid; 420 vap->va_gid_uuid = ip->meta.gid; 421 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID | 422 VA_FSID_UUID_VALID; 423 #endif 424 425 if (__predict_false(spin_access_end(&ip->cluster_spin, update))) 426 goto retry; 427 428 return (0); 429 #endif 430 return (EOPNOTSUPP); 431 } 432 433 static 434 int 435 hammer2_vop_setattr(struct vop_setattr_args *ap) 436 { 437 #if 0 438 hammer2_inode_t *ip; 439 struct m_vnode *vp; 440 struct vattr *vap; 441 int error; 442 int kflags = 0; 443 uint64_t ctime; 444 445 vp = ap->a_vp; 446 vap = ap->a_vap; 447 hammer2_update_time(&ctime); 448 449 ip = VTOI(vp); 450 451 if (ip->pmp->ronly) 452 return (EROFS); 453 454 /* 455 * Normally disallow setattr if there is no space, unless we 456 * are in emergency mode (might be needed to chflags -R noschg 457 * files prior to removal). 
458 */ 459 if ((ip->pmp->flags & HAMMER2_PMPF_EMERG) == 0 && 460 hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1) { 461 return (ENOSPC); 462 } 463 464 hammer2_trans_init(ip->pmp, 0); 465 hammer2_inode_lock(ip, 0); 466 error = 0; 467 468 if (vap->va_flags != VNOVAL) { 469 uint32_t flags; 470 471 flags = ip->meta.uflags; 472 error = vop_helper_setattr_flags(&flags, vap->va_flags, 473 hammer2_to_unix_xid(&ip->meta.uid), 474 ap->a_cred); 475 if (error == 0) { 476 if (ip->meta.uflags != flags) { 477 hammer2_inode_modify(ip); 478 hammer2_spin_lock_update(&ip->cluster_spin); 479 ip->meta.uflags = flags; 480 ip->meta.ctime = ctime; 481 hammer2_spin_unlock_update(&ip->cluster_spin); 482 kflags |= NOTE_ATTRIB; 483 } 484 if (ip->meta.uflags & (IMMUTABLE | APPEND)) { 485 error = 0; 486 goto done; 487 } 488 } 489 goto done; 490 } 491 if (ip->meta.uflags & (IMMUTABLE | APPEND)) { 492 error = EPERM; 493 goto done; 494 } 495 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { 496 mode_t cur_mode = ip->meta.mode; 497 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid); 498 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid); 499 uuid_t uuid_uid; 500 uuid_t uuid_gid; 501 502 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid, 503 ap->a_cred, 504 &cur_uid, &cur_gid, &cur_mode); 505 if (error == 0) { 506 hammer2_guid_to_uuid(&uuid_uid, cur_uid); 507 hammer2_guid_to_uuid(&uuid_gid, cur_gid); 508 if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) || 509 bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) || 510 ip->meta.mode != cur_mode 511 ) { 512 hammer2_inode_modify(ip); 513 hammer2_spin_lock_update(&ip->cluster_spin); 514 ip->meta.uid = uuid_uid; 515 ip->meta.gid = uuid_gid; 516 ip->meta.mode = cur_mode; 517 ip->meta.ctime = ctime; 518 hammer2_spin_unlock_update(&ip->cluster_spin); 519 } 520 kflags |= NOTE_ATTRIB; 521 } 522 } 523 524 /* 525 * Resize the file 526 */ 527 if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) { 528 switch(vp->v_type) { 529 case 
VREG: 530 if (vap->va_size == ip->meta.size) 531 break; 532 if (vap->va_size < ip->meta.size) { 533 hammer2_mtx_ex(&ip->truncate_lock); 534 hammer2_truncate_file(ip, vap->va_size); 535 hammer2_mtx_unlock(&ip->truncate_lock); 536 kflags |= NOTE_WRITE; 537 } else { 538 hammer2_extend_file(ip, vap->va_size); 539 kflags |= NOTE_WRITE | NOTE_EXTEND; 540 } 541 hammer2_inode_modify(ip); 542 ip->meta.mtime = ctime; 543 vclrflags(vp, VLASTWRITETS); 544 break; 545 default: 546 error = EINVAL; 547 goto done; 548 } 549 } 550 #if 0 551 /* atime not supported */ 552 if (vap->va_atime.tv_sec != VNOVAL) { 553 hammer2_inode_modify(ip); 554 ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime); 555 kflags |= NOTE_ATTRIB; 556 } 557 #endif 558 if (vap->va_mode != (mode_t)VNOVAL) { 559 mode_t cur_mode = ip->meta.mode; 560 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid); 561 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid); 562 563 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred, 564 cur_uid, cur_gid, &cur_mode); 565 if (error == 0) { 566 hammer2_inode_modify(ip); 567 hammer2_spin_lock_update(&ip->cluster_spin); 568 ip->meta.mode = cur_mode; 569 ip->meta.ctime = ctime; 570 hammer2_spin_unlock_update(&ip->cluster_spin); 571 kflags |= NOTE_ATTRIB; 572 } 573 } 574 575 if (vap->va_mtime.tv_sec != VNOVAL) { 576 hammer2_inode_modify(ip); 577 ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime); 578 kflags |= NOTE_ATTRIB; 579 vclrflags(vp, VLASTWRITETS); 580 } 581 582 done: 583 /* 584 * If a truncation occurred we must call chain_sync() now in order 585 * to trim the related data chains, otherwise a later expansion can 586 * cause havoc. 587 * 588 * If an extend occured that changed the DIRECTDATA state, we must 589 * call inode_chain_sync now in order to prepare the inode's indirect 590 * block table. 591 * 592 * WARNING! 
This means we are making an adjustment to the inode's
	 * chain outside of sync/fsync, and not just to inode->meta, which
	 * may result in some consistency issues if a crash were to occur
	 * at just the wrong time.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ip->vp, kflags);

	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * Copy one directory entry into the uio.
 *
 * Returns 1 (without touching *error) if the entry record does not fit in
 * the remaining uio space, otherwise returns 0 with *error set to the
 * uiomove() result.
 */
static int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
    uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	/* M_ZERO so record padding is not left uninitialized */
	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int ndirent;
	int eofflag;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	ndirent = 0;
	eofflag = 0;
	error = 0;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle
artificial entries. To ensure that only positive 64 bit 681 * quantities are returned to userland we always strip off bit 63. 682 * The hash code is designed such that codes 0x0000-0x7FFF are not 683 * used, allowing us to use these codes for articial entries. 684 * 685 * Entry 0 is used for '.' and entry 1 is used for '..'. Do not 686 * allow '..' to cross the mount point into (e.g.) the super-root. 687 */ 688 if (saveoff == 0) { 689 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK; 690 r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, "."); 691 if (r) 692 goto done; 693 if (cookies) 694 cookies[cookie_index] = saveoff; 695 ++saveoff; 696 ++cookie_index; 697 ++ndirent; 698 if (cookie_index == ncookies) 699 goto done; 700 } 701 702 if (saveoff == 1) { 703 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK; 704 if (ip != ip->pmp->iroot) 705 inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK; 706 r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, ".."); 707 if (r) 708 goto done; 709 if (cookies) 710 cookies[cookie_index] = saveoff; 711 ++saveoff; 712 ++cookie_index; 713 ++ndirent; 714 if (cookie_index == ncookies) 715 goto done; 716 } 717 718 lkey = saveoff | HAMMER2_DIRHASH_VISIBLE; 719 if (hammer2_debug & 0x0020) 720 kprintf("readdir: lkey %016jx\n", lkey); 721 if (error) 722 goto done; 723 724 xop = hammer2_xop_alloc(ip, 0); 725 xop->lkey = lkey; 726 hammer2_xop_start(&xop->head, &hammer2_readdir_desc); 727 728 for (;;) { 729 const hammer2_inode_data_t *ripdata; 730 const char *dname; 731 int dtype; 732 733 error = hammer2_xop_collect(&xop->head, 0); 734 error = hammer2_error_to_errno(error); 735 if (error) { 736 break; 737 } 738 if (cookie_index == ncookies) 739 break; 740 if (hammer2_debug & 0x0020) 741 kprintf("cluster chain %p %p\n", 742 xop->head.cluster.focus, 743 (xop->head.cluster.focus ? 
744 xop->head.cluster.focus->data : (void *)-1)); 745 hammer2_cluster_bref(&xop->head.cluster, &bref); 746 747 if (bref.type == HAMMER2_BREF_TYPE_INODE) { 748 ripdata = &hammer2_xop_gdata(&xop->head)->ipdata; 749 dtype = hammer2_get_dtype(ripdata->meta.type); 750 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 751 r = vop_write_dirent(&error, uio, 752 ripdata->meta.inum & 753 HAMMER2_DIRHASH_USERMSK, 754 dtype, 755 ripdata->meta.name_len, 756 ripdata->filename); 757 hammer2_xop_pdata(&xop->head); 758 if (r) 759 break; 760 if (cookies) 761 cookies[cookie_index] = saveoff; 762 ++cookie_index; 763 ++ndirent; 764 } else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) { 765 uint16_t namlen; 766 767 dtype = hammer2_get_dtype(bref.embed.dirent.type); 768 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 769 namlen = bref.embed.dirent.namlen; 770 if (namlen <= sizeof(bref.check.buf)) { 771 dname = bref.check.buf; 772 } else { 773 dname = hammer2_xop_gdata(&xop->head)->buf; 774 } 775 r = vop_write_dirent(&error, uio, 776 bref.embed.dirent.inum, dtype, 777 namlen, dname); 778 if (namlen > sizeof(bref.check.buf)) 779 hammer2_xop_pdata(&xop->head); 780 if (r) 781 break; 782 if (cookies) 783 cookies[cookie_index] = saveoff; 784 ++cookie_index; 785 ++ndirent; 786 } else { 787 /* XXX chain error */ 788 kprintf("bad chain type readdir %d\n", bref.type); 789 } 790 } 791 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 792 if (error == ENOENT) { 793 error = 0; 794 eofflag = 1; 795 saveoff = (hammer2_key_t)-1; 796 } else { 797 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 798 } 799 done: 800 hammer2_inode_unlock(ip); 801 if (ap->a_eofflag) 802 *ap->a_eofflag = eofflag; 803 if (hammer2_debug & 0x0020) 804 kprintf("readdir: done at %016jx\n", saveoff); 805 uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE; 806 if (error && cookie_index == 0) { 807 if (cookies) { 808 kfree(cookies, M_TEMP); 809 *ap->a_ncookies = 0; 810 *ap->a_cookies = NULL; 811 } 812 } else { 813 if (cookies) { 814 
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	*ap->a_ndirent = ndirent;

	return (error);
}

/*
 * Front-end for hammer2_vop_readdir() used by makefs: wraps the caller's
 * buffer in a uio, forwards the call, and returns the updated directory
 * offset via *offsetp.  Entry count and EOF state are returned through
 * *ndirentp and *eofflagp.
 */
int
hammer2_readdir(struct m_vnode *vp, void *buf, size_t size, off_t *offsetp,
    int *ndirentp, int *eofflagp)
{
	int error;

	assert(buf);
	assert(size > 0);
	assert(size <= HAMMER2_PBUFSIZE);

	struct iovec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct uio uio = {
		.uio_iov = &iov,
		.uio_iovcnt = 1,
		.uio_offset = *offsetp,
		.uio_resid = size,
		.uio_segflg = UIO_USERSPACE,
		.uio_rw = UIO_READ,
		.uio_td = NULL,
	};
	struct vop_readdir_args ap = {
		.a_vp = vp,
		.a_uio = &uio,
		.a_cred = NULL,
		.a_eofflag = eofflagp,
		.a_ncookies = NULL,		/* cookies not used by makefs */
		.a_cookies = NULL,
		.a_ndirent = ndirentp,
	};

	error = hammer2_vop_readdir(&ap);
	*offsetp = uio.uio_offset;

	return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct m_vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	/* symlink target is stored as regular file data */
	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}

/*
 * Front-end for hammer2_vop_readlink() used by makefs: reads the symlink
 * target into the caller's buffer starting at offset 0.
 */
int
hammer2_readlink(struct m_vnode *vp, void *buf, size_t size)
{
	assert(buf);
	assert(size > 0);
	assert(size <= HAMMER2_PBUFSIZE);

	struct iovec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct uio uio = {
		.uio_iov = &iov,
		.uio_iovcnt = 1,
		.uio_offset = 0,
		.uio_resid = size,
		.uio_segflg = UIO_USERSPACE,
		.uio_rw = UIO_READ,
		.uio_td = NULL,
	};
	struct vop_readlink_args ap = {
		.a_vp = vp,
		.a_uio = &uio,
		.a_cred = NULL,
	};

	return hammer2_vop_readlink(&ap);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct m_vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type == VDIR)
		return (EISDIR);
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}

/*
 * Front-end for hammer2_vop_read() used by makefs: reads size bytes from
 * the regular file at the given offset into buf.
 */
int
hammer2_read(struct m_vnode *vp, void *buf, size_t size, off_t offset)
{
	assert(buf);
	assert(size > 0);
	assert(size <= HAMMER2_PBUFSIZE);

	struct iovec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct uio uio = {
		.uio_iov = &iov,
		.uio_iovcnt = 1,
		.uio_offset = offset,
		.uio_resid = size,
		.uio_segflg = UIO_USERSPACE,
		.uio_rw = UIO_READ,
		.uio_td = NULL,
	};
	struct vop_read_args ap = {
		.a_vp = vp,
		.a_uio = &uio,
		.a_ioflag = 0,
		.a_cred = NULL,
	};

	return hammer2_vop_read(&ap);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	//thread_t td;
	struct m_vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int ioflag;

	/*
	 * Write operations supported on this vnode?
987 */ 988 vp = ap->a_vp; 989 if (vp->v_type != VREG) 990 return (EINVAL); 991 992 /* 993 * Misc 994 */ 995 ip = VTOI(vp); 996 ioflag = ap->a_ioflag; 997 uio = ap->a_uio; 998 error = 0; 999 if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG)) 1000 return (EROFS); 1001 switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) { 1002 case 2: 1003 return (ENOSPC); 1004 case 1: 1005 ioflag |= IO_DIRECT; /* semi-synchronous */ 1006 /* fall through */ 1007 default: 1008 break; 1009 } 1010 1011 seqcount = ioflag >> IO_SEQSHIFT; 1012 1013 /* 1014 * Check resource limit 1015 */ 1016 /* 1017 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc && 1018 uio->uio_offset + uio->uio_resid > 1019 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) { 1020 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ); 1021 return (EFBIG); 1022 } 1023 */ 1024 1025 /* 1026 * The transaction interlocks against flush initiations 1027 * (note: but will run concurrently with the actual flush). 1028 * 1029 * To avoid deadlocking against the VM system, we must flag any 1030 * transaction related to the buffer cache or other direct 1031 * VM page manipulation. 
	 */
	if (uio->uio_segflg == UIO_NOCOPY) {
		assert(0); /* no UIO_NOCOPY in makefs */
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	} else {
		hammer2_trans_init(ip->pmp, 0);
	}
	error = hammer2_write_file(ip, uio, ioflag, seqcount);
	if (uio->uio_segflg == UIO_NOCOPY) {
		assert(0); /* no UIO_NOCOPY in makefs */
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE |
					    HAMMER2_TRANS_SIDEQ);
	} else
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);

	return (error);
}

/*
 * Front-end for hammer2_vop_write() used by makefs: writes size bytes
 * from buf to the regular file at the given offset.
 */
int
hammer2_write(struct m_vnode *vp, void *buf, size_t size, off_t offset)
{
	assert(buf);
	assert(size > 0);
	assert(size <= HAMMER2_PBUFSIZE);

	struct iovec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct uio uio = {
		.uio_iov = &iov,
		.uio_iovcnt = 1,
		.uio_offset = offset,
		.uio_resid = size,
		.uio_segflg = UIO_USERSPACE,
		.uio_rw = UIO_WRITE,
		.uio_td = NULL,
	};
	struct vop_write_args ap = {
		.a_vp = vp,
		.a_uio = &uio,
		.a_ioflag = 0,
		.a_cred = NULL,
	};

	return hammer2_vop_write(&ap);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct m_buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
1101 */ 1102 hammer2_mtx_sh(&ip->lock); 1103 hammer2_mtx_sh(&ip->truncate_lock); 1104 size = ip->meta.size; 1105 hammer2_mtx_unlock(&ip->lock); 1106 1107 while (uio->uio_resid > 0 && uio->uio_offset < size) { 1108 hammer2_key_t lbase; 1109 hammer2_key_t leof; 1110 int lblksize; 1111 int loff; 1112 int n; 1113 1114 lblksize = hammer2_calc_logical(ip, uio->uio_offset, 1115 &lbase, &leof); 1116 #if 0 1117 #if 1 1118 bp = NULL; 1119 error = cluster_readx(ip->vp, leof, lbase, lblksize, 1120 B_NOTMETA | B_KVABIO, 1121 uio->uio_resid, 1122 seqcount * MAXBSIZE, 1123 &bp); 1124 #else 1125 if (uio->uio_segflg == UIO_NOCOPY) { 1126 bp = getblk(ip->vp, lbase, lblksize, 1127 GETBLK_BHEAVY | GETBLK_KVABIO, 0); 1128 if (bp->b_flags & B_CACHE) { 1129 int i; 1130 int j = 0; 1131 if (bp->b_xio.xio_npages != 16) 1132 kprintf("NPAGES BAD\n"); 1133 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 1134 vm_page_t m; 1135 m = bp->b_xio.xio_pages[i]; 1136 if (m == NULL || m->valid == 0) { 1137 kprintf("bp %016jx %016jx pg %d inv", 1138 lbase, leof, i); 1139 if (m) 1140 kprintf("m->object %p/%p", m->object, ip->vp->v_object); 1141 kprintf("\n"); 1142 j = 1; 1143 } 1144 } 1145 if (j) 1146 kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error); 1147 } 1148 bqrelse(bp); 1149 } 1150 error = bread_kvabio(ip->vp, lbase, lblksize, &bp); 1151 #endif 1152 #else 1153 bp = getblkx(ip->vp, lbase, lblksize, 1154 GETBLK_BHEAVY | GETBLK_KVABIO, 0); 1155 bp->b_cmd = BUF_CMD_READ; 1156 1157 struct bio bio; 1158 bio.bio_buf = bp; 1159 bio.bio_offset = lbase; 1160 1161 struct vop_strategy_args ap; 1162 ap.a_vp = ip->vp; 1163 ap.a_bio = &bio; 1164 1165 error = hammer2_vop_strategy(&ap); 1166 assert(!error); 1167 #endif 1168 if (error) { 1169 brelse(bp); 1170 break; 1171 } 1172 bkvasync(bp); 1173 loff = (int)(uio->uio_offset - lbase); 1174 n = lblksize - loff; 1175 if (n > uio->uio_resid) 1176 n = uio->uio_resid; 1177 if (n > size - uio->uio_offset) 1178 n = (int)(size - uio->uio_offset); 1179 
//bp->b_flags |= B_AGE; 1180 uiomovebp(bp, bp->b_data + loff, n, uio); 1181 bqrelse(bp); 1182 } 1183 hammer2_mtx_unlock(&ip->truncate_lock); 1184 1185 return (error); 1186 } 1187 1188 /* 1189 * Write to the file represented by the inode via the logical buffer cache. 1190 * The inode may represent a regular file or a symlink. 1191 * 1192 * The inode must not be locked. 1193 */ 1194 static 1195 int 1196 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, 1197 int ioflag, int seqcount) 1198 { 1199 hammer2_key_t old_eof; 1200 hammer2_key_t new_eof; 1201 struct m_buf *bp; 1202 int kflags; 1203 int error; 1204 int modified; 1205 1206 /* 1207 * Setup if append 1208 * 1209 * WARNING! Assumes that the kernel interlocks size changes at the 1210 * vnode level. 1211 */ 1212 hammer2_mtx_ex(&ip->lock); 1213 hammer2_mtx_sh(&ip->truncate_lock); 1214 if (ioflag & IO_APPEND) 1215 uio->uio_offset = ip->meta.size; 1216 old_eof = ip->meta.size; 1217 1218 /* 1219 * Extend the file if necessary. If the write fails at some point 1220 * we will truncate it back down to cover as much as we were able 1221 * to write. 1222 * 1223 * Doing this now makes it easier to calculate buffer sizes in 1224 * the loop. 1225 */ 1226 kflags = 0; 1227 error = 0; 1228 modified = 0; 1229 1230 if (uio->uio_offset + uio->uio_resid > old_eof) { 1231 new_eof = uio->uio_offset + uio->uio_resid; 1232 modified = 1; 1233 hammer2_extend_file(ip, new_eof); 1234 kflags |= NOTE_EXTEND; 1235 } else { 1236 new_eof = old_eof; 1237 } 1238 hammer2_mtx_unlock(&ip->lock); 1239 1240 /* 1241 * UIO write loop 1242 */ 1243 while (uio->uio_resid > 0) { 1244 hammer2_key_t lbase; 1245 int trivial; 1246 int endofblk; 1247 int lblksize; 1248 int loff; 1249 int n; 1250 1251 /* 1252 * Don't allow the buffer build to blow out the buffer 1253 * cache. 
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= MAXBSIZE);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;
		/*
		 * makefs only issues full-block or EOF-bounded writes,
		 * so the partial-overwrite (bread) path below is never
		 * needed; force the trivial path unconditionally.
		 */
		trivial = 1; /* force trivial for makefs */

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			assert(0); /* no UIO_NOCOPY in makefs */
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblkx(ip->vp, lbase, lblksize,
				     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			/*
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread_kvabio(ip->vp, lbase,
						     lblksize, &bp);
			}
			*/
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblkx(ip->vp, lbase, lblksize,
				     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			/*
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
			*/
		} else {
			assert(0); /* no partial write in makefs */
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		bkvasync(bp);
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 *
		 * makefs: the if (1) branch below writes each block
		 * synchronously through hammer2_vop_strategy(); the
		 * kernel's async/clustered variants are kept for
		 * reference but asserted unreachable.
		 */
		if (1) {
			bp->b_cmd = BUF_CMD_WRITE;

			struct bio bio;
			bio.bio_buf = bp;
			bio.bio_offset = lbase;

			struct vop_strategy_args ap;
			ap.a_vp = ip->vp;
			ap.a_bio = &bio;

			error = hammer2_vop_strategy(&ap);
			assert(!error);

			brelse(bp);
		} else if (ioflag & IO_SYNC) {
			assert(0);
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			assert(0);
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			assert(0);
			bawrite(bp);
		} else if (0 /*ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW*/) {
			assert(0);
			bdwrite(bp);
		} else {
			assert(0);
#if 0
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
#endif
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through
	 * the entire write is a failure and we have to back-up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);		/* note lock order */
		hammer2_mtx_ex(&ip->truncate_lock);	/* note lock order */
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		struct m_vnode *vp = ip->vp;

		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		if (uio->uio_segflg == UIO_NOCOPY) {
			assert(0); /* no UIO_NOCOPY in makefs */
			/*
			if (vp->v_flag & VLASTWRITETS) {
				ip->meta.mtime =
				    (unsigned long)vp->v_lastwrite_ts.tv_sec *
				    1000000 +
				    vp->v_lastwrite_ts.tv_nsec / 1000;
			}
			*/
		} else {
			hammer2_update_time(&ip->meta.mtime, true);
			vclrflags(vp, VLASTWRITETS);
		}

#if 0
		/*
		 * REMOVED - handled by hammer2_extend_file().  Do not issue
		 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
		 * state changes.
		 *
		 * Under normal conditions we only issue a chain_sync if
		 * the inode's DIRECTDATA state changed.
		 */
		if (ip->flags & HAMMER2_INODE_RESIZED)
			hammer2_inode_chain_sync(ip);
#endif
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}

/*
 * Truncate the size of a file.  The inode must be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	/* temporarily drop ip->lock around nvtruncbuf() (see WARNING) */
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}

/*
 * Extend the size of a file.  The inode must be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;
	int error;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	/*
	 * We must issue a chain_sync() when the DIRECTDATA state changes
	 * to prevent confusion between the flush code and the in-memory
	 * state.  This is not perfect because we are doing it outside of
	 * a sync/fsync operation, so it might not be fully synchronized
	 * with the meta-data topology flush.
	 *
	 * We must retain and re-dirty the buffer cache buffer containing
	 * the direct data so it can be written to a real block.  It should
	 * not be possible for a bread error to occur since the original data
	 * is extracted from the inode structure directly.
	 */
	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		if (osize) {
			assert(0); /* no such transition in makefs */
			struct m_buf *bp;

			oblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
			error = bread_kvabio(ip->vp, 0, oblksize, &bp);
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
			if (error == 0) {
				bheavy(bp);
				bdwrite(bp);
			} else {
				brelse(bp);
			}
		} else {
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
		}
	}
	/* drop ip->lock around buffer-cache resize (see WARNING above) */
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}

/*
 * Resolve a name (ap->a_nch) in directory (ap->a_dvp) to a vnode,
 * returning it in *ap->a_vpp.  Runs the nresolve XOP against the
 * cluster and converts the result to an in-memory inode/vnode.
 */
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct m_vnode *vp;
	int error;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);

	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			*ap->a_vpp = vp;
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	*/

	return error;
}

/*
 * makefs entry point: look up (name) of length (nlen) under directory
 * vnode (dvp).  Builds a transient namecache handle on the stack and
 * dispatches through hammer2_vop_nresolve(); *vpp receives the vnode
 * on success.
 */
int
hammer2_nresolve(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen)
{
	*vpp = NULL;
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nresolve_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
	};

	return hammer2_vop_nresolve(&ap);
}

/*
 * ".." lookup.  Disabled in makefs (kernel implementation retained
 * under #if 0 for reference); always reports EOPNOTSUPP.
 */
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
#if 0
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	return error;
#endif
	return (EOPNOTSUPP);
}

/*
 * Create a directory (ap->a_nch) under (ap->a_dvp), returning the new
 * vnode in *ap->a_vpp.
 */
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct
namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
		/* returns UNIX error code */
	}
	if (error) {
		if (nip) {
			/* undo the half-created inode */
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		/*
		 * inode_depend() must occur before the igetv() because
		 * the igetv() can temporarily release the inode lock.
		 */
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 *
	 * We can use a shared inode lock and allow the meta.mtime update
	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime, true);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}

/*
 * makefs entry point: create directory (name) with permission bits
 * (mode) under (dvp); *vpp receives the new directory vnode.  Owner
 * uid/gid are left as VNOVAL so the inode-creation code applies its
 * defaults.
 */
int
hammer2_nmkdir(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
	       mode_t mode)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VDIR,
		.va_mode = mode & ~S_IFMT,
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmkdir_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmkdir(&ap);
}

/* open is not needed by makefs; kernel version kept under #if 0 */
static
int
hammer2_vop_open(struct vop_open_args *ap)
{
#if 0
	return vop_stdopen(ap);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 *
 * Advisory locking is not needed by makefs; kernel version kept
 * under #if 0.
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
#if 0
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
#endif
	return (EOPNOTSUPP);
}

/* close is not needed by makefs; kernel version kept under #if 0 */
static
int
hammer2_vop_close(struct vop_close_args *ap)
{
#if 0
	return vop_stdclose(ap);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp,
 nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	int error;
	uint64_t cmtime;

	/* We know it's the same in makefs */
	/*
	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);
	*/

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly || (tdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock4(tdip, ip, NULL, NULL);

	hammer2_update_time(&cmtime, true);

	/*
	 * Create the directory entry and bump nlinks.
	 * Also update ip's ctime.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
		ip->meta.ctime = cmtime;
	}
	if (error == 0) {
		/*
		 * Update dip's [cm]time
		 */
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = cmtime;
		tdip->meta.ctime = cmtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

	return error;
}

/*
 * makefs entry point: create a hardlink named (name) under directory
 * (dvp) pointing at the file already represented by (vp).
 */
int
hammer2_nlink(struct m_vnode *dvp, struct m_vnode *vp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nlink_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vp = vp,
	};

	return hammer2_vop_nlink(&ap);
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);

	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			/* undo the half-created inode */
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime, true);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

/*
 * makefs entry point: create regular file (name) with permission bits
 * (mode) under (dvp); *vpp receives the new vnode.  Owner uid/gid are
 * left as VNOVAL so the inode-creation code applies its defaults.
 */
int
hammer2_ncreate(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
		mode_t mode)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VREG,
		.va_mode = mode & ~S_IFMT,
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_ncreate_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_ncreate(&ap);
}

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			/* undo the half-created inode */
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime, true);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

/*
 * makefs entry point: create a device/special node of vnode type
 * (type) named (name) under (dvp); *vpp receives the new vnode.
 */
int
hammer2_nmknod(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
	       int type, mode_t mode)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = type,
		.va_mode = mode & ~S_IFMT,
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmknod_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmknod(&ap);
}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct
vop_nsymlink_args *ap) 2191 { 2192 hammer2_inode_t *dip; 2193 hammer2_inode_t *nip; 2194 struct namecache *ncp; 2195 const char *name; 2196 size_t name_len; 2197 hammer2_tid_t inum; 2198 int error; 2199 2200 dip = VTOI(ap->a_dvp); 2201 if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG)) 2202 return (EROFS); 2203 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1) 2204 return (ENOSPC); 2205 2206 ncp = ap->a_nch->ncp; 2207 name = ncp->nc_name; 2208 name_len = ncp->nc_nlen; 2209 hammer2_trans_init(dip->pmp, 0); 2210 2211 ap->a_vap->va_type = VLNK; /* enforce type */ 2212 2213 /* 2214 * Create the softlink as an inode and then create the directory 2215 * entry. 2216 * 2217 * dip must be locked before nip to avoid deadlock. 2218 */ 2219 inum = hammer2_trans_newinum(dip->pmp); 2220 2221 hammer2_inode_lock(dip, 0); 2222 nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred, 2223 inum, &error); 2224 if (error == 0) { 2225 error = hammer2_dirent_create(dip, name, name_len, 2226 nip->meta.inum, nip->meta.type); 2227 } 2228 if (error) { 2229 if (nip) { 2230 hammer2_inode_unlink_finisher(nip, NULL); 2231 hammer2_inode_unlock(nip); 2232 nip = NULL; 2233 } 2234 *ap->a_vpp = NULL; 2235 hammer2_inode_unlock(dip); 2236 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 2237 return error; 2238 } 2239 hammer2_inode_depend(dip, nip); /* before igetv */ 2240 *ap->a_vpp = hammer2_igetv(nip, &error); 2241 2242 /* 2243 * Build the softlink (~like file data) and finalize the namecache. 
2244 */ 2245 if (error == 0) { 2246 size_t bytes; 2247 struct uio auio; 2248 struct iovec aiov; 2249 2250 bytes = strlen(ap->a_target); 2251 2252 hammer2_inode_unlock(nip); 2253 bzero(&auio, sizeof(auio)); 2254 bzero(&aiov, sizeof(aiov)); 2255 auio.uio_iov = &aiov; 2256 auio.uio_segflg = UIO_SYSSPACE; 2257 auio.uio_rw = UIO_WRITE; 2258 auio.uio_resid = bytes; 2259 auio.uio_iovcnt = 1; 2260 auio.uio_td = curthread; 2261 aiov.iov_base = ap->a_target; 2262 aiov.iov_len = bytes; 2263 error = hammer2_write_file(nip, &auio, IO_APPEND, 0); 2264 /* XXX handle error */ 2265 error = 0; 2266 } else { 2267 hammer2_inode_unlock(nip); 2268 } 2269 2270 /* 2271 * Update dip's mtime 2272 */ 2273 if (error == 0) { 2274 uint64_t mtime; 2275 2276 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 2277 hammer2_update_time(&mtime, true); 2278 hammer2_inode_modify(dip); 2279 dip->meta.mtime = mtime; 2280 /*hammer2_inode_unlock(dip);*/ 2281 } 2282 hammer2_inode_unlock(dip); 2283 2284 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 2285 2286 /* 2287 * Finalize namecache 2288 */ 2289 if (error == 0) { 2290 cache_setunresolved(ap->a_nch); 2291 cache_setvp(ap->a_nch, *ap->a_vpp); 2292 hammer2_knote(ap->a_dvp, NOTE_WRITE); 2293 } 2294 return error; 2295 } 2296 2297 int 2298 hammer2_nsymlink(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen, 2299 char *target, mode_t mode) 2300 { 2301 struct namecache nc = { 2302 .nc_name = name, 2303 .nc_nlen = nlen, 2304 }; 2305 struct nchandle nch = { 2306 .ncp = &nc, 2307 }; 2308 uid_t va_uid = VNOVAL; //getuid(); 2309 uid_t va_gid = VNOVAL; //getgid(); 2310 struct vattr va = { 2311 .va_type = VDIR, 2312 .va_mode = mode & ~S_IFMT, 2313 .va_uid = va_uid, 2314 .va_gid = va_gid, 2315 }; 2316 struct vop_nsymlink_args ap = { 2317 .a_nch = &nch, 2318 .a_dvp = dvp, 2319 .a_vpp = vpp, 2320 .a_vap = &va, 2321 .a_target = target, 2322 }; 2323 2324 return hammer2_vop_nsymlink(&ap); 2325 } 2326 2327 /* 2328 * hammer2_vop_nremove { nch, dvp, cred } 
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	/*
	 * File removal is not needed by makefs; the kernel implementation
	 * is retained under #if 0 for reference and EOPNOTSUPP is
	 * returned unconditionally.
	 *
	 * NOTE(review): the dead code calls the one-argument
	 * hammer2_update_time(&mtime) while live code in this file uses
	 * the two-argument form — re-check if this path is ever enabled.
	 */
#if 0
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct m_vnode *vprecycle;
	struct namecache *ncp;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	ncp = ap->a_nch->ncp;

	if (hammer2_debug_inode && dip->meta.inum == hammer2_debug_inode) {
		kprintf("hammer2: attempt to delete inside debug inode: %s\n",
			ncp->nc_name);
		while (hammer2_debug_inode &&
		       dip->meta.inum == hammer2_debug_inode) {
			tsleep(&hammer2_debug_inode, 0, "h2debug", hz*5);
		}
	}

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	vprecycle = NULL;

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			if (hammer2_debug_inode &&
			    ip->meta.inum == hammer2_debug_inode) {
				kprintf("hammer2: attempt to delete debug "
					"inode!\n");
				while (hammer2_debug_inode &&
				       ip->meta.inum == hammer2_debug_inode) {
					tsleep(&hammer2_debug_inode, 0,
					       "h2debug", hz*5);
				}
			}
			hammer2_inode_unlink_finisher(ip, &vprecycle);
			hammer2_inode_depend(dip, ip); /* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);

	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 *
 * Directory removal is not needed by makefs; kernel implementation
 * retained under #if 0, EOPNOTSUPP returned unconditionally.
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
#if 0
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	struct m_vnode *vprecycle;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	vprecycle = NULL;

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, &vprecycle);
			hammer2_inode_depend(dip, ip); /* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);
	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
#if 0
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;	/* source directory */
	hammer2_inode_t *tdip;	/* target directory */
hammer2_inode_t *ip; /* file being renamed */ 2530 hammer2_inode_t *tip; /* replaced target during rename or NULL */ 2531 struct m_vnode *vprecycle; 2532 const char *fname; 2533 size_t fname_len; 2534 const char *tname; 2535 size_t tname_len; 2536 int error; 2537 int update_tdip; 2538 int update_fdip; 2539 hammer2_key_t tlhc; 2540 2541 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount) 2542 return(EXDEV); 2543 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount) 2544 return(EXDEV); 2545 2546 fdip = VTOI(ap->a_fdvp); /* source directory */ 2547 tdip = VTOI(ap->a_tdvp); /* target directory */ 2548 2549 if (fdip->pmp->ronly || (fdip->pmp->flags & HAMMER2_PMPF_EMERG)) 2550 return (EROFS); 2551 if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1) 2552 return (ENOSPC); 2553 2554 fncp = ap->a_fnch->ncp; /* entry name in source */ 2555 fname = fncp->nc_name; 2556 fname_len = fncp->nc_nlen; 2557 2558 tncp = ap->a_tnch->ncp; /* entry name in target */ 2559 tname = tncp->nc_name; 2560 tname_len = tncp->nc_nlen; 2561 2562 hammer2_trans_init(tdip->pmp, 0); 2563 2564 update_tdip = 0; 2565 update_fdip = 0; 2566 2567 ip = VTOI(fncp->nc_vp); 2568 hammer2_inode_ref(ip); /* extra ref */ 2569 2570 /* 2571 * Lookup the target name to determine if a directory entry 2572 * is being overwritten. We only hold related inode locks 2573 * temporarily, the operating system is expected to protect 2574 * against rename races. 2575 */ 2576 tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL; 2577 if (tip) 2578 hammer2_inode_ref(tip); /* extra ref */ 2579 2580 /* 2581 * Can return NULL and error == EXDEV if the common parent 2582 * crosses a directory with the xlink flag set. 2583 * 2584 * For now try to avoid deadlocks with a simple pointer address 2585 * test. (tip) can be NULL. 
2586 */ 2587 error = 0; 2588 { 2589 hammer2_inode_t *ip1 = fdip; 2590 hammer2_inode_t *ip2 = tdip; 2591 hammer2_inode_t *ip3 = ip; 2592 hammer2_inode_t *ip4 = tip; /* may be NULL */ 2593 2594 if (fdip > tdip) { 2595 ip1 = tdip; 2596 ip2 = fdip; 2597 } 2598 if (tip && ip > tip) { 2599 ip3 = tip; 2600 ip4 = ip; 2601 } 2602 hammer2_inode_lock4(ip1, ip2, ip3, ip4); 2603 } 2604 2605 /* 2606 * Resolve the collision space for (tdip, tname, tname_len) 2607 * 2608 * tdip must be held exclusively locked to prevent races since 2609 * multiple filenames can end up in the same collision space. 2610 */ 2611 { 2612 hammer2_xop_scanlhc_t *sxop; 2613 hammer2_tid_t lhcbase; 2614 2615 tlhc = hammer2_dirhash(tname, tname_len); 2616 lhcbase = tlhc; 2617 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING); 2618 sxop->lhc = tlhc; 2619 hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc); 2620 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) { 2621 if (tlhc != sxop->head.cluster.focus->bref.key) 2622 break; 2623 ++tlhc; 2624 } 2625 error = hammer2_error_to_errno(error); 2626 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP); 2627 2628 if (error) { 2629 if (error != ENOENT) 2630 goto done2; 2631 ++tlhc; 2632 error = 0; 2633 } 2634 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) { 2635 error = ENOSPC; 2636 goto done2; 2637 } 2638 } 2639 2640 /* 2641 * Ready to go, issue the rename to the backend. Note that meta-data 2642 * updates to the related inodes occur separately from the rename 2643 * operation. 2644 * 2645 * NOTE: While it is not necessary to update ip->meta.name*, doing 2646 * so aids catastrophic recovery and debugging. 
2647 */ 2648 if (error == 0) { 2649 hammer2_xop_nrename_t *xop4; 2650 2651 xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING); 2652 xop4->lhc = tlhc; 2653 xop4->ip_key = ip->meta.name_key; 2654 hammer2_xop_setip2(&xop4->head, ip); 2655 hammer2_xop_setip3(&xop4->head, tdip); 2656 if (tip && tip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) 2657 hammer2_xop_setip4(&xop4->head, tip); 2658 hammer2_xop_setname(&xop4->head, fname, fname_len); 2659 hammer2_xop_setname2(&xop4->head, tname, tname_len); 2660 hammer2_xop_start(&xop4->head, &hammer2_nrename_desc); 2661 2662 error = hammer2_xop_collect(&xop4->head, 0); 2663 error = hammer2_error_to_errno(error); 2664 hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP); 2665 2666 if (error == ENOENT) 2667 error = 0; 2668 2669 /* 2670 * Update inode meta-data. 2671 * 2672 * WARNING! The in-memory inode (ip) structure does not 2673 * maintain a copy of the inode's filename buffer. 2674 */ 2675 if (error == 0 && 2676 (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) { 2677 hammer2_inode_modify(ip); 2678 ip->meta.name_len = tname_len; 2679 ip->meta.name_key = tlhc; 2680 } 2681 if (error == 0) { 2682 hammer2_inode_modify(ip); 2683 ip->meta.iparent = tdip->meta.inum; 2684 } 2685 update_fdip = 1; 2686 update_tdip = 1; 2687 } 2688 2689 done2: 2690 /* 2691 * If no error, the backend has replaced the target directory entry. 2692 * We must adjust nlinks on the original replace target if it exists. 2693 */ 2694 vprecycle = NULL; 2695 if (error == 0 && tip) { 2696 hammer2_inode_unlink_finisher(tip, &vprecycle); 2697 } 2698 2699 /* 2700 * Update directory mtimes to represent the something changed. 
2701 */ 2702 if (update_fdip || update_tdip) { 2703 uint64_t mtime; 2704 2705 hammer2_update_time(&mtime); 2706 if (update_fdip) { 2707 hammer2_inode_modify(fdip); 2708 fdip->meta.mtime = mtime; 2709 } 2710 if (update_tdip) { 2711 hammer2_inode_modify(tdip); 2712 tdip->meta.mtime = mtime; 2713 } 2714 } 2715 if (tip) { 2716 hammer2_inode_unlock(tip); 2717 hammer2_inode_drop(tip); 2718 } 2719 hammer2_inode_unlock(ip); 2720 hammer2_inode_unlock(tdip); 2721 hammer2_inode_unlock(fdip); 2722 hammer2_inode_drop(ip); 2723 hammer2_trans_done(tdip->pmp, HAMMER2_TRANS_SIDEQ); 2724 2725 /* 2726 * Issue the namecache update after unlocking all the internal 2727 * hammer2 structures, otherwise we might deadlock. 2728 * 2729 * WARNING! The target namespace must be updated atomically, 2730 * and we depend on cache_rename() to handle that for 2731 * us. Do not do a separate cache_unlink() because 2732 * that leaves a small window of opportunity for other 2733 * threads to allocate the target namespace before we 2734 * manage to complete our rename. 2735 * 2736 * WARNING! cache_rename() (and cache_unlink()) will properly 2737 * set VREF_FINALIZE on any attached vnode. Do not 2738 * call cache_setunresolved() manually before-hand as 2739 * this will prevent the flag from being set later via 2740 * cache_rename(). If VREF_FINALIZE is not properly set 2741 * and the inode is no longer in the topology, related 2742 * chains can remain dirty indefinitely. 
2743 */ 2744 if (error == 0 && tip) { 2745 /*cache_unlink(ap->a_tnch); see above */ 2746 /*cache_setunresolved(ap->a_tnch); see above */ 2747 } 2748 if (error == 0) { 2749 cache_rename(ap->a_fnch, ap->a_tnch); 2750 hammer2_knote(ap->a_fdvp, NOTE_WRITE); 2751 hammer2_knote(ap->a_tdvp, NOTE_WRITE); 2752 hammer2_knote(fncp->nc_vp, NOTE_RENAME); 2753 } 2754 if (vprecycle) 2755 hammer2_inode_vprecycle(vprecycle); 2756 2757 return (error); 2758 #endif 2759 return (EOPNOTSUPP); 2760 } 2761 2762 /* 2763 * hammer2_vop_ioctl { vp, command, data, fflag, cred } 2764 */ 2765 static 2766 int 2767 hammer2_vop_ioctl(struct vop_ioctl_args *ap) 2768 { 2769 #if 0 2770 hammer2_inode_t *ip; 2771 int error; 2772 2773 ip = VTOI(ap->a_vp); 2774 2775 error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data, 2776 ap->a_fflag, ap->a_cred); 2777 return (error); 2778 #endif 2779 return (EOPNOTSUPP); 2780 } 2781 2782 static 2783 int 2784 hammer2_vop_mountctl(struct vop_mountctl_args *ap) 2785 { 2786 #if 0 2787 struct mount *mp; 2788 hammer2_pfs_t *pmp; 2789 int rc; 2790 2791 switch (ap->a_op) { 2792 case (MOUNTCTL_SET_EXPORT): 2793 mp = ap->a_head.a_ops->head.vv_mount; 2794 pmp = MPTOPMP(mp); 2795 2796 if (ap->a_ctllen != sizeof(struct export_args)) 2797 rc = (EINVAL); 2798 else 2799 rc = vfs_export(mp, &pmp->export, 2800 (const struct export_args *)ap->a_ctl); 2801 break; 2802 default: 2803 rc = vop_stdmountctl(ap); 2804 break; 2805 } 2806 return (rc); 2807 #endif 2808 return (EOPNOTSUPP); 2809 } 2810 2811 /* 2812 * KQFILTER 2813 */ 2814 /* 2815 static void filt_hammer2detach(struct knote *kn); 2816 static int filt_hammer2read(struct knote *kn, long hint); 2817 static int filt_hammer2write(struct knote *kn, long hint); 2818 static int filt_hammer2vnode(struct knote *kn, long hint); 2819 2820 static struct filterops hammer2read_filtops = 2821 { FILTEROP_ISFD | FILTEROP_MPSAFE, 2822 NULL, filt_hammer2detach, filt_hammer2read }; 2823 static struct filterops hammer2write_filtops = 2824 { 
FILTEROP_ISFD | FILTEROP_MPSAFE, 2825 NULL, filt_hammer2detach, filt_hammer2write }; 2826 static struct filterops hammer2vnode_filtops = 2827 { FILTEROP_ISFD | FILTEROP_MPSAFE, 2828 NULL, filt_hammer2detach, filt_hammer2vnode }; 2829 */ 2830 2831 static 2832 int 2833 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap) 2834 { 2835 #if 0 2836 struct m_vnode *vp = ap->a_vp; 2837 struct knote *kn = ap->a_kn; 2838 2839 switch (kn->kn_filter) { 2840 case EVFILT_READ: 2841 kn->kn_fop = &hammer2read_filtops; 2842 break; 2843 case EVFILT_WRITE: 2844 kn->kn_fop = &hammer2write_filtops; 2845 break; 2846 case EVFILT_VNODE: 2847 kn->kn_fop = &hammer2vnode_filtops; 2848 break; 2849 default: 2850 return (EOPNOTSUPP); 2851 } 2852 2853 kn->kn_hook = (caddr_t)vp; 2854 2855 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 2856 2857 return(0); 2858 #endif 2859 return (EOPNOTSUPP); 2860 } 2861 2862 #if 0 2863 static void 2864 filt_hammer2detach(struct knote *kn) 2865 { 2866 struct m_vnode *vp = (void *)kn->kn_hook; 2867 2868 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 2869 } 2870 2871 static int 2872 filt_hammer2read(struct knote *kn, long hint) 2873 { 2874 struct m_vnode *vp = (void *)kn->kn_hook; 2875 hammer2_inode_t *ip = VTOI(vp); 2876 off_t off; 2877 2878 if (hint == NOTE_REVOKE) { 2879 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 2880 return(1); 2881 } 2882 off = ip->meta.size - kn->kn_fp->f_offset; 2883 kn->kn_data = (off < INTPTR_MAX) ? 
off : INTPTR_MAX; 2884 if (kn->kn_sfflags & NOTE_OLDAPI) 2885 return(1); 2886 return (kn->kn_data != 0); 2887 } 2888 2889 2890 static int 2891 filt_hammer2write(struct knote *kn, long hint) 2892 { 2893 if (hint == NOTE_REVOKE) 2894 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 2895 kn->kn_data = 0; 2896 return (1); 2897 } 2898 2899 static int 2900 filt_hammer2vnode(struct knote *kn, long hint) 2901 { 2902 if (kn->kn_sfflags & hint) 2903 kn->kn_fflags |= hint; 2904 if (hint == NOTE_REVOKE) { 2905 kn->kn_flags |= (EV_EOF | EV_NODATA); 2906 return (1); 2907 } 2908 return (kn->kn_fflags != 0); 2909 } 2910 #endif 2911 2912 /* 2913 * FIFO VOPS 2914 */ 2915 static 2916 int 2917 hammer2_vop_markatime(struct vop_markatime_args *ap) 2918 { 2919 #if 0 2920 hammer2_inode_t *ip; 2921 struct m_vnode *vp; 2922 2923 vp = ap->a_vp; 2924 ip = VTOI(vp); 2925 2926 if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG)) 2927 return (EROFS); 2928 return(0); 2929 #endif 2930 return (EOPNOTSUPP); 2931 } 2932 2933 static 2934 int 2935 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap) 2936 { 2937 #if 0 2938 int error; 2939 2940 error = VOCALL(&fifo_vnode_vops, &ap->a_head); 2941 if (error) 2942 error = hammer2_vop_kqfilter(ap); 2943 return(error); 2944 #endif 2945 return (EOPNOTSUPP); 2946 } 2947 2948 /* 2949 * VOPS vector 2950 */ 2951 struct vop_ops hammer2_vnode_vops = { 2952 .vop_default = vop_defaultop, 2953 .vop_fsync = hammer2_vop_fsync, 2954 .vop_getpages = vop_stdgetpages, 2955 .vop_putpages = vop_stdputpages, 2956 .vop_access = hammer2_vop_access, 2957 .vop_advlock = hammer2_vop_advlock, 2958 .vop_close = hammer2_vop_close, 2959 .vop_nlink = hammer2_vop_nlink, 2960 .vop_ncreate = hammer2_vop_ncreate, 2961 .vop_nsymlink = hammer2_vop_nsymlink, 2962 .vop_nremove = hammer2_vop_nremove, 2963 .vop_nrmdir = hammer2_vop_nrmdir, 2964 .vop_nrename = hammer2_vop_nrename, 2965 .vop_getattr = hammer2_vop_getattr, 2966 .vop_getattr_lite = hammer2_vop_getattr_lite, 2967 
.vop_setattr = hammer2_vop_setattr, 2968 .vop_readdir = hammer2_vop_readdir, 2969 .vop_readlink = hammer2_vop_readlink, 2970 .vop_read = hammer2_vop_read, 2971 .vop_write = hammer2_vop_write, 2972 .vop_open = hammer2_vop_open, 2973 .vop_inactive = hammer2_vop_inactive, 2974 .vop_reclaim = hammer2_vop_reclaim, 2975 .vop_nresolve = hammer2_vop_nresolve, 2976 .vop_nlookupdotdot = hammer2_vop_nlookupdotdot, 2977 .vop_nmkdir = hammer2_vop_nmkdir, 2978 .vop_nmknod = hammer2_vop_nmknod, 2979 .vop_ioctl = hammer2_vop_ioctl, 2980 .vop_mountctl = hammer2_vop_mountctl, 2981 .vop_bmap = hammer2_vop_bmap, 2982 .vop_strategy = hammer2_vop_strategy, 2983 .vop_kqfilter = hammer2_vop_kqfilter 2984 }; 2985 2986 struct vop_ops hammer2_spec_vops = { 2987 .vop_default = vop_defaultop, 2988 .vop_fsync = hammer2_vop_fsync, 2989 .vop_read = vop_stdnoread, 2990 .vop_write = vop_stdnowrite, 2991 .vop_access = hammer2_vop_access, 2992 .vop_close = hammer2_vop_close, 2993 .vop_markatime = hammer2_vop_markatime, 2994 .vop_getattr = hammer2_vop_getattr, 2995 .vop_inactive = hammer2_vop_inactive, 2996 .vop_reclaim = hammer2_vop_reclaim, 2997 .vop_setattr = hammer2_vop_setattr 2998 }; 2999 3000 struct vop_ops hammer2_fifo_vops = { 3001 .vop_default = fifo_vnoperate, 3002 .vop_fsync = hammer2_vop_fsync, 3003 #if 0 3004 .vop_read = hammer2_vop_fiforead, 3005 .vop_write = hammer2_vop_fifowrite, 3006 #endif 3007 .vop_access = hammer2_vop_access, 3008 #if 0 3009 .vop_close = hammer2_vop_fifoclose, 3010 #endif 3011 .vop_markatime = hammer2_vop_markatime, 3012 .vop_getattr = hammer2_vop_getattr, 3013 .vop_inactive = hammer2_vop_inactive, 3014 .vop_reclaim = hammer2_vop_reclaim, 3015 .vop_setattr = hammer2_vop_setattr, 3016 .vop_kqfilter = hammer2_vop_fifokqfilter 3017 }; 3018 3019