/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/mountctl.h>
#include <sys/namecache.h>
#include <sys/buf2.h>
#include <vfs/fifofs/fifo.h>

#include "hammer.h"

/*
 * USERFS VNOPS
 */
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_markatime(struct vop_markatime_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);
static int hammer_vop_kqfilter (struct vop_kqfilter_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);
static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);

struct vop_ops hammer_vnode_vops = {
        .vop_default = vop_defaultop,
        .vop_fsync = hammer_vop_fsync,
        .vop_getpages = vop_stdgetpages,
        .vop_putpages = vop_stdputpages,
        .vop_read = hammer_vop_read,
        .vop_write = hammer_vop_write,
        .vop_access = hammer_vop_access,
        .vop_advlock = hammer_vop_advlock,
        .vop_close = hammer_vop_close,
        .vop_ncreate = hammer_vop_ncreate,
        .vop_getattr = hammer_vop_getattr,
        .vop_inactive = hammer_vop_inactive,
        .vop_reclaim = hammer_vop_reclaim,
        .vop_nresolve = hammer_vop_nresolve,
        .vop_nlookupdotdot = hammer_vop_nlookupdotdot,
        .vop_nlink = hammer_vop_nlink,
        .vop_nmkdir = hammer_vop_nmkdir,
        .vop_nmknod = hammer_vop_nmknod,
        .vop_open = hammer_vop_open,
        .vop_pathconf = vop_stdpathconf,
        .vop_print = hammer_vop_print,
        .vop_readdir = hammer_vop_readdir,
        .vop_readlink = hammer_vop_readlink,
        .vop_nremove = hammer_vop_nremove,
        .vop_nrename = hammer_vop_nrename,
        .vop_nrmdir = hammer_vop_nrmdir,
        .vop_markatime = hammer_vop_markatime,
        .vop_setattr = hammer_vop_setattr,
        .vop_bmap = hammer_vop_bmap,
        .vop_strategy = hammer_vop_strategy,
        .vop_nsymlink = hammer_vop_nsymlink,
        .vop_nwhiteout = hammer_vop_nwhiteout,
        .vop_ioctl = hammer_vop_ioctl,
        .vop_mountctl = hammer_vop_mountctl,
        .vop_kqfilter = hammer_vop_kqfilter
};

struct vop_ops hammer_spec_vops = {
        .vop_default = vop_defaultop,
        .vop_fsync = hammer_vop_fsync,
        .vop_read = vop_stdnoread,
        .vop_write = vop_stdnowrite,
        .vop_access = hammer_vop_access,
        .vop_close = hammer_vop_close,
        .vop_markatime = hammer_vop_markatime,
        .vop_getattr = hammer_vop_getattr,
        .vop_inactive = hammer_vop_inactive,
        .vop_reclaim = hammer_vop_reclaim,
        .vop_setattr = hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
        .vop_default = fifo_vnoperate,
        .vop_fsync = hammer_vop_fsync,
        .vop_read = hammer_vop_fiforead,
        .vop_write = hammer_vop_fifowrite,
        .vop_access = hammer_vop_access,
        .vop_close = hammer_vop_fifoclose,
        .vop_markatime = hammer_vop_markatime,
        .vop_getattr = hammer_vop_getattr,
        .vop_inactive = hammer_vop_inactive,
        .vop_reclaim = hammer_vop_reclaim,
        .vop_setattr = hammer_vop_setattr,
        .vop_kqfilter = hammer_vop_fifokqfilter
};

static __inline
void
hammer_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                        struct vnode *dvp, struct ucred *cred,
                        int flags, int isdir);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
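
/*
 * A rough map of the hammer_fsync_mode sysctl values interpreted by
 * hammer_vop_fsync() below.  The REDO modes require volume version >= 4
 * and quietly fall back to their non-REDO equivalents on older volumes;
 * out-of-range values currently behave roughly like mode 3:
 *
 *      0 - no REDO, full synchronous flush
 *      1 - no REDO, full asynchronous flush
 *      2 - REDO semantics, synchronous flush
 *      3 - REDO semantics, relaxed asynchronous flush
 *      4 - ignore the fsync() system call entirely
 */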

/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred after
 * return.
 *
 * NOTE: HAMMER's fsync()'s are going to remain expensive until we implement
 *       a REDO log.  A sysctl is provided to relax HAMMER's fsync()
 *       operation.
 *
 *       Ultimately the combination of a REDO log and use of fast storage
 *       to front-end cluster caches will make fsync fast, but it ain't
 *       here yet.  And, in any case, we need real transactional
 *       all-or-nothing features which are not restricted to a single file.
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        hammer_mount_t hmp = ip->hmp;
        int waitfor = ap->a_waitfor;
        int mode;

        lwkt_gettoken(&hmp->fs_token);

        /*
         * Fsync rule relaxation (default is either full synchronous flush
         * or REDO semantics with synchronous flush).
         */
        if (ap->a_flags & VOP_FSYNC_SYSCALL) {
                switch(hammer_fsync_mode) {
                case 0:
mode0:
                        /* no REDO, full synchronous flush */
                        goto skip;
                case 1:
mode1:
                        /* no REDO, full asynchronous flush */
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        goto skip;
                case 2:
                        /* REDO semantics, synchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode0;
                        mode = HAMMER_FLUSH_UNDOS_AUTO;
                        break;
                case 3:
                        /* REDO semantics, relaxed asynchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode1;
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                case 4:
                        /* ignore the fsync() system call */
                        lwkt_reltoken(&hmp->fs_token);
                        return(0);
                default:
                        /* we have to do something */
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                }

                /*
                 * Fast fsync only needs to flush the UNDO/REDO fifo if
                 * HAMMER_INODE_REDO is non-zero and the only modifications
                 * made to the file are write or write-extends.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) &&
                    (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0) {
                        ++hammer_count_fsyncs;
                        hammer_flusher_flush_undos(hmp, mode);
                        ip->redo_count = 0;
                        if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
                                vclrisdirty(ip->vp);
                        lwkt_reltoken(&hmp->fs_token);
                        return(0);
                }

                /*
                 * REDO is enabled by fsync(), the idea being we really only
                 * want to lay down REDO records when programs are using
                 * fsync() heavily.  The first fsync() on the file starts
                 * the gravy train going and later fsync()s keep it hot by
                 * resetting the redo_count.
                 *
                 * We weren't running REDOs before now so we have to fall
                 * through and do a full fsync of what we have.
                 */
                if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
                    (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
                        ip->flags |= HAMMER_INODE_REDO;
                        ip->redo_count = 0;
                }
        }
skip:

        /*
         * Do a full flush sequence.
         *
         * Attempt to release the vnode while waiting for the inode to
         * finish flushing.  This can really mess up inactive->reclaim
         * sequences so only do it if the vnode is active.
         *
         * WARNING! The VX lock functions must be used.  vn_lock() will
         *          fail when this is part of a VOP_RECLAIM sequence.
         */
        ++hammer_count_fsyncs;
        vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        if (waitfor == MNT_WAIT) {
                int dorelock;

                if ((ap->a_vp->v_flag & VRECLAIMED) == 0) {
                        vx_unlock(ap->a_vp);
                        dorelock = 1;
                } else {
                        dorelock = 0;
                }
                hammer_wait_inode(ip);
                if (dorelock)
                        vx_lock(ap->a_vp);
        }
        if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
                vclrisdirty(ip->vp);
        lwkt_reltoken(&hmp->fs_token);
        return (ip->error);
}
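
/*
 * Both the read and write paths below access file data through the
 * buffer cache in blocks of hammer_blocksize(offset) bytes, a size
 * which varies with the file offset (per the HAMMER_BUFSIZE /
 * HAMMER_XBUFSIZE constants in hammer.h).  A sketch of the
 * per-iteration math common to both loops:
 *
 *      blksize = hammer_blocksize(uio->uio_offset);
 *      offset = (int)uio->uio_offset & (blksize - 1);  (intra-buffer)
 *      base_offset = uio->uio_offset - offset;         (buffer base)
 *
 * e.g. with a 16384-byte block a transfer at file offset 16484 resolves
 * to offset 100 within the buffer based at 16384.
 */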

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 *
 * MPSAFE (the cache-safe path does not require fs_token)
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        off_t offset;
        struct buf *bp;
        struct uio *uio;
        int error;
        int n;
        int seqcount;
        int ioseqcount;
        int blksize;
        int bigread;
        int got_trans;
        size_t resid;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        got_trans = 0;
        uio = ap->a_uio;

        /*
         * Attempt to shortcut directly to the VM object using lwbufs.
         * This is much faster than instantiating buffer cache buffers.
         */
        resid = uio->uio_resid;
        error = vop_helper_read_shortcut(ap);
        hammer_stats_file_read += resid - uio->uio_resid;
        if (error)
                return (error);
        if (uio->uio_resid == 0)
                goto finished;

        /*
         * Allow the UIO's size to override the sequential heuristic.
         */
        blksize = hammer_blocksize(uio->uio_offset);
        seqcount = (uio->uio_resid + (MAXBSIZE - 1)) / MAXBSIZE;
        ioseqcount = (ap->a_ioflag >> 16);
        if (seqcount < ioseqcount)
                seqcount = ioseqcount;

        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         */
        bigread = (uio->uio_resid > 100 * 1024 * 1024);

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         *
         * XXX Temporary hack, delay the start transaction while we remain
         *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
         *     locked-shared.
         */
        while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
                int64_t base_offset;
                int64_t file_limit;

                blksize = hammer_blocksize(uio->uio_offset);
                offset = (int)uio->uio_offset & (blksize - 1);
                base_offset = uio->uio_offset - offset;

                if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
                        break;

                /*
                 * MPSAFE
                 */
                bp = getblk(ap->a_vp, base_offset, blksize, 0, 0);
                if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == B_CACHE) {
                        bp->b_flags &= ~B_AGE;
                        error = 0;
                        goto skip;
                }
                if (ap->a_ioflag & IO_NRDELAY) {
                        bqrelse(bp);
                        return (EWOULDBLOCK);
                }

                /*
                 * MPUNSAFE
                 */
                if (got_trans == 0) {
                        hammer_start_transaction(&trans, ip->hmp);
                        got_trans = 1;
                }

                /*
                 * NOTE: A valid bp has already been acquired, but was not
                 *       B_CACHE.
                 */
                if (hammer_cluster_enable) {
                        /*
                         * Use file_limit to prevent cluster_read() from
                         * creating buffers of the wrong block size past
                         * the demarc.
                         */
                        file_limit = ip->ino_data.size;
                        if (base_offset < HAMMER_XDEMARC &&
                            file_limit > HAMMER_XDEMARC) {
                                file_limit = HAMMER_XDEMARC;
                        }
                        error = cluster_readx(ap->a_vp,
                                              file_limit, base_offset,
                                              blksize, uio->uio_resid,
                                              seqcount * MAXBSIZE, &bp);
                } else {
                        error = breadnx(ap->a_vp, base_offset, blksize,
                                        NULL, NULL, 0, &bp);
                }
                if (error) {
                        brelse(bp);
                        break;
                }
skip:
                if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IOISSUED)) {
                        hdkprintf("zone2_offset %016jx read file %016jx@%016jx\n",
                                (intmax_t)bp->b_bio2.bio_offset,
                                (intmax_t)ip->obj_id,
                                (intmax_t)bp->b_loffset);
                }
                bp->b_flags &= ~B_IOISSUED;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;

                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > ip->ino_data.size - uio->uio_offset)
                        n = (int)(ip->ino_data.size - uio->uio_offset);

                /*
                 * Set B_AGE, data has a lower priority than meta-data.
                 *
                 * Use a hold/unlock/drop sequence to run the uiomove
                 * with the buffer unlocked, avoiding deadlocks against
                 * read()s on mmap()'d spaces.
                 */
                bp->b_flags |= B_AGE;
                error = uiomovebp(bp, (char *)bp->b_data + offset, n, uio);
                bqrelse(bp);

                if (error)
                        break;
                hammer_stats_file_read += n;
        }

finished:

        /*
         * Try to update the atime with just the inode lock for maximum
         * concurrency.  If we can't shortcut it we have to get the full
         * blown transaction.
         */
        if (got_trans == 0 && hammer_update_atime_quick(ip) < 0) {
                hammer_start_transaction(&trans, ip->hmp);
                got_trans = 1;
        }

        if (got_trans) {
                if ((ip->flags & HAMMER_INODE_RO) == 0 &&
                    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
                        lwkt_gettoken(&hmp->fs_token);
                        ip->ino_data.atime = trans.time;
                        hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
                        hammer_done_transaction(&trans);
                        lwkt_reltoken(&hmp->fs_token);
                } else {
                        hammer_done_transaction(&trans);
                }
        }
        return (error);
}
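
/*
 * A condensed view of how the write loop below obtains the buffer for
 * each block before copying in the caller's data (the loop itself is
 * authoritative):
 *
 *      UIO_NOCOPY write                 getblk(), bread() if not B_CACHE
 *      full-block overwrite             getblk(), clear if not B_CACHE
 *      block entirely beyond EOF        getblk() + vfs_bio_clrbuf()
 *      partial overwrite within file    bread() + bheavy()
 */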

/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        thread_t td;
        struct uio *uio;
        int offset;
        off_t base_offset;
        int64_t cluster_eof;
        struct buf *bp;
        int kflags;
        int error;
        int n;
        int flags;
        int seqcount;
        int bigwrite;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        kflags = 0;
        seqcount = ap->a_ioflag >> 16;

        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, hmp);
        uio = ap->a_uio;

        /*
         * Check append mode
         */
        if (ap->a_ioflag & IO_APPEND)
                uio->uio_offset = ip->ino_data.size;

        /*
         * Check for illegal write offsets.  Valid range is 0...2^63-1.
         *
         * NOTE: the base_off assignment is required to work around what
         *       I consider to be a GCC-4 optimization bug.
         */
        if (uio->uio_offset < 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
        base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
        if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }

        if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
            base_offset > td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                hammer_done_transaction(&trans);
                lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
                return (EFBIG);
        }

        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         *
         * Preset redo_count so we stop generating REDOs earlier if the
         * limit is exceeded.
         *
         * redo_count is heuristical, SMP races are ok
         */
        bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
        if ((ip->flags & HAMMER_INODE_REDO) &&
            ip->redo_count < hammer_limit_redo) {
                ip->redo_count += uio->uio_resid;
        }

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0) {
                int fixsize = 0;
                int blksize;
                int blkmask;
                int trivial;
                int endofblk;
                off_t nsize;

                if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
                        break;
                if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
                        break;

                blksize = hammer_blocksize(uio->uio_offset);

                /*
                 * Control the number of pending records associated with
                 * this inode.  If too many have accumulated start a
                 * flush.  Try to maintain a pipeline with the flusher.
                 *
                 * NOTE: It is possible for other sources to grow the
                 *       records but not necessarily issue another flush,
                 *       so use a timeout and ensure that a re-flush occurs.
                 */
                if (ip->rsv_recs >= hammer_limit_inode_recs) {
                        lwkt_gettoken(&hmp->fs_token);
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        while (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
                                ip->flags |= HAMMER_INODE_RECSW;
                                tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        }
                        lwkt_reltoken(&hmp->fs_token);
                }
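
                /*
                 * i.e. the writer kicks off a flush at the soft record
                 * limit and only blocks (in one-second sleeps,
                 * re-signalling the flusher each time) while the inode
                 * sits at more than twice that limit, which keeps the
                 * writer and the flusher pipelined rather than strictly
                 * alternating.
                 */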

                /*
                 * Do not allow HAMMER to blow out the buffer cache.  Very
                 * large UIOs can lockout other processes due to bwillwrite()
                 * mechanics.
                 *
                 * The hammer inode is not locked during these operations.
                 * The vnode is locked which can interfere with the pageout
                 * daemon for non-UIO_NOCOPY writes but should not interfere
                 * with the buffer cache.  Even so, we cannot afford to
                 * allow the pageout daemon to build up too many dirty buffer
                 * cache buffers.
                 *
                 * Only call this if we aren't being recursively called from
                 * a virtual disk device (vn), else we may deadlock.
                 */
                if ((ap->a_ioflag & IO_RECURSE) == 0)
                        bwillwrite(blksize);

                /*
                 * Calculate the blocksize at the current offset and figure
                 * out how much we can actually write.
                 */
                blkmask = blksize - 1;
                offset = (int)uio->uio_offset & blkmask;
                base_offset = uio->uio_offset & ~(int64_t)blkmask;
                n = blksize - offset;
                if (n > uio->uio_resid) {
                        n = uio->uio_resid;
                        endofblk = 0;
                } else {
                        endofblk = 1;
                }
                nsize = uio->uio_offset + n;
                if (nsize > ip->ino_data.size) {
                        if (uio->uio_offset > ip->ino_data.size)
                                trivial = 0;
                        else
                                trivial = 1;
                        nvextendbuf(ap->a_vp,
                                    ip->ino_data.size,
                                    nsize,
                                    hammer_blocksize(ip->ino_data.size),
                                    hammer_blocksize(nsize),
                                    hammer_blockoff(ip->ino_data.size),
                                    hammer_blockoff(nsize),
                                    trivial);
                        fixsize = 1;
                        kflags |= NOTE_EXTEND;
                }

                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ap->a_vp, base_offset,
                                              blksize, &bp);
                        }
                } else if (offset == 0 && uio->uio_resid >= blksize) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ap->a_vp, base_offset, blksize,
                                    GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else if (base_offset >= ip->ino_data.size) {
                        /*
                         * If the base offset of the buffer is beyond the
                         * file EOF, we don't have to issue a read.
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         */
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }
                if (error == 0)
                        error = uiomovebp(bp, bp->b_data + offset, n, uio);

                lwkt_gettoken(&hmp->fs_token);

                /*
                 * Generate REDO records if enabled and redo_count will not
                 * exceed the limit.
                 *
                 * If redo_count exceeds the limit we stop generating records
                 * and clear HAMMER_INODE_REDO.  This will cause the next
                 * fsync() to do a full meta-data sync instead of just an
                 * UNDO/REDO fifo update.
                 *
                 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
                 * will still be tracked.  The tracks will be terminated
                 * when the related meta-data (including possible data
                 * modifications which are not tracked via REDO) is
                 * flushed.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
                        if (ip->redo_count < hammer_limit_redo) {
                                bp->b_flags |= B_VFSFLAG1;
                                error = hammer_generate_redo(&trans, ip,
                                                     base_offset + offset,
                                                     HAMMER_REDO_WRITE,
                                                     bp->b_data + offset,
                                                     (size_t)n);
                        } else {
                                ip->flags &= ~HAMMER_INODE_REDO;
                        }
                }
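
                /*
                 * The REDO record generated above captures the (file
                 * offset, data, length) triple for the block just
                 * written, allowing crash recovery to replay the write
                 * from the UNDO/REDO FIFO without a full meta-data sync;
                 * see hammer_generate_redo() for the on-media details.
                 */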

                /*
                 * If we screwed up we have to undo any VM size changes we
                 * made.
                 */
                if (error) {
                        brelse(bp);
                        if (fixsize) {
                                nvtruncbuf(ap->a_vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size),
                                          0);
                        }
                        lwkt_reltoken(&hmp->fs_token);
                        break;
                }
                kflags |= NOTE_WRITE;
                hammer_stats_file_write += n;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;
                if (ip->ino_data.size < uio->uio_offset) {
                        ip->ino_data.size = uio->uio_offset;
                        flags = HAMMER_INODE_SDIRTY;
                } else {
                        flags = 0;
                }
                ip->ino_data.mtime = trans.time;
                flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
                hammer_modify_inode(&trans, ip, flags);

                /*
                 * Once we dirty the buffer any cached zone-X offset
                 * becomes invalid.  HAMMER NOTE: no-history mode cannot
                 * allow overwriting over the same data sector unless
                 * we provide UNDOs for the old data, which we don't.
                 */
                bp->b_bio2.bio_offset = NOOFFSET;

                lwkt_reltoken(&hmp->fs_token);

                /*
                 * Final buffer disposition.
                 *
                 * Because meta-data updates are deferred, HAMMER is
                 * especially sensitive to excessive bdwrite()s because
                 * the I/O stream is not broken up by disk reads.  So the
                 * buffer cache simply cannot keep up.
                 *
                 * WARNING!  blksize is variable.  cluster_write() is
                 *           expected to not blow up if it encounters
                 *           buffers that do not match the passed blksize.
                 *
                 * NOTE!  Hammer shouldn't need to bawrite()/cluster_write().
                 *        The ip->rsv_recs check should burst-flush the data.
                 *        If we queue it immediately the buf could be left
                 *        locked on the device queue for a very long time.
                 *
                 *        However, failing to flush a dirty buffer out when
                 *        issued from the pageout daemon can result in a low
                 *        memory deadlock against bio_page_alloc(), so we
                 *        have to bawrite() on IO_ASYNC as well.
                 *
                 * NOTE!  To avoid degenerate stalls due to mismatched block
                 *        sizes we only honor IO_DIRECT on the write which
                 *        abuts the end of the buffer.  However, we must
                 *        honor IO_SYNC in case someone is silly enough to
                 *        configure a HAMMER file as swap, or when HAMMER
                 *        is serving NFS (for commits).  Ick ick.
                 */
                bp->b_flags |= B_AGE;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;

                if (ap->a_ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
                        bawrite(bp);
                } else if (ap->a_ioflag & IO_ASYNC) {
                        bawrite(bp);
                } else if (hammer_cluster_enable &&
                           !(ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
                        if (base_offset < HAMMER_XDEMARC)
                                cluster_eof = hammer_blockdemarc(base_offset,
                                                         ip->ino_data.size);
                        else
                                cluster_eof = ip->ino_data.size;
                        cluster_write(bp, cluster_eof, blksize, seqcount);
                } else {
                        bdwrite(bp);
                }
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, kflags);

        return (error);
}
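
/*
 * In short, the disposition chain at the end of the write loop above
 * prefers: synchronous write for IO_SYNC, immediate async write for
 * IO_DIRECT at an end-of-block boundary or for IO_ASYNC, a clustered
 * write when clustering is enabled and the mount permits it, and a
 * delayed write otherwise.
 */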

/*
 * hammer_vop_access { vp, mode, cred }
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        ++hammer_stats_file_iopsr;
        uid = hammer_to_unix_xid(&ip->ino_data.uid);
        gid = hammer_to_unix_xid(&ip->ino_data.gid);

        error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
                                  ip->ino_data.uflags);
        return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 *
 * We can only sync-on-close for normal closes.  XXX disabled for now.
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
#if 0
        struct vnode *vp = ap->a_vp;
        hammer_inode_t ip = VTOI(vp);
        int waitfor;
        if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
                if (vn_islocked(vp) == LK_EXCLUSIVE &&
                    (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
                        if (ip->flags & HAMMER_INODE_CLOSESYNC)
                                waitfor = MNT_WAIT;
                        else
                                waitfor = MNT_NOWAIT;
                        ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
                                       HAMMER_INODE_CLOSEASYNC);
                        VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
                }
        }
#endif
        return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced and shared-locked to prevent
         * it from being moved to the flusher.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hkprintf("hammer_create_inode error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_direntry(&trans, dip,
                                       nch->ncp->nc_name, nch->ncp->nc_nlen,
                                       nip);
        if (error)
                hkprintf("hammer_ip_add_direntry error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_done_transaction(&trans);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        }
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        struct vattr *vap = ap->a_vap;

        /*
         * We want the fsid to be different when accessing a filesystem
         * with different as-of's so programs like diff don't think
         * the files are the same.
         *
         * We also want the fsid to be the same when comparing snapshots,
         * or when comparing mirrors (which might be backed by different
         * physical devices).  HAMMER fsids are based on the PFS's
         * shared_uuid field.
         *
         * XXX there is a chance of collision here.  The va_fsid reported
         * by stat is different from the more involved fsid used in the
         * mount structure.
         */
        ++hammer_stats_file_iopsr;
        hammer_lock_sh(&ip->lock);
        vap->va_fsid = ip->pfsm->fsid_udev ^ (uint32_t)ip->obj_asof ^
                       (uint32_t)(ip->obj_asof >> 32);

        vap->va_fileid = ip->ino_leaf.base.obj_id;
        vap->va_mode = ip->ino_data.mode;
        vap->va_nlink = ip->ino_data.nlinks;
        vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
        vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->ino_data.size;

        /*
         * Special case for @@PFS softlinks.  The actual size of the
         * expanded softlink is "@@0x%016llx:%05d" == 26 bytes, or for
         * MAX_TID is "@@-1:%05d" == 10 bytes.
         *
         * Note that the userspace hammer command does not allow users to
         * create a @@PFS softlink under an existing PFS other than PFS#0
         * (id != 0), so the ip localization here for a @@PFS softlink is
         * always 0.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
            ip->ino_data.size == 10 &&
            ip->obj_asof == HAMMER_MAX_TID &&
            ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
            strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
                if (hammer_is_pfs_slave(&ip->pfsm->pfsd))
                        vap->va_size = 26;
                else
                        vap->va_size = 10;
        }

        /*
         * We must provide a consistent atime and mtime for snapshots
         * so people can do a 'tar cf - ... | md5' on them and get
         * consistent results.
         */
        if (ip->flags & HAMMER_INODE_RO) {
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
        } else {
                hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
        }
        hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
        vap->va_flags = ip->ino_data.uflags;
        vap->va_gen = 1;        /* hammer inums are unique for all time */
        vap->va_blocksize = HAMMER_BUFSIZE;
        if (ip->ino_data.size >= HAMMER_XDEMARC) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
                                ~HAMMER_XBUFMASK64;
        } else if (ip->ino_data.size > HAMMER_HBUFSIZE) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
                                ~HAMMER_BUFMASK64;
        } else {
                vap->va_bytes = (ip->ino_data.size + 15) & ~15;
        }
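
        /*
         * va_bytes above rounds the logical size up to the allocation
         * granularity likely used to back the file: 64KB extended-buffer
         * multiples past the HAMMER_XDEMARC demarcation, 16KB buffer
         * multiples for mid-sized files, and 16-byte alignment for small
         * files (the exact cutoffs come from the buffer constants in
         * hammer.h).  e.g. a 100 byte file reports 112 bytes, a 20000
         * byte file reports 32768.
         */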

        vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
        vap->va_filerev = 0;    /* XXX */
        vap->va_uid_uuid = ip->ino_data.uid;
        vap->va_gid_uuid = ip->ino_data.gid;
        vap->va_fsid_uuid = ip->hmp->fsid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        switch (ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                vap->va_rmajor = ip->ino_data.rmajor;
                vap->va_rminor = ip->ino_data.rminor;
                break;
        default:
                break;
        }
        hammer_unlock(&ip->lock);
        return(0);
}
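
/*
 * Directory lookups below work roughly like a chained hash table: a
 * path component hashes to a 64 bit namekey whose low bits act as the
 * chain, so a lookup scans the inclusive B-Tree key range
 *
 *      [namekey, namekey + max_iterations]
 *
 * comparing each candidate record's name against the component (see
 * hammer_vop_nresolve() and hammer_vop_nrename() for the setup).
 */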

/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *ncp;
        hammer_mount_t hmp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_tid_t asof;
        struct hammer_cursor cursor;
        struct vnode *vp;
        int64_t namekey;
        int error;
        int i;
        int nlen;
        int flags;
        int ispfs;
        int64_t obj_id;
        uint32_t localization;
        uint32_t max_iterations;

        /*
         * Misc initialization, plus handle as-of name extensions.  Look for
         * the '@@' extension.  Note that as-of files and directories cannot
         * be modified.
         */
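
        /*
         * e.g. a component like "fubar@@0x<tid>" resolves "fubar" as of
         * the given transaction id and is forced read-only, while an
         * asof of -1 ("fubar@@-1") means HAMMER_MAX_TID, the current
         * version; hammer_str_to_tid() parses the extension.
         */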
        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        asof = dip->obj_asof;
        localization = dip->obj_localization;   /* for code consistency */
        nlen = ncp->nc_nlen;
        flags = dip->flags & HAMMER_INODE_RO;
        ispfs = 0;
        hmp = dip->hmp;

        lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;

        for (i = 0; i < nlen; ++i) {
                if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
                        error = hammer_str_to_tid(ncp->nc_name + i + 2,
                                                  &ispfs, &asof,
                                                  &localization);
                        if (error != 0) {
                                i = nlen;
                                break;
                        }
                        if (asof != HAMMER_MAX_TID)
                                flags |= HAMMER_INODE_RO;
                        break;
                }
        }
        nlen = i;

        /*
         * If this is a PFS softlink we dive into the PFS
         */
        if (ispfs && nlen == 0) {
                ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * If there is no path component the time extension is relative
         * to dip.  e.g. "fubar/@@<snapshot>"
         *
         * "." is handled by the kernel, but ".@@<snapshot>" is not.
         * e.g. "fubar/.@@<snapshot>"
         *
         * ".." is handled by the kernel.  We do not currently handle
         * "..@@<snapshot>".
         */
        if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
                ip = hammer_get_inode(&trans, dip, dip->obj_id,
                                      asof, dip->obj_localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_direntry_namekey(dip, ncp->nc_name, nlen,
                                          &max_iterations);

        error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization |
                                      hammer_dir_localization(dip);
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        obj_id = 0;
        localization = HAMMER_DEF_LOCALIZATION;

        if (error == 0) {
                error = hammer_ip_first(&cursor);
                while (error == 0) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error)
                                break;
                        if (nlen == cursor.leaf->data_len -
                                    HAMMER_ENTRY_NAME_OFF &&
                            bcmp(ncp->nc_name, cursor.data->entry.name,
                                 nlen) == 0) {
                                obj_id = cursor.data->entry.obj_id;
                                localization = cursor.data->entry.localization;
                                break;
                        }
                        error = hammer_ip_next(&cursor);
                }
        }
        hammer_done_cursor(&cursor);

        /*
         * Lookup the obj_id.  This should always succeed.  If it does not
         * the filesystem may be damaged and we return a dummy inode.
         */
        if (error == 0) {
                ip = hammer_get_inode(&trans, dip, obj_id,
                                      asof, localization,
                                      flags, &error);
                if (error == ENOENT) {
                        hkprintf("WARNING: Missing inode for dirent \"%s\"\n"
                                 "\tobj_id = %016jx, asof=%016jx, lo=%08x\n",
                                 ncp->nc_name,
                                 (intmax_t)obj_id, (intmax_t)asof,
                                 localization);
                        error = 0;
                        ip = hammer_get_dummy_inode(&trans, dip, obj_id,
                                                    asof, localization,
                                                    flags, &error);
                }
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
        } else if (error == ENOENT) {
                cache_setvp(ap->a_nch, NULL);
        }
done:
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 indicates that we are at the root.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        int64_t parent_obj_id;
        uint32_t parent_obj_localization;
        hammer_tid_t asof;
        int error;

        dip = VTOI(ap->a_dvp);
        asof = dip->obj_asof;
        hmp = dip->hmp;

        /*
         * Who is our parent?  This could be the root of a pseudo-filesystem
         * whose parent is in another localization domain.
         */
        lwkt_gettoken(&hmp->fs_token);
        parent_obj_id = dip->ino_data.parent_obj_id;
        if (dip->obj_id == HAMMER_OBJID_ROOT)
                parent_obj_localization = HAMMER_DEF_LOCALIZATION;
        else
                parent_obj_localization = dip->obj_localization;

        /*
         * It's probably a PFS root when dip->ino_data.parent_obj_id is 0.
         */
        if (parent_obj_id == 0) {
                if (dip->obj_id == HAMMER_OBJID_ROOT &&
                    asof != hmp->asof) {
                        parent_obj_id = dip->obj_id;
                        asof = hmp->asof;
                        *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
                        ksnprintf(*ap->a_fakename, 19, "0x%016jx",
                                  (intmax_t)dip->obj_asof);
                } else {
                        *ap->a_vpp = NULL;
                        lwkt_reltoken(&hmp->fs_token);
                        return ENOENT;
                }
        }
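
        /*
         * The fake ".." name synthesized above is just the directory's
         * as-of TID rendered as "0x%016jx": 18 characters plus the
         * terminating NUL, hence the 19 byte allocation.
         */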

        hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;

        ip = hammer_get_inode(&trans, dip, parent_obj_id,
                              asof, parent_obj_localization,
                              dip->flags, &error);
        if (ip) {
                error = hammer_get_vnode(ip, ap->a_vpp);
                hammer_rel_inode(ip, 0);
        } else {
                *ap->a_vpp = NULL;
        }
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t ip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
                return(EXDEV);

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        ip = VTOI(ap->a_vp);
        hmp = dip->hmp;

        if (dip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Add the filesystem object to the directory.  Note that neither
         * dip nor ip are referenced or locked, but their vnodes are
         * referenced.  This function will bump the inode's link count.
         */
        error = hammer_ip_add_direntry(&trans, dip,
                                       nch->ncp->nc_name, nch->ncp->nc_nlen,
                                       ip);

        /*
         * Finish up.
         */
        if (error == 0) {
                cache_setunresolved(nch);
                cache_setvp(nch, ap->a_vp);
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, NOTE_LINK);
        hammer_knote(ap->a_dvp, NOTE_WRITE);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }
        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_direntry(&trans, dip,
                                       nch->ncp->nc_name, nch->ncp->nc_nlen,
                                       nip);
        if (error)
                hkprintf("hammer_mkdir (add) error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         *
         * If mknod specifies a directory a pseudo-fs is created.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_direntry(&trans, dip,
                                       nch->ncp->nc_name, nch->ncp->nc_nlen,
                                       nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_open { vp, mode, cred, fp }
 *
 * MPSAFE (does not require fs_token)
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
        hammer_inode_t ip;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);

        if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
                return (EROFS);
        return(vop_stdopen(ap));
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
        return EOPNOTSUPP;
}

/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        struct uio *uio;
        hammer_base_elm_t base;
        int error;
        int cookie_index;
        int ncookies;
        off_t *cookies;
        off_t saveoff;
        int r;
        int dtype;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;
        hmp = ip->hmp;

        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
                cookie_index = 0;
        } else {
                ncookies = -1;
                cookies = NULL;
                cookie_index = 0;
        }
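
        /*
         * uio_resid / 16 is a rough upper bound on how many directory
         * entries can fit in the caller's buffer (a struct dirent is at
         * least 16 bytes), capped at 1024 cookies per call.
         */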

        lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);

        /*
         * Handle artificial entries
         *
         * It should be noted that the minimum value for a directory
         * hash key on-media is 0x0000000100000000, so we can use anything
         * less than that to represent our 'special' key space.
         */
        error = 0;
        if (saveoff == 0) {
                r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
        if (saveoff == 1) {
                if (ip->ino_data.parent_obj_id) {
                        r = vop_write_dirent(&error, uio,
                                             ip->ino_data.parent_obj_id,
                                             DT_DIR, 2, "..");
                } else {
                        r = vop_write_dirent(&error, uio,
                                             ip->obj_id, DT_DIR, 2, "..");
                }
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
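
        /*
         * Offsets 0 and 1 are consumed by the artificial "." and ".."
         * entries above; real entries resume below at the saved B-Tree
         * key, which can never collide with the artificial range since
         * on-media directory hash keys start at 0x0000000100000000.
         */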

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization |
                                      hammer_dir_localization(ip);
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = saveoff;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                base = &cursor.leaf->base;
                saveoff = base->key;
                KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

                if (base->obj_id != ip->obj_id)
                        hpanic("bad record at %p", cursor.node);

                dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
                r = vop_write_dirent(
                             &error, uio, cursor.data->entry.obj_id,
                             dtype,
                             cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
                             (void *)cursor.data->entry.name);
                if (r)
                        break;
                ++saveoff;
                if (cookies)
                        cookies[cookie_index] = base->key;
                ++cookie_index;
                if (cookie_index == ncookies)
                        break;
                error = hammer_ip_next(&cursor);
        }
        hammer_done_cursor(&cursor);

done:
        hammer_done_transaction(&trans);

        if (ap->a_eofflag)
                *ap->a_eofflag = (error == ENOENT);
        uio->uio_offset = saveoff;
        if (error && cookie_index == 0) {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        lwkt_reltoken(&hmp->fs_token);
        return(error);
}

/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        char buf[32];
        uint32_t localization;
        hammer_pseudofs_inmem_t pfsm;
        int error;

        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;

        lwkt_gettoken(&hmp->fs_token);

        /*
         * Shortcut if the symlink data was stuffed into ino_data.
         *
         * Also expand special "@@PFS%05d" softlinks (expansion only
         * occurs for non-historical (current) accesses made from the
         * primary filesystem).
         *
         * Note that the userspace hammer command does not allow users to
         * create a @@PFS softlink under an existing PFS other than PFS#0
         * (id != 0), so the ip localization here for a @@PFS softlink is
         * always 0.
         */
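
        /*
         * e.g. (roughly) a "@@PFS00001" softlink read through a master
         * expands to "@@-1:00001" (always-current view), while on a
         * slave it expands to "@@0x<sync_end_tid>:00001", pinning the
         * view to the slave's last synchronized transaction id.
         */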
        if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
                char *ptr;
                int bytes;

                ptr = ip->ino_data.ext.symlink;
                bytes = (int)ip->ino_data.size;
                if (bytes == 10 &&
                    ip->obj_asof == HAMMER_MAX_TID &&
                    ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
                    strncmp(ptr, "@@PFS", 5) == 0) {
                        hammer_simple_transaction(&trans, hmp);
                        bcopy(ptr + 5, buf, 5);
                        buf[5] = 0;
                        localization = pfs_to_lo(strtoul(buf, NULL, 10));
                        pfsm = hammer_load_pseudofs(&trans, localization,
                                                    &error);
                        if (error == 0) {
                                if (hammer_is_pfs_slave(&pfsm->pfsd)) {
                                        /* vap->va_size == 26 */
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@0x%016jx:%05d",
                                                  (intmax_t)pfsm->pfsd.sync_end_tid,
                                                  lo_to_pfs(localization));
                                } else {
                                        /* vap->va_size == 10 */
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@-1:%05d",
                                                  lo_to_pfs(localization));
                                }
                                ptr = buf;
                                bytes = strlen(buf);
                        }
                        if (pfsm)
                                hammer_rel_pseudofs(hmp, pfsm);
                        hammer_done_transaction(&trans);
                }
                error = uiomove(ptr, bytes, ap->a_uio);
                lwkt_reltoken(&hmp->fs_token);
                return(error);
        }

        /*
         * Long version
         */
        hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  The symlink
         * target is stored in a single FIX record keyed by
         * HAMMER_FIXKEY_SYMLINK.
         */
        cursor.key_beg.localization = ip->obj_localization |
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error == 0) {
                        KKASSERT(cursor.leaf->data_len >=
                                 HAMMER_SYMLINK_NAME_OFF);
                        error = uiomove(cursor.data->symlink.name,
                                        cursor.leaf->data_len -
                                        HAMMER_SYMLINK_NAME_OFF,
                                        ap->a_uio);
                }
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return(error);
}

/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_mount_t hmp;
        int error;

        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
                                ap->a_cred, 0, 0);
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *fncp;
        struct namecache *tncp;
        hammer_inode_t fdip;
        hammer_inode_t tdip;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        struct hammer_cursor cursor;
        int64_t namekey;
        uint32_t max_iterations;
        int nlen, error;

        if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
                return(EXDEV);
        if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
                return(EXDEV);

        fdip = VTOI(ap->a_fdvp);
        tdip = VTOI(ap->a_tdvp);
        fncp = ap->a_fnch->ncp;
        tncp = ap->a_tnch->ncp;
        ip = VTOI(fncp->nc_vp);
        KKASSERT(ip != NULL);

        hmp = ip->hmp;

        if (fdip->obj_localization != tdip->obj_localization)
                return(EXDEV);
        if (fdip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (fdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (tdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Remove tncp from the target directory and then link ip as
         * tncp.  XXX pass trans to dounlink
         *
         * Force the inode sync-time to match the transaction so it is
         * in-sync with the creation of the target directory entry.
         */
        error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
                                ap->a_cred, 0, -1);
        if (error == 0 || error == ENOENT) {
                error = hammer_ip_add_direntry(&trans, tdip,
                                               tncp->nc_name, tncp->nc_nlen,
                                               ip);
                if (error == 0) {
                        ip->ino_data.parent_obj_id = tdip->obj_id;
                        ip->ino_data.ctime = trans.time;
                        hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error)
                goto failed; /* XXX */

        /*
         * Locate the record in the originating directory and remove it.
         *
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_direntry_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
                                          &max_iterations);
retry:
        hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
        cursor.key_beg.localization = fdip->obj_localization |
                                      hammer_dir_localization(fdip);
        cursor.key_beg.obj_id = fdip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = fdip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);
        while (error == 0) {
                if (hammer_ip_resolve_data(&cursor) != 0)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                KKASSERT(nlen > 0);
                if (fncp->nc_nlen == nlen &&
                    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }

        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
2033 * 2034 * WARNING: hammer_ip_del_direntry() may have to terminate the 2035 * cursor to avoid a recursion. It's ok to call hammer_done_cursor() 2036 * twice. 2037 */ 2038 if (error == 0) 2039 error = hammer_ip_del_direntry(&trans, &cursor, fdip, ip); 2040 2041 /* 2042 * XXX A deadlock here will break rename's atomicity for the purposes 2043 * of crash recovery. 2044 */ 2045 if (error == EDEADLK) { 2046 hammer_done_cursor(&cursor); 2047 goto retry; 2048 } 2049 2050 /* 2051 * Cleanup and tell the kernel that the rename succeeded. 2052 * 2053 * NOTE: ip->vp, if non-NULL, cannot be directly referenced 2054 * without formally acquiring the vp since the vp might 2055 * have zero refs on it, or in the middle of a reclaim, 2056 * etc. 2057 */ 2058 hammer_done_cursor(&cursor); 2059 if (error == 0) { 2060 cache_rename(ap->a_fnch, ap->a_tnch); 2061 hammer_knote(ap->a_fdvp, NOTE_WRITE); 2062 hammer_knote(ap->a_tdvp, NOTE_WRITE); 2063 while (ip->vp) { 2064 struct vnode *vp; 2065 2066 error = hammer_get_vnode(ip, &vp); 2067 if (error == 0 && vp) { 2068 vn_unlock(vp); 2069 hammer_knote(ip->vp, NOTE_RENAME); 2070 vrele(vp); 2071 break; 2072 } 2073 hdkprintf("ip/vp race2 avoided\n"); 2074 } 2075 } 2076 2077 failed: 2078 hammer_done_transaction(&trans); 2079 lwkt_reltoken(&hmp->fs_token); 2080 return (error); 2081 } 2082 2083 /* 2084 * hammer_vop_nrmdir { nch, dvp, cred } 2085 */ 2086 static 2087 int 2088 hammer_vop_nrmdir(struct vop_nrmdir_args *ap) 2089 { 2090 struct hammer_transaction trans; 2091 hammer_inode_t dip; 2092 hammer_mount_t hmp; 2093 int error; 2094 2095 dip = VTOI(ap->a_dvp); 2096 hmp = dip->hmp; 2097 2098 if (hammer_nohistory(dip) == 0 && 2099 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) { 2100 return (error); 2101 } 2102 2103 lwkt_gettoken(&hmp->fs_token); 2104 hammer_start_transaction(&trans, hmp); 2105 ++hammer_stats_file_iopsw; 2106 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1); 2107 hammer_done_transaction(&trans); 2108 if (error == 0) 2109 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK); 2110 lwkt_reltoken(&hmp->fs_token); 2111 return (error); 2112 } 2113 2114 /* 2115 * hammer_vop_markatime { vp, cred } 2116 */ 2117 static 2118 int 2119 hammer_vop_markatime(struct vop_markatime_args *ap) 2120 { 2121 struct hammer_transaction trans; 2122 hammer_inode_t ip; 2123 hammer_mount_t hmp; 2124 2125 ip = VTOI(ap->a_vp); 2126 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) 2127 return (EROFS); 2128 if (ip->flags & HAMMER_INODE_RO) 2129 return (EROFS); 2130 hmp = ip->hmp; 2131 if (hmp->mp->mnt_flag & MNT_NOATIME) 2132 return (0); 2133 lwkt_gettoken(&hmp->fs_token); 2134 hammer_start_transaction(&trans, hmp); 2135 ++hammer_stats_file_iopsw; 2136 2137 ip->ino_data.atime = trans.time; 2138 hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME); 2139 hammer_done_transaction(&trans); 2140 hammer_knote(ap->a_vp, NOTE_ATTRIB); 2141 lwkt_reltoken(&hmp->fs_token); 2142 return (0); 2143 } 2144 2145 /* 2146 * hammer_vop_setattr { vp, vap, cred } 2147 */ 2148 static 2149 int 2150 hammer_vop_setattr(struct vop_setattr_args *ap) 2151 { 2152 struct hammer_transaction trans; 2153 hammer_inode_t ip; 2154 struct vattr *vap; 2155 hammer_mount_t hmp; 2156 int modflags; 2157 int error; 2158 int truncating; 2159 int blksize; 2160 int kflags; 2161 #if 0 2162 int64_t aligned_size; 2163 #endif 2164 uint32_t flags; 2165 2166 vap = ap->a_vap; 2167 ip = ap->a_vp->v_data; 2168 modflags = 0; 2169 kflags = 0; 2170 hmp = ip->hmp; 2171 2172 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) 2173
return(EROFS); 2174 if (ip->flags & HAMMER_INODE_RO) 2175 return (EROFS); 2176 if (hammer_nohistory(ip) == 0 && 2177 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) { 2178 return (error); 2179 } 2180 2181 lwkt_gettoken(&hmp->fs_token); 2182 hammer_start_transaction(&trans, hmp); 2183 ++hammer_stats_file_iopsw; 2184 error = 0; 2185 2186 if (vap->va_flags != VNOVAL) { 2187 flags = ip->ino_data.uflags; 2188 error = vop_helper_setattr_flags(&flags, vap->va_flags, 2189 hammer_to_unix_xid(&ip->ino_data.uid), 2190 ap->a_cred); 2191 if (error == 0) { 2192 if (ip->ino_data.uflags != flags) { 2193 ip->ino_data.uflags = flags; 2194 ip->ino_data.ctime = trans.time; 2195 modflags |= HAMMER_INODE_DDIRTY; 2196 kflags |= NOTE_ATTRIB; 2197 } 2198 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) { 2199 error = 0; 2200 goto done; 2201 } 2202 } 2203 goto done; 2204 } 2205 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) { 2206 error = EPERM; 2207 goto done; 2208 } 2209 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { 2210 mode_t cur_mode = ip->ino_data.mode; 2211 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid); 2212 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid); 2213 uuid_t uuid_uid; 2214 uuid_t uuid_gid; 2215 2216 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid, 2217 ap->a_cred, 2218 &cur_uid, &cur_gid, &cur_mode); 2219 if (error == 0) { 2220 hammer_guid_to_uuid(&uuid_uid, cur_uid); 2221 hammer_guid_to_uuid(&uuid_gid, cur_gid); 2222 if (bcmp(&uuid_uid, &ip->ino_data.uid, 2223 sizeof(uuid_uid)) || 2224 bcmp(&uuid_gid, &ip->ino_data.gid, 2225 sizeof(uuid_gid)) || 2226 ip->ino_data.mode != cur_mode) { 2227 ip->ino_data.uid = uuid_uid; 2228 ip->ino_data.gid = uuid_gid; 2229 ip->ino_data.mode = cur_mode; 2230 ip->ino_data.ctime = trans.time; 2231 modflags |= HAMMER_INODE_DDIRTY; 2232 } 2233 kflags |= NOTE_ATTRIB; 2234 } 2235 } 2236 while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) { 2237 switch(ap->a_vp->v_type) { 2238 case VREG: 2239 if (vap->va_size == ip->ino_data.size) 2240 break; 2241 2242 /* 2243 * Log the operation if in fast-fsync mode or if 2244 * there are unterminated redo write records present. 2245 * 2246 * The second check is needed so the recovery code 2247 * properly truncates write redos even if nominal 2248 * REDO operations are turned off due to excessive 2249 * writes, because the related records might be 2250 * destroyed and never lay down a TERM_WRITE. 2251 */ 2252 if ((ip->flags & HAMMER_INODE_REDO) || 2253 (ip->flags & HAMMER_INODE_RDIRTY)) { 2254 error = hammer_generate_redo(&trans, ip, 2255 vap->va_size, 2256 HAMMER_REDO_TRUNC, 2257 NULL, 0); 2258 } 2259 blksize = hammer_blocksize(vap->va_size); 2260 2261 /* 2262 * XXX break atomicity, we can deadlock the backend 2263 * if we do not release the lock. Probably not a 2264 * big deal here. 2265 */ 2266 if (vap->va_size < ip->ino_data.size) { 2267 nvtruncbuf(ap->a_vp, vap->va_size, 2268 blksize, 2269 hammer_blockoff(vap->va_size), 2270 0); 2271 truncating = 1; 2272 kflags |= NOTE_WRITE; 2273 } else { 2274 nvextendbuf(ap->a_vp, 2275 ip->ino_data.size, 2276 vap->va_size, 2277 hammer_blocksize(ip->ino_data.size), 2278 hammer_blocksize(vap->va_size), 2279 hammer_blockoff(ip->ino_data.size), 2280 hammer_blockoff(vap->va_size), 2281 0); 2282 truncating = 0; 2283 kflags |= NOTE_WRITE | NOTE_EXTEND; 2284 } 2285 ip->ino_data.size = vap->va_size; 2286 ip->ino_data.mtime = trans.time; 2287 /* XXX safe to use SDIRTY instead of DDIRTY here?
*/ 2288 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY; 2289 2290 /* 2291 * On-media truncation is cached in the inode until 2292 * the inode is synchronized. We must immediately 2293 * handle any frontend records. 2294 */ 2295 if (truncating) { 2296 hammer_ip_frontend_trunc(ip, vap->va_size); 2297 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) { 2298 ip->flags |= HAMMER_INODE_TRUNCATED; 2299 ip->trunc_off = vap->va_size; 2300 hammer_inode_dirty(ip); 2301 } else if (ip->trunc_off > vap->va_size) { 2302 ip->trunc_off = vap->va_size; 2303 } 2304 } 2305 2306 #if 0 2307 /* 2308 * When truncating, nvtruncbuf() may have cleaned out 2309 * a portion of the last block on-disk in the buffer 2310 * cache. We must clean out any frontend records 2311 * for blocks beyond the new last block. 2312 */ 2313 aligned_size = (vap->va_size + (blksize - 1)) & 2314 ~(int64_t)(blksize - 1); 2315 if (truncating && vap->va_size < aligned_size) { 2316 aligned_size -= blksize; 2317 hammer_ip_frontend_trunc(ip, aligned_size); 2318 } 2319 #endif 2320 break; 2321 case VDATABASE: 2322 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) { 2323 ip->flags |= HAMMER_INODE_TRUNCATED; 2324 ip->trunc_off = vap->va_size; 2325 hammer_inode_dirty(ip); 2326 } else if (ip->trunc_off > vap->va_size) { 2327 ip->trunc_off = vap->va_size; 2328 } 2329 hammer_ip_frontend_trunc(ip, vap->va_size); 2330 ip->ino_data.size = vap->va_size; 2331 ip->ino_data.mtime = trans.time; 2332 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY; 2333 kflags |= NOTE_ATTRIB; 2334 break; 2335 default: 2336 error = EINVAL; 2337 goto done; 2338 } 2339 break; 2340 } 2341 if (vap->va_atime.tv_sec != VNOVAL) { 2342 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime); 2343 modflags |= HAMMER_INODE_ATIME; 2344 kflags |= NOTE_ATTRIB; 2345 } 2346 if (vap->va_mtime.tv_sec != VNOVAL) { 2347 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime); 2348 modflags |= HAMMER_INODE_MTIME; 2349 kflags |= NOTE_ATTRIB; 2350 } 2351 if (vap->va_mode != (mode_t)VNOVAL) { 2352 mode_t cur_mode = ip->ino_data.mode; 2353 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid); 2354 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid); 2355 2356 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred, 2357 cur_uid, cur_gid, &cur_mode); 2358 if (error == 0 && ip->ino_data.mode != cur_mode) { 2359 ip->ino_data.mode = cur_mode; 2360 ip->ino_data.ctime = trans.time; 2361 modflags |= HAMMER_INODE_DDIRTY; 2362 kflags |= NOTE_ATTRIB; 2363 } 2364 } 2365 done: 2366 if (error == 0) 2367 hammer_modify_inode(&trans, ip, modflags); 2368 hammer_done_transaction(&trans); 2369 hammer_knote(ap->a_vp, kflags); 2370 lwkt_reltoken(&hmp->fs_token); 2371 return (error); 2372 } 2373 2374 /* 2375 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target } 2376 */ 2377 static 2378 int 2379 hammer_vop_nsymlink(struct vop_nsymlink_args *ap) 2380 { 2381 struct hammer_transaction trans; 2382 hammer_inode_t dip; 2383 hammer_inode_t nip; 2384 hammer_record_t record; 2385 struct nchandle *nch; 2386 hammer_mount_t hmp; 2387 int error; 2388 int bytes; 2389 2390 ap->a_vap->va_type = VLNK; 2391 2392 nch = ap->a_nch; 2393 dip = VTOI(ap->a_dvp); 2394 hmp = dip->hmp; 2395 2396 if (dip->flags & HAMMER_INODE_RO) 2397 return (EROFS); 2398 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) 2399 return (error); 2400 2401 /* 2402 * Create a transaction to cover the operations we perform. 
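 * (Added sketch of the bracketing used by every media-modifying VOP in
 * this file, error paths elided:
 *
 *	lwkt_gettoken(&hmp->fs_token);
 *	hammer_start_transaction(&trans, hmp);
 *	... create the inode, add records, add the directory entry ...
 *	hammer_done_transaction(&trans);
 *	lwkt_reltoken(&hmp->fs_token);
 *
 * The per-mount token serializes access to the mount's structures
 * while the transaction groups the B-Tree updates.)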
2403 */ 2404 lwkt_gettoken(&hmp->fs_token); 2405 hammer_start_transaction(&trans, hmp); 2406 ++hammer_stats_file_iopsw; 2407 2408 /* 2409 * Create a new filesystem object of the requested type. The 2410 * returned inode will be referenced but not locked. 2411 */ 2412 2413 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, 2414 dip, nch->ncp->nc_name, nch->ncp->nc_nlen, 2415 NULL, &nip); 2416 if (error) { 2417 hammer_done_transaction(&trans); 2418 *ap->a_vpp = NULL; 2419 lwkt_reltoken(&hmp->fs_token); 2420 return (error); 2421 } 2422 2423 /* 2424 * Add a record representing the symlink. symlink stores the link 2425 * as pure data, not a string, and is not \0 terminated. 2426 */ 2427 if (error == 0) { 2428 bytes = strlen(ap->a_target); 2429 2430 if (bytes <= HAMMER_INODE_BASESYMLEN) { 2431 bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes); 2432 } else { 2433 record = hammer_alloc_mem_record(nip, bytes); 2434 record->type = HAMMER_MEM_RECORD_GENERAL; 2435 2436 record->leaf.base.localization = nip->obj_localization | 2437 HAMMER_LOCALIZE_MISC; 2438 record->leaf.base.key = HAMMER_FIXKEY_SYMLINK; 2439 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX; 2440 record->leaf.data_len = bytes; 2441 KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0); 2442 bcopy(ap->a_target, record->data->symlink.name, bytes); 2443 error = hammer_ip_add_record(&trans, record); 2444 } 2445 2446 /* 2447 * Set the file size to the length of the link. 2448 */ 2449 if (error == 0) { 2450 nip->ino_data.size = bytes; 2451 hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY); 2452 } 2453 } 2454 if (error == 0) 2455 error = hammer_ip_add_direntry(&trans, dip, nch->ncp->nc_name, 2456 nch->ncp->nc_nlen, nip); 2457 2458 /* 2459 * Finish up. 2460 */ 2461 if (error) { 2462 hammer_rel_inode(nip, 0); 2463 *ap->a_vpp = NULL; 2464 } else { 2465 error = hammer_get_vnode(nip, ap->a_vpp); 2466 hammer_rel_inode(nip, 0); 2467 if (error == 0) { 2468 cache_setunresolved(ap->a_nch); 2469 cache_setvp(ap->a_nch, *ap->a_vpp); 2470 hammer_knote(ap->a_dvp, NOTE_WRITE); 2471 } 2472 } 2473 hammer_done_transaction(&trans); 2474 lwkt_reltoken(&hmp->fs_token); 2475 return (error); 2476 } 2477 2478 /* 2479 * hammer_vop_nwhiteout { nch, dvp, cred, flags } 2480 */ 2481 static 2482 int 2483 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap) 2484 { 2485 struct hammer_transaction trans; 2486 hammer_inode_t dip; 2487 hammer_mount_t hmp; 2488 int error; 2489 2490 dip = VTOI(ap->a_dvp); 2491 hmp = dip->hmp; 2492 2493 if (hammer_nohistory(dip) == 0 && 2494 (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) { 2495 return (error); 2496 } 2497 2498 lwkt_gettoken(&hmp->fs_token); 2499 hammer_start_transaction(&trans, hmp); 2500 ++hammer_stats_file_iopsw; 2501 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, 2502 ap->a_cred, ap->a_flags, -1); 2503 hammer_done_transaction(&trans); 2504 lwkt_reltoken(&hmp->fs_token); 2505 2506 return (error); 2507 } 2508 2509 /* 2510 * hammer_vop_ioctl { vp, command, data, fflag, cred } 2511 */ 2512 static 2513 int 2514 hammer_vop_ioctl(struct vop_ioctl_args *ap) 2515 { 2516 hammer_inode_t ip = ap->a_vp->v_data; 2517 hammer_mount_t hmp = ip->hmp; 2518 int error; 2519 2520 ++hammer_stats_file_iopsr; 2521 lwkt_gettoken(&hmp->fs_token); 2522 error = hammer_ioctl(ip, ap->a_command, ap->a_data, 2523 ap->a_fflag, ap->a_cred); 2524 lwkt_reltoken(&hmp->fs_token); 2525 return (error); 2526 } 2527 2528 static 2529 int 2530 hammer_vop_mountctl(struct vop_mountctl_args *ap) 2531 { 2532 static const struct mountctl_opt extraopt[] = {
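		/* HAMMER-specific flags appended after the standard mount flag names */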
2533 { HMNT_NOHISTORY, "nohistory" }, 2534 { HMNT_MASTERID, "master" }, 2535 { HMNT_NOMIRROR, "nomirror" }, 2536 { 0, NULL} 2537 2538 }; 2539 hammer_mount_t hmp; 2540 struct mount *mp; 2541 int usedbytes; 2542 int error; 2543 2544 error = 0; 2545 usedbytes = 0; 2546 mp = ap->a_head.a_ops->head.vv_mount; 2547 KKASSERT(mp->mnt_data != NULL); 2548 hmp = (hammer_mount_t)mp->mnt_data; 2549 2550 lwkt_gettoken(&hmp->fs_token); 2551 2552 switch(ap->a_op) { 2553 case MOUNTCTL_SET_EXPORT: 2554 if (ap->a_ctllen != sizeof(struct export_args)) 2555 error = EINVAL; 2556 else 2557 error = hammer_vfs_export(mp, ap->a_op, 2558 (const struct export_args *)ap->a_ctl); 2559 break; 2560 case MOUNTCTL_MOUNTFLAGS: 2561 /* 2562 * Call standard mountctl VOP function 2563 * so we get user mount flags. 2564 */ 2565 error = vop_stdmountctl(ap); 2566 if (error) 2567 break; 2568 2569 usedbytes = *ap->a_res; 2570 2571 if (usedbytes > 0 && usedbytes < ap->a_buflen) { 2572 usedbytes += vfs_flagstostr(hmp->hflags, extraopt, 2573 ap->a_buf, 2574 ap->a_buflen - usedbytes, 2575 &error); 2576 } 2577 2578 *ap->a_res += usedbytes; 2579 break; 2580 default: 2581 error = vop_stdmountctl(ap); 2582 break; 2583 } 2584 lwkt_reltoken(&hmp->fs_token); 2585 return(error); 2586 } 2587 2588 /* 2589 * hammer_vop_strategy { vp, bio } 2590 * 2591 * Strategy call, used for regular file read & write only. Note that the 2592 * bp may represent a cluster. 2593 * 2594 * To simplify operation and allow better optimizations in the future, 2595 * this code does not make any assumptions with regards to buffer alignment 2596 * or size. 2597 */ 2598 static 2599 int 2600 hammer_vop_strategy(struct vop_strategy_args *ap) 2601 { 2602 struct buf *bp; 2603 int error; 2604 2605 bp = ap->a_bio->bio_buf; 2606 2607 switch(bp->b_cmd) { 2608 case BUF_CMD_READ: 2609 error = hammer_vop_strategy_read(ap); 2610 break; 2611 case BUF_CMD_WRITE: 2612 error = hammer_vop_strategy_write(ap); 2613 break; 2614 default: 2615 bp->b_error = error = EINVAL; 2616 bp->b_flags |= B_ERROR; 2617 biodone(ap->a_bio); 2618 break; 2619 } 2620 2621 /* hammer_dump_dedup_cache(((hammer_inode_t)ap->a_vp->v_data)->hmp); */ 2622 2623 return (error); 2624 } 2625 2626 /* 2627 * Read from a regular file. Iterate the related records and fill in the 2628 * BIO/BUF. Gaps are zero-filled. 2629 * 2630 * The support code in hammer_object.c should be used to deal with mixed 2631 * in-memory and on-disk records. 2632 * 2633 * NOTE: Can be called from the cluster code with an oversized buf. 2634 * 2635 * XXX atime update 2636 */ 2637 static 2638 int 2639 hammer_vop_strategy_read(struct vop_strategy_args *ap) 2640 { 2641 struct hammer_transaction trans; 2642 hammer_inode_t ip; 2643 hammer_inode_t dip; 2644 hammer_mount_t hmp; 2645 struct hammer_cursor cursor; 2646 hammer_base_elm_t base; 2647 hammer_off_t disk_offset; 2648 struct bio *bio; 2649 struct bio *nbio; 2650 struct buf *bp; 2651 int64_t rec_offset; 2652 int64_t ran_end; 2653 int64_t tmp64; 2654 int error; 2655 int boff; 2656 int roff; 2657 int n; 2658 int isdedupable; 2659 2660 bio = ap->a_bio; 2661 bp = bio->bio_buf; 2662 ip = ap->a_vp->v_data; 2663 hmp = ip->hmp; 2664 2665 /* 2666 * The zone-2 disk offset may have been set by the cluster code via 2667 * a BMAP operation, or else should be NOOFFSET. 2668 * 2669 * Checking the high bits for a match against zone-2 should suffice. 2670 * 2671 * In cases where a lot of data duplication is present it may be 2672 * more beneficial to drop through and double-buffer through the 2673 * device.
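 * (Added note: when hammer_double_buffer is zero the code below tries
 * hammer_io_direct_read() straight into the caller's bio; otherwise it
 * goes through hammer_io_indirect_read() so the data also lands in the
 * block device's buffer cache, where blocks shared by deduplication
 * can be reused across files.)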
2674 */ 2675 nbio = push_bio(bio); 2676 if (hammer_is_zone_large_data(nbio->bio_offset)) { 2677 if (hammer_double_buffer == 0) { 2678 lwkt_gettoken(&hmp->fs_token); 2679 error = hammer_io_direct_read(hmp, nbio, NULL); 2680 lwkt_reltoken(&hmp->fs_token); 2681 return (error); 2682 } 2683 2684 /* 2685 * Try to shortcut requests for double_buffer mode too. 2686 * Since this mode runs through the device buffer cache 2687 * only compatible buffer sizes (meaning those generated 2688 * by normal filesystem buffers) are legal. 2689 */ 2690 if (hammer_live_dedup == 0 && (bp->b_flags & B_PAGING) == 0) { 2691 lwkt_gettoken(&hmp->fs_token); 2692 error = hammer_io_indirect_read(hmp, nbio, NULL); 2693 lwkt_reltoken(&hmp->fs_token); 2694 return (error); 2695 } 2696 } 2697 2698 /* 2699 * Well, that sucked. Do it the hard way. If all the stars are 2700 * aligned we may still be able to issue a direct-read. 2701 */ 2702 lwkt_gettoken(&hmp->fs_token); 2703 hammer_simple_transaction(&trans, hmp); 2704 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip); 2705 2706 /* 2707 * Key range (begin and end inclusive) to scan. Note that the keys 2708 * stored in the actual records represent BASE+LEN, not BASE. The 2709 * first record containing bio_offset will have a key > bio_offset. 2710 */ 2711 cursor.key_beg.localization = ip->obj_localization | 2712 HAMMER_LOCALIZE_MISC; 2713 cursor.key_beg.obj_id = ip->obj_id; 2714 cursor.key_beg.create_tid = 0; 2715 cursor.key_beg.delete_tid = 0; 2716 cursor.key_beg.obj_type = 0; 2717 cursor.key_beg.key = bio->bio_offset + 1; 2718 cursor.asof = ip->obj_asof; 2719 cursor.flags |= HAMMER_CURSOR_ASOF; 2720 2721 cursor.key_end = cursor.key_beg; 2722 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE); 2723 #if 0 2724 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) { 2725 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB; 2726 cursor.key_end.rec_type = HAMMER_RECTYPE_DB; 2727 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL; 2728 } else 2729 #endif 2730 { 2731 ran_end = bio->bio_offset + bp->b_bufsize; 2732 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA; 2733 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA; 2734 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */ 2735 if (tmp64 < ran_end) 2736 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL; 2737 else 2738 cursor.key_end.key = ran_end + MAXPHYS + 1; 2739 } 2740 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE; 2741 2742 /* 2743 * Set NOSWAPCACHE for cursor data extraction if double buffering 2744 * is disabled or (if the file is not marked cacheable via chflags 2745 * and vm.swapcache_use_chflags is enabled). 2746 */ 2747 if (hammer_double_buffer == 0 || 2748 ((ap->a_vp->v_flag & VSWAPCACHE) == 0 && 2749 vm_swapcache_use_chflags)) { 2750 cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE; 2751 } 2752 2753 error = hammer_ip_first(&cursor); 2754 boff = 0; 2755 2756 while (error == 0) { 2757 /* 2758 * Get the base file offset of the record. The key for 2759 * data records is (base + bytes) rather than (base). 2760 */ 2761 base = &cursor.leaf->base; 2762 rec_offset = base->key - cursor.leaf->data_len; 2763 2764 /* 2765 * Calculate the gap, if any, and zero-fill it. 2766 * 2767 * n is the offset of the start of the record versus our 2768 * current seek offset in the bio.
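 * (Worked example, hypothetical numbers: bio_offset 0, boff 4096 and
 * rec_offset 8192 give n = 4096, so 4096 bytes at b_data + 4096 are
 * zero-filled and boff advances to 8192 before any record data is
 * copied.)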
2769 */ 2770 n = (int)(rec_offset - (bio->bio_offset + boff)); 2771 if (n > 0) { 2772 if (n > bp->b_bufsize - boff) 2773 n = bp->b_bufsize - boff; 2774 bzero((char *)bp->b_data + boff, n); 2775 boff += n; 2776 n = 0; 2777 } 2778 2779 /* 2780 * Calculate the data offset in the record and the number 2781 * of bytes we can copy. 2782 * 2783 * There are two degenerate cases. First, boff may already 2784 * be at bp->b_bufsize. Secondly, the data offset within 2785 * the record may exceed the record's size. 2786 */ 2787 roff = -n; 2788 rec_offset += roff; 2789 n = cursor.leaf->data_len - roff; 2790 if (n <= 0) { 2791 hdkprintf("bad n=%d roff=%d\n", n, roff); 2792 n = 0; 2793 } else if (n > bp->b_bufsize - boff) { 2794 n = bp->b_bufsize - boff; 2795 } 2796 2797 /* 2798 * Deal with cached truncations. This cool bit of code 2799 * allows truncate()/ftruncate() to avoid having to sync 2800 * the file. 2801 * 2802 * If the frontend is truncated then all backend records are 2803 * subject to the frontend's truncation. 2804 * 2805 * If the backend is truncated then backend records on-disk 2806 * (but not in-memory) are subject to the backend's 2807 * truncation. In-memory records owned by the backend 2808 * represent data written after the truncation point on the 2809 * backend and must not be truncated. 2810 * 2811 * Truncate operations deal with frontend buffer cache 2812 * buffers and frontend-owned in-memory records synchronously. 2813 */ 2814 if (ip->flags & HAMMER_INODE_TRUNCATED) { 2815 if (hammer_cursor_ondisk(&cursor)/* || 2816 cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) { 2817 if (ip->trunc_off <= rec_offset) 2818 n = 0; 2819 else if (ip->trunc_off < rec_offset + n) 2820 n = (int)(ip->trunc_off - rec_offset); 2821 } 2822 } 2823 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) { 2824 if (hammer_cursor_ondisk(&cursor)) { 2825 if (ip->sync_trunc_off <= rec_offset) 2826 n = 0; 2827 else if (ip->sync_trunc_off < rec_offset + n) 2828 n = (int)(ip->sync_trunc_off - rec_offset); 2829 } 2830 } 2831 2832 /* 2833 * Try to issue a direct read into our bio if possible, 2834 * otherwise resolve the element data into a hammer_buffer 2835 * and copy. 2836 * 2837 * The buffer on-disk should be zeroed past any real 2838 * truncation point, but may not be for any synthesized 2839 * truncation point from above. 2840 * 2841 * NOTE: disk_offset is only valid if the cursor data is 2842 * on-disk. 2843 */ 2844 disk_offset = cursor.leaf->data_offset + roff; 2845 isdedupable = (boff == 0 && n == bp->b_bufsize && 2846 hammer_cursor_ondisk(&cursor) && 2847 ((int)disk_offset & HAMMER_BUFMASK) == 0); 2848 2849 if (isdedupable && hammer_double_buffer == 0) { 2850 /* 2851 * Direct read case 2852 */ 2853 KKASSERT(hammer_is_zone_large_data(disk_offset)); 2854 nbio->bio_offset = disk_offset; 2855 error = hammer_io_direct_read(hmp, nbio, cursor.leaf); 2856 if (hammer_live_dedup && error == 0) 2857 hammer_dedup_cache_add(ip, cursor.leaf); 2858 goto done; 2859 } else if (isdedupable) { 2860 /* 2861 * Async I/O case for reading from backing store 2862 * and copying the data to the filesystem buffer. 2863 * live-dedup has to verify the data anyway if it 2864 * gets a hit later so we can just add the entry 2865 * now.
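 * (Added note: hammer_io_indirect_read() completes the caller's bio
 * once the copy from the device buffer finishes, which is why this
 * path can 'goto done' without touching bp->b_data here.)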
2866 */ 2867 KKASSERT(hammer_is_zone_large_data(disk_offset)); 2868 nbio->bio_offset = disk_offset; 2869 if (hammer_live_dedup) 2870 hammer_dedup_cache_add(ip, cursor.leaf); 2871 error = hammer_io_indirect_read(hmp, nbio, cursor.leaf); 2872 goto done; 2873 } else if (n) { 2874 error = hammer_ip_resolve_data(&cursor); 2875 if (error == 0) { 2876 if (hammer_live_dedup && isdedupable) 2877 hammer_dedup_cache_add(ip, cursor.leaf); 2878 bcopy((char *)cursor.data + roff, 2879 (char *)bp->b_data + boff, n); 2880 } 2881 } 2882 if (error) 2883 break; 2884 2885 /* 2886 * We have to be sure that the only elements added to the 2887 * dedup cache are those which are already on-media. 2888 */ 2889 if (hammer_live_dedup && hammer_cursor_ondisk(&cursor)) 2890 hammer_dedup_cache_add(ip, cursor.leaf); 2891 2892 /* 2893 * Iterate until we have filled the request. 2894 */ 2895 boff += n; 2896 if (boff == bp->b_bufsize) 2897 break; 2898 error = hammer_ip_next(&cursor); 2899 } 2900 2901 /* 2902 * There may have been a gap after the last record 2903 */ 2904 if (error == ENOENT) 2905 error = 0; 2906 if (error == 0 && boff != bp->b_bufsize) { 2907 KKASSERT(boff < bp->b_bufsize); 2908 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff); 2909 /* boff = bp->b_bufsize; */ 2910 } 2911 2912 /* 2913 * Disallow swapcache operation on the vnode buffer if double 2914 * buffering is enabled; the swapcache will get the data via 2915 * the block device buffer. 2916 */ 2917 if (hammer_double_buffer) 2918 bp->b_flags |= B_NOTMETA; 2919 2920 /* 2921 * Cleanup 2922 */ 2923 bp->b_resid = 0; 2924 bp->b_error = error; 2925 if (error) 2926 bp->b_flags |= B_ERROR; 2927 biodone(ap->a_bio); 2928 2929 done: 2930 /* 2931 * Cache the b-tree node for the last data read in cache[1]. 2932 * 2933 * If we hit the file EOF then also cache the node in the 2934 * governing directory's cache[3]; it will be used to initialize 2935 * the new inode's cache[1] for any inodes looked up via the directory. 2936 * 2937 * This doesn't reduce disk accesses since the B-Tree chain is 2938 * likely cached, but it does reduce cpu overhead when looking 2939 * up file offsets for cpdup/tar/cpio style iterations. 2940 */ 2941 if (cursor.node) 2942 hammer_cache_node(&ip->cache[1], cursor.node); 2943 if (ran_end >= ip->ino_data.size) { 2944 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id, 2945 ip->obj_asof, ip->obj_localization); 2946 if (dip) { 2947 hammer_cache_node(&dip->cache[3], cursor.node); 2948 hammer_rel_inode(dip, 0); 2949 } 2950 } 2951 hammer_done_cursor(&cursor); 2952 hammer_done_transaction(&trans); 2953 lwkt_reltoken(&hmp->fs_token); 2954 return(error); 2955 } 2956 2957 /* 2958 * BMAP operation - used to support cluster_read() only. 2959 * 2960 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb) 2961 * 2962 * This routine may return EOPNOTSUPP if the operation is not supported for 2963 * the specified offset. The contents of the pointer arguments do not 2964 * need to be initialized in that case. 2965 * 2966 * If a disk address is available and properly aligned return 0 with 2967 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately 2968 * to the run-length relative to that offset. Callers may assume that 2969 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently 2970 * large, so return EOPNOTSUPP if it is not sufficiently large.
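 * (Hypothetical example: for a contiguously laid out file, a call with
 * a_loffset = 65536 could return 0 with *doffsetp set to the matching
 * zone-2 offset, *runp the contiguous bytes available forward and
 * *runb the bytes available backward, which cluster_read() then uses
 * to size its read-ahead.)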
2971 */ 2972 static 2973 int 2974 hammer_vop_bmap(struct vop_bmap_args *ap) 2975 { 2976 struct hammer_transaction trans; 2977 hammer_inode_t ip; 2978 hammer_mount_t hmp; 2979 struct hammer_cursor cursor; 2980 hammer_base_elm_t base; 2981 int64_t rec_offset; 2982 int64_t ran_end; 2983 int64_t tmp64; 2984 int64_t base_offset; 2985 int64_t base_disk_offset; 2986 int64_t last_offset; 2987 hammer_off_t last_disk_offset; 2988 hammer_off_t disk_offset; 2989 int rec_len; 2990 int error; 2991 int blksize; 2992 2993 ++hammer_stats_file_iopsr; 2994 ip = ap->a_vp->v_data; 2995 hmp = ip->hmp; 2996 2997 /* 2998 * We can only BMAP regular files. We can't BMAP database files, 2999 * directories, etc. 3000 */ 3001 if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE) 3002 return(EOPNOTSUPP); 3003 3004 /* 3005 * bmap is typically called with runp/runb both NULL when used 3006 * for writing. We do not support BMAP for writing atm. 3007 */ 3008 if (ap->a_cmd != BUF_CMD_READ) 3009 return(EOPNOTSUPP); 3010 3011 /* 3012 * Scan the B-Tree to acquire blockmap addresses, then translate 3013 * to raw addresses. 3014 */ 3015 lwkt_gettoken(&hmp->fs_token); 3016 hammer_simple_transaction(&trans, hmp); 3017 3018 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip); 3019 3020 /* 3021 * Key range (begin and end inclusive) to scan. Note that the keys 3022 * stored in the actual records represent BASE+LEN, not BASE. The 3023 * first record containing bio_offset will have a key > bio_offset. 3024 */ 3025 cursor.key_beg.localization = ip->obj_localization | 3026 HAMMER_LOCALIZE_MISC; 3027 cursor.key_beg.obj_id = ip->obj_id; 3028 cursor.key_beg.create_tid = 0; 3029 cursor.key_beg.delete_tid = 0; 3030 cursor.key_beg.obj_type = 0; 3031 if (ap->a_runb) 3032 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1; 3033 else 3034 cursor.key_beg.key = ap->a_loffset + 1; 3035 if (cursor.key_beg.key < 0) 3036 cursor.key_beg.key = 0; 3037 cursor.asof = ip->obj_asof; 3038 cursor.flags |= HAMMER_CURSOR_ASOF; 3039 3040 cursor.key_end = cursor.key_beg; 3041 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE); 3042 3043 ran_end = ap->a_loffset + MAXPHYS; 3044 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA; 3045 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA; 3046 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */ 3047 if (tmp64 < ran_end) 3048 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL; 3049 else 3050 cursor.key_end.key = ran_end + MAXPHYS + 1; 3051 3052 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE; 3053 3054 error = hammer_ip_first(&cursor); 3055 base_offset = last_offset = 0; 3056 base_disk_offset = last_disk_offset = 0; 3057 3058 while (error == 0) { 3059 /* 3060 * Get the base file offset of the record. The key for 3061 * data records is (base + bytes) rather than (base). 3062 * 3063 * NOTE: rec_offset + rec_len may exceed the end-of-file. 3064 * The extra bytes should be zero on-disk and the BMAP op 3065 * should still be ok. 3066 */ 3067 base = &cursor.leaf->base; 3068 rec_offset = base->key - cursor.leaf->data_len; 3069 rec_len = cursor.leaf->data_len; 3070 3071 /* 3072 * Incorporate any cached truncation. 3073 * 3074 * NOTE: Modifications to rec_len based on synthesized 3075 * truncation points remove the guarantee that any extended 3076 * data on disk is zero (since the truncations may not have 3077 * taken place on-media yet).
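 * (Clamp illustration, hypothetical numbers: rec_offset 32768 with
 * rec_len 16384 and a cached trunc_off of 40960 cuts rec_len to
 * 40960 - 32768 = 8192, so the accumulated run never extends past the
 * truncation point.)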
3078 */ 3079 if (ip->flags & HAMMER_INODE_TRUNCATED) { 3080 if (hammer_cursor_ondisk(&cursor) || 3081 cursor.iprec->flush_state == HAMMER_FST_FLUSH) { 3082 if (ip->trunc_off <= rec_offset) 3083 rec_len = 0; 3084 else if (ip->trunc_off < rec_offset + rec_len) 3085 rec_len = (int)(ip->trunc_off - rec_offset); 3086 } 3087 } 3088 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) { 3089 if (hammer_cursor_ondisk(&cursor)) { 3090 if (ip->sync_trunc_off <= rec_offset) 3091 rec_len = 0; 3092 else if (ip->sync_trunc_off < rec_offset + rec_len) 3093 rec_len = (int)(ip->sync_trunc_off - rec_offset); 3094 } 3095 } 3096 3097 /* 3098 * Accumulate information. If we have hit a discontiguous 3099 * block reset base_offset unless we are already beyond the 3100 * requested offset. If we are, that's it, we stop. 3101 */ 3102 if (error) 3103 break; 3104 if (hammer_cursor_ondisk(&cursor)) { 3105 disk_offset = cursor.leaf->data_offset; 3106 if (rec_offset != last_offset || 3107 disk_offset != last_disk_offset) { 3108 if (rec_offset > ap->a_loffset) 3109 break; 3110 base_offset = rec_offset; 3111 base_disk_offset = disk_offset; 3112 } 3113 last_offset = rec_offset + rec_len; 3114 last_disk_offset = disk_offset + rec_len; 3115 3116 if (hammer_live_dedup) 3117 hammer_dedup_cache_add(ip, cursor.leaf); 3118 } 3119 3120 error = hammer_ip_next(&cursor); 3121 } 3122 3123 if (cursor.node) 3124 hammer_cache_node(&ip->cache[1], cursor.node); 3125 3126 hammer_done_cursor(&cursor); 3127 hammer_done_transaction(&trans); 3128 lwkt_reltoken(&hmp->fs_token); 3129 3130 /* 3131 * If we couldn't find any records or the records we did find were 3132 * all behind the requested offset, return failure. A forward 3133 * truncation can leave a hole w/ no on-disk records. 3134 */ 3135 if (last_offset == 0 || last_offset < ap->a_loffset) 3136 return (EOPNOTSUPP); 3137 3138 /* 3139 * Figure out the block size at the requested offset and adjust 3140 * our limits so the cluster_read() does not create inappropriately 3141 * sized buffer cache buffers. 3142 */ 3143 blksize = hammer_blocksize(ap->a_loffset); 3144 if (hammer_blocksize(base_offset) != blksize) { 3145 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset); 3146 } 3147 if (last_offset != ap->a_loffset && 3148 hammer_blocksize(last_offset - 1) != blksize) { 3149 last_offset = hammer_blockdemarc(ap->a_loffset, 3150 last_offset - 1); 3151 } 3152 3153 /* 3154 * Returning EOPNOTSUPP simply prevents the direct-IO optimization 3155 * from occurring. 3156 */ 3157 disk_offset = base_disk_offset + (ap->a_loffset - base_offset); 3158 3159 if (!hammer_is_zone_large_data(disk_offset)) { 3160 /* 3161 * Only large-data zones can be direct-IOd 3162 */ 3163 error = EOPNOTSUPP; 3164 } else if ((disk_offset & HAMMER_BUFMASK) || 3165 (last_offset - ap->a_loffset) < blksize) { 3166 /* 3167 * doffsetp is not aligned or the forward run size does 3168 * not cover a whole buffer, disallow the direct I/O. 3169 */ 3170 error = EOPNOTSUPP; 3171 } else { 3172 /* 3173 * We're good. 3174 */ 3175 *ap->a_doffsetp = disk_offset; 3176 if (ap->a_runb) { 3177 *ap->a_runb = ap->a_loffset - base_offset; 3178 KKASSERT(*ap->a_runb >= 0); 3179 } 3180 if (ap->a_runp) { 3181 *ap->a_runp = last_offset - ap->a_loffset; 3182 KKASSERT(*ap->a_runp >= 0); 3183 } 3184 error = 0; 3185 } 3186 return(error); 3187 } 3188 3189 /* 3190 * Write to a regular file. Because this is a strategy call the OS is 3191 * trying to actually get data onto the media.
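 * (Added summary: the code below reserves media space, issues a
 * direct-write from the frontend and installs an in-memory record for
 * the flusher to finalize; failures are reported via bp->b_error /
 * B_ERROR and biodone().)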
3192 */ 3193 static 3194 int 3195 hammer_vop_strategy_write(struct vop_strategy_args *ap) 3196 { 3197 hammer_record_t record; 3198 hammer_mount_t hmp; 3199 hammer_inode_t ip; 3200 struct bio *bio; 3201 struct buf *bp; 3202 int blksize __debugvar; 3203 int bytes; 3204 int error; 3205 3206 bio = ap->a_bio; 3207 bp = bio->bio_buf; 3208 ip = ap->a_vp->v_data; 3209 hmp = ip->hmp; 3210 3211 blksize = hammer_blocksize(bio->bio_offset); 3212 KKASSERT(bp->b_bufsize == blksize); 3213 3214 if (ip->flags & HAMMER_INODE_RO) { 3215 bp->b_error = EROFS; 3216 bp->b_flags |= B_ERROR; 3217 biodone(ap->a_bio); 3218 return(EROFS); 3219 } 3220 3221 lwkt_gettoken(&hmp->fs_token); 3222 3223 /* 3224 * Disallow swapcache operation on the vnode buffer if double 3225 * buffering is enabled; the swapcache will get the data via 3226 * the block device buffer. 3227 */ 3228 if (hammer_double_buffer) 3229 bp->b_flags |= B_NOTMETA; 3230 3231 /* 3232 * Interlock with inode destruction (no in-kernel or directory 3233 * topology visibility). If we queue new IO while trying to 3234 * destroy the inode we can deadlock the vtrunc call in 3235 * hammer_inode_unloadable_check(). 3236 * 3237 * Besides, there's no point flushing a bp associated with an 3238 * inode that is being destroyed on-media and has no kernel 3239 * references. 3240 */ 3241 if ((ip->flags | ip->sync_flags) & 3242 (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) { 3243 bp->b_resid = 0; 3244 biodone(ap->a_bio); 3245 lwkt_reltoken(&hmp->fs_token); 3246 return(0); 3247 } 3248 3249 /* 3250 * Reserve space and issue a direct-write from the front-end. 3251 * NOTE: The direct_io code will hammer_bread/bcopy smaller 3252 * allocations. 3253 * 3254 * An in-memory record will be installed to reference the storage 3255 * until the flusher can get to it. 3256 * 3257 * Since we own the high level bio the front-end will not try to 3258 * do a direct-read until the write completes. 3259 * 3260 * NOTE: The only time we do not reserve a full-sized buffer's 3261 * worth of data is if the file is small. We do not try to 3262 * allocate a fragment (from the small-data zone) at the end of 3263 * an otherwise large file as this can lead to wildly separated 3264 * data. 3265 */ 3266 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0); 3267 KKASSERT(bio->bio_offset < ip->ino_data.size); 3268 if (bio->bio_offset || ip->ino_data.size > HAMMER_HBUFSIZE) 3269 bytes = bp->b_bufsize; 3270 else 3271 bytes = ((int)ip->ino_data.size + 15) & ~15; 3272 3273 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data, 3274 bytes, &error); 3275 3276 /* 3277 * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated 3278 * in hammer_vop_write(). We must flag the record so the proper 3279 * REDO_TERM_WRITE entry is generated during the flush.
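 * (Added note: an unterminated REDO_WRITE tells crash recovery that
 * the data may not have been fully synchronized, so each REDO_WRITE
 * must eventually be paired with a REDO_TERM_WRITE; HAMMER_RECF_REDO
 * is how the flusher knows to emit the terminator for this record.)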
3280 */ 3281 if (record) { 3282 if (bp->b_flags & B_VFSFLAG1) { 3283 record->flags |= HAMMER_RECF_REDO; 3284 bp->b_flags &= ~B_VFSFLAG1; 3285 } 3286 if (record->flags & HAMMER_RECF_DEDUPED) { 3287 bp->b_resid = 0; 3288 hammer_ip_replace_bulk(hmp, record); 3289 biodone(ap->a_bio); 3290 } else { 3291 hammer_io_direct_write(hmp, bio, record); 3292 } 3293 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs) 3294 hammer_flush_inode(ip, 0); 3295 } else { 3296 bp->b_bio2.bio_offset = NOOFFSET; 3297 bp->b_error = error; 3298 bp->b_flags |= B_ERROR; 3299 biodone(ap->a_bio); 3300 } 3301 lwkt_reltoken(&hmp->fs_token); 3302 return(error); 3303 } 3304 3305 /* 3306 * dounlink - disconnect a directory entry 3307 * 3308 * XXX whiteout support not really in yet 3309 */ 3310 static int 3311 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch, 3312 struct vnode *dvp, struct ucred *cred, 3313 int flags, int isdir) 3314 { 3315 struct namecache *ncp; 3316 hammer_inode_t dip; 3317 hammer_inode_t ip; 3318 hammer_mount_t hmp; 3319 struct hammer_cursor cursor; 3320 int64_t namekey; 3321 uint32_t max_iterations; 3322 int nlen, error; 3323 3324 /* 3325 * Calculate the namekey and setup the key range for the scan. This 3326 * works kinda like a chained hash table where the lower 32 bits 3327 * of the namekey synthesize the chain. 3328 * 3329 * The key range is inclusive of both key_beg and key_end. 3330 */ 3331 dip = VTOI(dvp); 3332 ncp = nch->ncp; 3333 hmp = dip->hmp; 3334 3335 if (dip->flags & HAMMER_INODE_RO) 3336 return (EROFS); 3337 3338 namekey = hammer_direntry_namekey(dip, ncp->nc_name, ncp->nc_nlen, 3339 &max_iterations); 3340 retry: 3341 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip); 3342 cursor.key_beg.localization = dip->obj_localization | 3343 hammer_dir_localization(dip); 3344 cursor.key_beg.obj_id = dip->obj_id; 3345 cursor.key_beg.key = namekey; 3346 cursor.key_beg.create_tid = 0; 3347 cursor.key_beg.delete_tid = 0; 3348 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY; 3349 cursor.key_beg.obj_type = 0; 3350 3351 cursor.key_end = cursor.key_beg; 3352 cursor.key_end.key += max_iterations; 3353 cursor.asof = dip->obj_asof; 3354 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF; 3355 3356 /* 3357 * Scan all matching records (the chain), locate the one matching 3358 * the requested path component. info->last_error contains the 3359 * error code on search termination and could be 0, ENOENT, or 3360 * something else. 3361 * 3362 * The hammer_ip_*() functions merge in-memory records with on-disk 3363 * records for the purposes of the search. 3364 */ 3365 error = hammer_ip_first(&cursor); 3366 3367 while (error == 0) { 3368 error = hammer_ip_resolve_data(&cursor); 3369 if (error) 3370 break; 3371 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF; 3372 KKASSERT(nlen > 0); 3373 if (ncp->nc_nlen == nlen && 3374 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) { 3375 break; 3376 } 3377 error = hammer_ip_next(&cursor); 3378 } 3379 3380 /* 3381 * If all is ok we have to get the inode so we can adjust nlinks. 3382 * To avoid a deadlock with the flusher we must release the inode 3383 * lock on the directory when acquiring the inode for the entry. 3384 * 3385 * If the target is a directory, it must be empty. 
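 * (This is why the code below brackets hammer_get_inode() with
 * hammer_unlock(&cursor.ip->lock) and hammer_lock_sh(&cursor.ip->lock).)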
3386 */ 3387 if (error == 0) { 3388 hammer_unlock(&cursor.ip->lock); 3389 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id, 3390 hmp->asof, 3391 cursor.data->entry.localization, 3392 0, &error); 3393 hammer_lock_sh(&cursor.ip->lock); 3394 if (error == ENOENT) { 3395 hkprintf("WARNING: Removing dirent w/missing inode " 3396 "\"%s\"\n" 3397 "\tobj_id = %016jx\n", 3398 ncp->nc_name, 3399 (intmax_t)cursor.data->entry.obj_id); 3400 error = 0; 3401 } 3402 3403 /* 3404 * If isdir >= 0 we validate that the entry is or is not a 3405 * directory. If isdir < 0 we don't care. 3406 */ 3407 if (error == 0 && isdir >= 0 && ip) { 3408 if (isdir && 3409 ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) { 3410 error = ENOTDIR; 3411 } else if (isdir == 0 && 3412 ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) { 3413 error = EISDIR; 3414 } 3415 } 3416 3417 /* 3418 * If we are trying to remove a directory the directory must 3419 * be empty. 3420 * 3421 * The check directory code can loop and deadlock/retry. Our 3422 * own cursor's node locks must be released to avoid a 3-way 3423 * deadlock with the flusher if the check directory code 3424 * blocks. 3425 * 3426 * If any changes whatsoever have been made to the cursor 3427 * set EDEADLK and retry. 3428 * 3429 * WARNING: See warnings in hammer_unlock_cursor() 3430 * function. 3431 */ 3432 if (error == 0 && ip && ip->ino_data.obj_type == 3433 HAMMER_OBJTYPE_DIRECTORY) { 3434 hammer_unlock_cursor(&cursor); 3435 error = hammer_ip_check_directory_empty(trans, ip); 3436 hammer_lock_cursor(&cursor); 3437 if (cursor.flags & HAMMER_CURSOR_RETEST) { 3438 hkprintf("Warning: avoided deadlock " 3439 "on rmdir '%s'\n", 3440 ncp->nc_name); 3441 error = EDEADLK; 3442 } 3443 } 3444 3445 /* 3446 * Delete the directory entry. 3447 * 3448 * WARNING: hammer_ip_del_direntry() may have to terminate 3449 * the cursor to avoid a deadlock. It is ok to call 3450 * hammer_done_cursor() twice. 3451 */ 3452 if (error == 0) { 3453 error = hammer_ip_del_direntry(trans, &cursor, 3454 dip, ip); 3455 } 3456 hammer_done_cursor(&cursor); 3457 if (error == 0) { 3458 /* 3459 * Tell the namecache that we are now unlinked. 3460 */ 3461 cache_unlink(nch); 3462 3463 /* 3464 * NOTE: ip->vp, if non-NULL, cannot be directly 3465 * referenced without formally acquiring the 3466 * vp since the vp might have zero refs on it, 3467 * or in the middle of a reclaim, etc. 3468 * 3469 * NOTE: The cache_setunresolved() can rip the vp 3470 * out from under us since the vp may not have 3471 * any refs, in which case ip->vp will be NULL 3472 * from the outset. 3473 */ 3474 while (ip && ip->vp) { 3475 struct vnode *vp; 3476 3477 error = hammer_get_vnode(ip, &vp); 3478 if (error == 0 && vp) { 3479 vn_unlock(vp); 3480 hammer_knote(ip->vp, NOTE_DELETE); 3481 #if 0 3482 /* 3483 * Don't do this, it can deadlock 3484 * on concurrent rm's of hardlinks. 3485 * Shouldn't be needed any more. 
3486 */ 3487 cache_inval_vp(ip->vp, CINV_DESTROY); 3488 #endif 3489 vrele(vp); 3490 break; 3491 } 3492 hdkprintf("ip/vp race1 avoided\n"); 3493 } 3494 } 3495 if (ip) 3496 hammer_rel_inode(ip, 0); 3497 } else { 3498 hammer_done_cursor(&cursor); 3499 } 3500 if (error == EDEADLK) 3501 goto retry; 3502 3503 return (error); 3504 } 3505 3506 /************************************************************************ 3507 * FIFO AND SPECFS OPS * 3508 ************************************************************************ 3509 * 3510 */ 3511 static int 3512 hammer_vop_fifoclose (struct vop_close_args *ap) 3513 { 3514 /* XXX update itimes */ 3515 return (VOCALL(&fifo_vnode_vops, &ap->a_head)); 3516 } 3517 3518 static int 3519 hammer_vop_fiforead (struct vop_read_args *ap) 3520 { 3521 int error; 3522 3523 error = VOCALL(&fifo_vnode_vops, &ap->a_head); 3524 /* XXX update access time */ 3525 return (error); 3526 } 3527 3528 static int 3529 hammer_vop_fifowrite (struct vop_write_args *ap) 3530 { 3531 int error; 3532 3533 error = VOCALL(&fifo_vnode_vops, &ap->a_head); 3534 /* XXX update access time */ 3535 return (error); 3536 } 3537 3538 static 3539 int 3540 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap) 3541 { 3542 int error; 3543 3544 error = VOCALL(&fifo_vnode_vops, &ap->a_head); 3545 if (error) 3546 error = hammer_vop_kqfilter(ap); 3547 return(error); 3548 } 3549 3550 /************************************************************************ 3551 * KQFILTER OPS * 3552 ************************************************************************ 3553 * 3554 */ 3555 static void filt_hammerdetach(struct knote *kn); 3556 static int filt_hammerread(struct knote *kn, long hint); 3557 static int filt_hammerwrite(struct knote *kn, long hint); 3558 static int filt_hammervnode(struct knote *kn, long hint); 3559 3560 static struct filterops hammerread_filtops = 3561 { FILTEROP_ISFD | FILTEROP_MPSAFE, 3562 NULL, filt_hammerdetach, filt_hammerread }; 3563 static struct filterops hammerwrite_filtops = 3564 { FILTEROP_ISFD | FILTEROP_MPSAFE, 3565 NULL, filt_hammerdetach, filt_hammerwrite }; 3566 static struct filterops hammervnode_filtops = 3567 { FILTEROP_ISFD | FILTEROP_MPSAFE, 3568 NULL, filt_hammerdetach, filt_hammervnode }; 3569 3570 static 3571 int 3572 hammer_vop_kqfilter(struct vop_kqfilter_args *ap) 3573 { 3574 struct vnode *vp = ap->a_vp; 3575 struct knote *kn = ap->a_kn; 3576 3577 switch (kn->kn_filter) { 3578 case EVFILT_READ: 3579 kn->kn_fop = &hammerread_filtops; 3580 break; 3581 case EVFILT_WRITE: 3582 kn->kn_fop = &hammerwrite_filtops; 3583 break; 3584 case EVFILT_VNODE: 3585 kn->kn_fop = &hammervnode_filtops; 3586 break; 3587 default: 3588 return (EOPNOTSUPP); 3589 } 3590 3591 kn->kn_hook = (caddr_t)vp; 3592 3593 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 3594 3595 return(0); 3596 } 3597 3598 static void 3599 filt_hammerdetach(struct knote *kn) 3600 { 3601 struct vnode *vp = (void *)kn->kn_hook; 3602 3603 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 3604 } 3605 3606 static int 3607 filt_hammerread(struct knote *kn, long hint) 3608 { 3609 struct vnode *vp = (void *)kn->kn_hook; 3610 hammer_inode_t ip = VTOI(vp); 3611 hammer_mount_t hmp = ip->hmp; 3612 off_t off; 3613 3614 if (hint == NOTE_REVOKE) { 3615 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 3616 return(1); 3617 } 3618 lwkt_gettoken(&hmp->fs_token); /* XXX use per-ip-token */ 3619 off = ip->ino_data.size - kn->kn_fp->f_offset; 3620 kn->kn_data = (off < INTPTR_MAX) ? 
off : INTPTR_MAX; 3621 lwkt_reltoken(&hmp->fs_token); 3622 if (kn->kn_sfflags & NOTE_OLDAPI) 3623 return(1); 3624 return (kn->kn_data != 0); 3625 } 3626 3627 static int 3628 filt_hammerwrite(struct knote *kn, long hint) 3629 { 3630 if (hint == NOTE_REVOKE) 3631 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 3632 kn->kn_data = 0; 3633 return (1); 3634 } 3635 3636 static int 3637 filt_hammervnode(struct knote *kn, long hint) 3638 { 3639 if (kn->kn_sfflags & hint) 3640 kn->kn_fflags |= hint; 3641 if (hint == NOTE_REVOKE) { 3642 kn->kn_flags |= (EV_EOF | EV_NODATA); 3643 return (1); 3644 } 3645 return (kn->kn_fflags != 0); 3646 } 3647 3648
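/*
 * Illustrative userland sketch (added; not part of the original file):
 * how the NOTE_* events posted through hammer_knote() are typically
 * consumed via the standard kevent(2) API.  The file name and flag
 * selection are hypothetical.
 *
 *	struct kevent kev, res;
 *	int kq = kqueue();
 *	int fd = open("/mnt/hammer/somefile", O_RDONLY);
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	       NOTE_WRITE | NOTE_ATTRIB | NOTE_RENAME | NOTE_DELETE,
 *	       0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register the filter)
 *	kevent(kq, NULL, 0, &res, 1, NULL);	(block until an event fires)
 */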