1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 /* 35 * IO Primitives and buffer cache management 36 * 37 * All major data-tracking structures in HAMMER contain a struct hammer_io 38 * which is used to manage their backing store. We use filesystem buffers 39 * for backing store and we leave them passively associated with their 40 * HAMMER structures. 41 * 42 * If the kernel tries to destroy a passively associated buf which we cannot 43 * yet let go we set B_LOCKED in the buffer and then actively released it 44 * later when we can. 45 * 46 * The io_token is required for anything which might race bioops and bio_done 47 * callbacks, with one exception: A successful hammer_try_interlock_norefs(). 48 * the fs_token will be held in all other cases. 49 */ 50 51 #include <sys/buf2.h> 52 53 #include "hammer.h" 54 55 static void hammer_io_modify(hammer_io_t io, int count); 56 static void hammer_io_deallocate(struct buf *bp); 57 static void hammer_indirect_callback(struct bio *bio); 58 static void hammer_io_direct_write_complete(struct bio *nbio); 59 static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data); 60 static void hammer_io_set_modlist(struct hammer_io *io); 61 static __inline void hammer_io_flush_mark(hammer_volume_t volume); 62 static struct bio_ops hammer_bioops; 63 64 static int 65 hammer_mod_rb_compare(hammer_io_t io1, hammer_io_t io2) 66 { 67 hammer_off_t io1_offset; 68 hammer_off_t io2_offset; 69 70 /* 71 * Encoded offsets are neither valid block device offsets 72 * nor valid zone-X offsets. 
73 */ 74 io1_offset = HAMMER_ENCODE(0, io1->volume->vol_no, io1->offset); 75 io2_offset = HAMMER_ENCODE(0, io2->volume->vol_no, io2->offset); 76 77 if (io1_offset < io2_offset) 78 return(-1); 79 if (io1_offset > io2_offset) 80 return(1); 81 return(0); 82 } 83 84 RB_GENERATE(hammer_mod_rb_tree, hammer_io, rb_node, hammer_mod_rb_compare); 85 86 /* 87 * Initialize a new, already-zero'd hammer_io structure, or reinitialize 88 * an existing hammer_io structure which may have switched to another type. 89 */ 90 void 91 hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type) 92 { 93 io->volume = volume; 94 io->hmp = volume->io.hmp; 95 io->type = type; 96 } 97 98 /* 99 * Helper routine to disassociate a buffer cache buffer from an I/O 100 * structure. The io must be interlocked and marked appropriately for 101 * reclamation. 102 * 103 * The io must be in a released state with the io->bp owned and 104 * locked by the caller of this function. When not called from an 105 * io_deallocate() this cannot race an io_deallocate() since the 106 * kernel would be unable to get the buffer lock in that case. 107 * (The released state in this case means we own the bp, not the 108 * hammer_io structure). 109 * 110 * The io may have 0 or 1 references depending on who called us. The 111 * caller is responsible for dealing with the refs. 112 * 113 * This call can only be made when no action is required on the buffer. 114 * 115 * This function is guaranteed not to race against anything because we 116 * own both the io lock and the bp lock and are interlocked with no 117 * references. 118 */ 119 static void 120 hammer_io_disassociate(hammer_io_t io) 121 { 122 struct buf *bp = io->bp; 123 124 KKASSERT(io->released); 125 KKASSERT(io->modified == 0); 126 KKASSERT(hammer_buf_peek_io(bp) == io); 127 buf_dep_init(bp); 128 io->bp = NULL; 129 130 /* 131 * If the buffer was locked someone wanted to get rid of it. 132 */ 133 if (bp->b_flags & B_LOCKED) { 134 atomic_add_int(&hammer_count_io_locked, -1); 135 bp->b_flags &= ~B_LOCKED; 136 } 137 if (io->reclaim) { 138 bp->b_flags |= B_NOCACHE|B_RELBUF; 139 io->reclaim = 0; 140 } 141 142 switch(io->type) { 143 case HAMMER_STRUCTURE_VOLUME: 144 HAMMER_ITOV(io)->ondisk = NULL; 145 break; 146 case HAMMER_STRUCTURE_DATA_BUFFER: 147 case HAMMER_STRUCTURE_META_BUFFER: 148 case HAMMER_STRUCTURE_UNDO_BUFFER: 149 HAMMER_ITOB(io)->ondisk = NULL; 150 break; 151 case HAMMER_STRUCTURE_DUMMY: 152 hpanic("bad io type"); 153 break; 154 } 155 } 156 157 /* 158 * Wait for any physical IO to complete 159 * 160 * XXX we aren't interlocked against a spinlock or anything so there 161 * is a small window in the interlock / io->running == 0 test. 162 */ 163 void 164 hammer_io_wait(hammer_io_t io) 165 { 166 if (io->running) { 167 hammer_mount_t hmp = io->hmp; 168 169 lwkt_gettoken(&hmp->io_token); 170 while (io->running) { 171 io->waiting = 1; 172 tsleep_interlock(io, 0); 173 if (io->running) 174 tsleep(io, PINTERLOCKED, "hmrflw", hz); 175 } 176 lwkt_reltoken(&hmp->io_token); 177 } 178 } 179 180 /* 181 * Wait for all currently queued HAMMER-initiated I/Os to complete. 182 * 183 * This is not supposed to count direct I/O's but some can leak 184 * through (for non-full-sized direct I/Os). 
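 */
/*
 * Editor's illustrative sketch, not part of the driver: the two halves of
 * the interlocked wait used by hammer_io_wait() above and by the wakeup in
 * hammer_io_complete().  The example_* helpers are hypothetical; the field
 * and token usage mirrors the real routines.
 */
#if 0
static void
example_io_done_side(hammer_io_t io)	/* completion path, under io_token */
{
	io->running = 0;
	if (io->waiting) {
		io->waiting = 0;
		wakeup(io);
	}
}

static void
example_io_wait_side(hammer_io_t io)	/* sleeps until ->running clears */
{
	lwkt_gettoken(&io->hmp->io_token);
	while (io->running) {
		io->waiting = 1;
		tsleep_interlock(io, 0);	/* register before re-testing */
		if (io->running)
			tsleep(io, PINTERLOCKED, "hmrflw", hz);
	}
	lwkt_reltoken(&io->hmp->io_token);
}
#endif
/*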
185 */ 186 void 187 hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush) 188 { 189 struct hammer_io iodummy; 190 hammer_io_t io; 191 192 /* 193 * Degenerate case, no I/O is running 194 */ 195 lwkt_gettoken(&hmp->io_token); 196 if (TAILQ_EMPTY(&hmp->iorun_list)) { 197 lwkt_reltoken(&hmp->io_token); 198 if (doflush) 199 hammer_io_flush_sync(hmp); 200 return; 201 } 202 bzero(&iodummy, sizeof(iodummy)); 203 iodummy.type = HAMMER_STRUCTURE_DUMMY; 204 205 /* 206 * Add placemarker and then wait until it becomes the head of 207 * the list. 208 */ 209 TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry); 210 while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) { 211 tsleep(&iodummy, 0, ident, 0); 212 } 213 214 /* 215 * Chain in case several placemarkers are present. 216 */ 217 TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry); 218 io = TAILQ_FIRST(&hmp->iorun_list); 219 if (io && io->type == HAMMER_STRUCTURE_DUMMY) 220 wakeup(io); 221 lwkt_reltoken(&hmp->io_token); 222 223 if (doflush) 224 hammer_io_flush_sync(hmp); 225 } 226 227 /* 228 * Clear a flagged error condition on a I/O buffer. The caller must hold 229 * its own ref on the buffer. 230 */ 231 void 232 hammer_io_clear_error(struct hammer_io *io) 233 { 234 hammer_mount_t hmp = io->hmp; 235 236 lwkt_gettoken(&hmp->io_token); 237 if (io->ioerror) { 238 io->ioerror = 0; 239 hammer_rel(&io->lock); 240 KKASSERT(hammer_isactive(&io->lock)); 241 } 242 lwkt_reltoken(&hmp->io_token); 243 } 244 245 void 246 hammer_io_clear_error_noassert(struct hammer_io *io) 247 { 248 hammer_mount_t hmp = io->hmp; 249 250 lwkt_gettoken(&hmp->io_token); 251 if (io->ioerror) { 252 io->ioerror = 0; 253 hammer_rel(&io->lock); 254 } 255 lwkt_reltoken(&hmp->io_token); 256 } 257 258 /* 259 * This is an advisory function only which tells the buffer cache 260 * the bp is not a meta-data buffer, even though it is backed by 261 * a block device. 262 * 263 * This is used by HAMMER's reblocking code to avoid trying to 264 * swapcache the filesystem's data when it is read or written 265 * by the reblocking code. 266 * 267 * The caller has a ref on the buffer preventing the bp from 268 * being disassociated from it. 269 */ 270 void 271 hammer_io_notmeta(hammer_buffer_t buffer) 272 { 273 if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) { 274 hammer_mount_t hmp = buffer->io.hmp; 275 276 lwkt_gettoken(&hmp->io_token); 277 buffer->io.bp->b_flags |= B_NOTMETA; 278 lwkt_reltoken(&hmp->io_token); 279 } 280 } 281 282 /* 283 * Load bp for a HAMMER structure. The io must be exclusively locked by 284 * the caller. 285 * 286 * This routine is mostly used on meta-data and small-data blocks. Generally 287 * speaking HAMMER assumes some locality of reference and will cluster. 288 * 289 * Note that the caller (hammer_ondisk.c) may place further restrictions 290 * on clusterability via the limit (in bytes). Typically large-data 291 * zones cannot be clustered due to their mixed buffer sizes. This is 292 * not an issue since such clustering occurs in hammer_vnops at the 293 * regular file layer, whereas this is the buffered block device layer. 294 * 295 * No I/O callbacks can occur while we hold the buffer locked. 
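 */
/*
 * Editor's illustrative sketch, not part of the driver: the read-path
 * decision hammer_io_read() below makes.  "limit" is the caller's
 * clusterability limit in bytes; anything <= the buffer size degenerates
 * to a plain bread().  The example_* helper is hypothetical.
 */
#if 0
static int
example_bring_in(struct vnode *devvp, hammer_io_t io, int limit)
{
	if (hammer_cluster_enable && limit > io->bytes) {
		/* cluster within [io->offset, io->offset + limit) */
		return(cluster_read(devvp, io->offset + limit,
				    io->offset, io->bytes,
				    HAMMER_CLUSTER_SIZE,
				    HAMMER_CLUSTER_SIZE, &io->bp));
	}
	return(bread(devvp, io->offset, io->bytes, &io->bp));
}
#endif
/*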
296 */ 297 int 298 hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit) 299 { 300 struct buf *bp; 301 int error; 302 303 if ((bp = io->bp) == NULL) { 304 atomic_add_long(&hammer_count_io_running_read, io->bytes); 305 if (hammer_cluster_enable && limit > io->bytes) { 306 error = cluster_read(devvp, io->offset + limit, 307 io->offset, io->bytes, 308 HAMMER_CLUSTER_SIZE, 309 HAMMER_CLUSTER_SIZE, 310 &io->bp); 311 } else { 312 error = bread(devvp, io->offset, io->bytes, &io->bp); 313 } 314 hammer_stats_disk_read += io->bytes; 315 atomic_add_long(&hammer_count_io_running_read, -io->bytes); 316 317 /* 318 * The code generally assumes b_ops/b_dep has been set-up, 319 * even if we error out here. 320 */ 321 bp = io->bp; 322 if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) { 323 const char *metatype; 324 325 switch(io->type) { 326 case HAMMER_STRUCTURE_VOLUME: 327 metatype = "volume"; 328 break; 329 case HAMMER_STRUCTURE_META_BUFFER: 330 switch(HAMMER_ZONE(HAMMER_ITOB(io)->zoneX_offset)) { 331 case HAMMER_ZONE_BTREE: 332 metatype = "btree"; 333 break; 334 case HAMMER_ZONE_META: 335 metatype = "meta"; 336 break; 337 case HAMMER_ZONE_FREEMAP: 338 metatype = "freemap"; 339 break; 340 default: 341 metatype = "meta?"; 342 break; 343 } 344 break; 345 case HAMMER_STRUCTURE_DATA_BUFFER: 346 metatype = "data"; 347 break; 348 case HAMMER_STRUCTURE_UNDO_BUFFER: 349 metatype = "undo"; 350 break; 351 default: 352 metatype = "unknown"; 353 break; 354 } 355 hdkprintf("zone2_offset %016jx %s\n", 356 (intmax_t)bp->b_bio2.bio_offset, 357 metatype); 358 } 359 bp->b_flags &= ~B_IODEBUG; 360 bp->b_ops = &hammer_bioops; 361 362 hammer_buf_attach_io(bp, io); /* locked by the io lock */ 363 BUF_KERNPROC(bp); 364 KKASSERT(io->modified == 0); 365 KKASSERT(io->running == 0); 366 KKASSERT(io->waiting == 0); 367 io->released = 0; /* we hold an active lock on bp */ 368 } else { 369 error = 0; 370 } 371 return(error); 372 } 373 374 /* 375 * Similar to hammer_io_read() but returns a zero'd out buffer instead. 376 * Must be called with the IO exclusively locked. 377 * 378 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background 379 * I/O by forcing the buffer to not be in a released state before calling 380 * it. 381 * 382 * This function will also mark the IO as modified but it will not 383 * increment the modify_refs count. 384 * 385 * No I/O callbacks can occur while we hold the buffer locked. 386 */ 387 int 388 hammer_io_new(struct vnode *devvp, struct hammer_io *io) 389 { 390 struct buf *bp; 391 392 if ((bp = io->bp) == NULL) { 393 io->bp = getblk(devvp, io->offset, io->bytes, 0, 0); 394 bp = io->bp; 395 bp->b_ops = &hammer_bioops; 396 397 hammer_buf_attach_io(bp, io); /* locked by the io lock */ 398 io->released = 0; 399 KKASSERT(io->running == 0); 400 io->waiting = 0; 401 BUF_KERNPROC(bp); 402 } else { 403 if (io->released) { 404 regetblk(bp); 405 BUF_KERNPROC(bp); 406 io->released = 0; 407 } 408 } 409 hammer_io_modify(io, 0); 410 vfs_bio_clrbuf(bp); 411 return(0); 412 } 413 414 /* 415 * Advance the activity count on the underlying buffer because 416 * HAMMER does not getblk/brelse on every access. 417 * 418 * The io->bp cannot go away while the buffer is referenced. 419 */ 420 void 421 hammer_io_advance(struct hammer_io *io) 422 { 423 if (io->bp) 424 buf_act_advance(io->bp); 425 } 426 427 /* 428 * Remove potential device level aliases against buffers managed by high level 429 * vnodes. 
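 *
 * Editor's aside, a rough sketch (illustrative only, not code lifted from
 * this file) of the core invalidation sequence hammer_io_inval() below
 * performs when no hammer_io is attached to the device buffer:
 *
 *	if ((bp = findblk(devvp, phys_offset, 0)) != NULL)
 *		bremfree(bp);
 *	else
 *		bp = getblk(devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
 *	bundirty(bp);
 *	bp->b_flags |= B_NOCACHE | B_RELBUF;
 *	brelse(bp);
 *
 * (the real routine additionally refuses to invalidate and returns EAGAIN
 * when the buffer is still associated with a hammer_io).
 *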
Aliases can also be created due to mixed buffer sizes or via 430 * direct access to the backing store device. 431 * 432 * This is nasty because the buffers are also VMIO-backed. Even if a buffer 433 * does not exist its backing VM pages might, and we have to invalidate 434 * those as well or a getblk() will reinstate them. 435 * 436 * Buffer cache buffers associated with hammer_buffers cannot be 437 * invalidated. 438 */ 439 int 440 hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset) 441 { 442 hammer_io_t io; 443 hammer_mount_t hmp; 444 hammer_off_t phys_offset; 445 struct buf *bp; 446 int error; 447 448 hmp = volume->io.hmp; 449 lwkt_gettoken(&hmp->io_token); 450 451 /* 452 * If a device buffer already exists for the specified physical 453 * offset use that, otherwise instantiate a buffer to cover any 454 * related VM pages, set BNOCACHE, and brelse(). 455 */ 456 phys_offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset); 457 if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL) 458 bremfree(bp); 459 else 460 bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0); 461 462 if ((io = hammer_buf_peek_io(bp)) != NULL) { 463 #if 0 464 hammer_ref(&io->lock); 465 hammer_io_clear_modify(io, 1); 466 bundirty(bp); 467 io->released = 0; 468 BUF_KERNPROC(bp); 469 io->reclaim = 1; 470 io->waitdep = 1; /* XXX this is a fs_token field */ 471 KKASSERT(hammer_isactive(&io->lock) == 1); 472 hammer_rel_buffer(HAMMER_ITOB(io), 0); 473 /*hammer_io_deallocate(bp);*/ 474 #endif 475 bqrelse(bp); 476 error = EAGAIN; 477 } else { 478 KKASSERT((bp->b_flags & B_LOCKED) == 0); 479 bundirty(bp); 480 bp->b_flags |= B_NOCACHE|B_RELBUF; 481 brelse(bp); 482 error = 0; 483 } 484 lwkt_reltoken(&hmp->io_token); 485 return(error); 486 } 487 488 /* 489 * This routine is called on the last reference to a hammer structure. 490 * The io must be interlocked with a refcount of zero. The hammer structure 491 * will remain interlocked on return. 492 * 493 * This routine may return a non-NULL bp to the caller for dispoal. 494 * The caller typically brelse()'s the bp. 495 * 496 * The bp may or may not still be passively associated with the IO. It 497 * will remain passively associated if it is unreleasable (e.g. a modified 498 * meta-data buffer). 499 * 500 * The only requirement here is that modified meta-data and volume-header 501 * buffer may NOT be disassociated from the IO structure, and consequently 502 * we also leave such buffers actively associated with the IO if they already 503 * are (since the kernel can't do anything with them anyway). Only the 504 * flusher is allowed to write such buffers out. Modified pure-data and 505 * undo buffers are returned to the kernel but left passively associated 506 * so we can track when the kernel writes the bp out. 507 */ 508 struct buf * 509 hammer_io_release(struct hammer_io *io, int flush) 510 { 511 struct buf *bp; 512 513 if ((bp = io->bp) == NULL) 514 return(NULL); 515 516 /* 517 * Try to flush a dirty IO to disk if asked to by the 518 * caller or if the kernel tried to flush the buffer in the past. 519 * 520 * Kernel-initiated flushes are only allowed for pure-data buffers. 521 * meta-data and volume buffers can only be flushed explicitly 522 * by HAMMER. 
 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io, 0);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
				hammer_io_flush(io, 0);
				break;
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io, hammer_undo_reclaim(io));
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate(io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bp->b_flags |= B_CLUSTEROK;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(io);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(io);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(io);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 *
 * The locked io or io reference prevents a flush from being initiated
 * by the kernel.
 */
void
hammer_io_flush(struct hammer_io *io, int reclaim)
{
	struct buf *bp;
	hammer_mount_t hmp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0)
		return;

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 *
	 * The io_token should not be required here as only
	 */
	hmp = io->hmp;
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	} else {
		io->released = 1;
	}

	if (reclaim) {
		io->reclaim = 1;
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			atomic_add_int(&hammer_count_io_locked, 1);
		}
	}

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_rel(&io->lock);

	if (hammer_debug_io & 0x0002)
		hdkprintf("%016jx\n", bp->b_bio1.bio_offset);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 *
	 * NOTE: We do not hold io_token so an atomic op is required to
	 *	 update io_running_space.
	 */
	io->running = 1;
	atomic_add_long(&hmp->io_running_space, io->bytes);
	atomic_add_long(&hammer_count_io_running_write, io->bytes);
	lwkt_gettoken(&hmp->io_token);
	TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry);
	lwkt_reltoken(&hmp->io_token);
	cluster_awrite(bp);
	hammer_io_flush_mark(io->volume);
}

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.
Meta-data buffers 755 * are locked until the flusher can deal with them, pure data buffers 756 * can be written out. 757 * 758 * The referenced io prevents races. 759 */ 760 static 761 void 762 hammer_io_modify(hammer_io_t io, int count) 763 { 764 /* 765 * io->modify_refs must be >= 0 766 */ 767 while (io->modify_refs < 0) { 768 io->waitmod = 1; 769 tsleep(io, 0, "hmrmod", 0); 770 } 771 772 /* 773 * Shortcut if nothing to do. 774 */ 775 KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL); 776 io->modify_refs += count; 777 if (io->modified && io->released == 0) 778 return; 779 780 /* 781 * NOTE: It is important not to set the modified bit 782 * until after we have acquired the bp or we risk 783 * racing against checkwrite. 784 */ 785 hammer_lock_ex(&io->lock); 786 if (io->released) { 787 regetblk(io->bp); 788 BUF_KERNPROC(io->bp); 789 io->released = 0; 790 } 791 if (io->modified == 0) { 792 hammer_io_set_modlist(io); 793 io->modified = 1; 794 } 795 hammer_unlock(&io->lock); 796 } 797 798 static __inline 799 void 800 hammer_io_modify_done(hammer_io_t io) 801 { 802 KKASSERT(io->modify_refs > 0); 803 --io->modify_refs; 804 if (io->modify_refs == 0 && io->waitmod) { 805 io->waitmod = 0; 806 wakeup(io); 807 } 808 } 809 810 /* 811 * The write interlock blocks other threads trying to modify a buffer 812 * (they block in hammer_io_modify()) after us, or blocks us while other 813 * threads are in the middle of modifying a buffer. 814 * 815 * The caller also has a ref on the io, however if we are not careful 816 * we will race bioops callbacks (checkwrite). To deal with this 817 * we must at least acquire and release the io_token, and it is probably 818 * better to hold it through the setting of modify_refs. 819 */ 820 void 821 hammer_io_write_interlock(hammer_io_t io) 822 { 823 hammer_mount_t hmp = io->hmp; 824 825 lwkt_gettoken(&hmp->io_token); 826 while (io->modify_refs != 0) { 827 io->waitmod = 1; 828 tsleep(io, 0, "hmrmod", 0); 829 } 830 io->modify_refs = -1; 831 lwkt_reltoken(&hmp->io_token); 832 } 833 834 void 835 hammer_io_done_interlock(hammer_io_t io) 836 { 837 KKASSERT(io->modify_refs == -1); 838 io->modify_refs = 0; 839 if (io->waitmod) { 840 io->waitmod = 0; 841 wakeup(io); 842 } 843 } 844 845 /* 846 * Caller intends to modify a volume's ondisk structure. 847 * 848 * This is only allowed if we are the flusher or we have a ref on the 849 * sync_lock. 850 */ 851 void 852 hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume, 853 void *base, int len) 854 { 855 KKASSERT (trans == NULL || trans->sync_lock_refs > 0); 856 857 hammer_io_modify(&volume->io, 1); 858 if (len) { 859 intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk; 860 KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0); 861 hammer_generate_undo(trans, 862 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset), 863 base, len); 864 } 865 } 866 867 /* 868 * Caller intends to modify a buffer's ondisk structure. 869 * 870 * This is only allowed if we are the flusher or we have a ref on the 871 * sync_lock. 
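 */
/*
 * Editor's illustrative sketch, not part of the driver: the call pattern
 * the dirtying API expects.  A caller with a transaction and a referenced
 * buffer brackets its on-disk edit with hammer_modify_buffer() and
 * hammer_modify_buffer_done() so undo is generated and modify_refs stays
 * balanced.  The example_* helper is hypothetical.
 */
#if 0
static void
example_edit_ondisk(hammer_transaction_t trans, hammer_buffer_t buffer,
		    void *base, int len)
{
	hammer_modify_buffer(trans, buffer, base, len);
	/* ... modify the bytes at base, which lie inside buffer->ondisk ... */
	hammer_modify_buffer_done(buffer);
}
#endif
/*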
872 */ 873 void 874 hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer, 875 void *base, int len) 876 { 877 KKASSERT (trans == NULL || trans->sync_lock_refs > 0); 878 879 hammer_io_modify(&buffer->io, 1); 880 if (len) { 881 intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk; 882 KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0); 883 hammer_generate_undo(trans, 884 buffer->zone2_offset + rel_offset, 885 base, len); 886 } 887 } 888 889 void 890 hammer_modify_volume_done(hammer_volume_t volume) 891 { 892 hammer_io_modify_done(&volume->io); 893 } 894 895 void 896 hammer_modify_buffer_done(hammer_buffer_t buffer) 897 { 898 hammer_io_modify_done(&buffer->io); 899 } 900 901 /* 902 * Mark an entity as not being dirty any more and finalize any 903 * delayed adjustments to the buffer. 904 * 905 * Delayed adjustments are an important performance enhancement, allowing 906 * us to avoid recalculating B-Tree node CRCs over and over again when 907 * making bulk-modifications to the B-Tree. 908 * 909 * If inval is non-zero delayed adjustments are ignored. 910 * 911 * This routine may dereference related btree nodes and cause the 912 * buffer to be dereferenced. The caller must own a reference on io. 913 */ 914 void 915 hammer_io_clear_modify(struct hammer_io *io, int inval) 916 { 917 hammer_mount_t hmp; 918 919 /* 920 * io_token is needed to avoid races on mod_root 921 */ 922 if (io->modified == 0) 923 return; 924 hmp = io->hmp; 925 lwkt_gettoken(&hmp->io_token); 926 if (io->modified == 0) { 927 lwkt_reltoken(&hmp->io_token); 928 return; 929 } 930 931 /* 932 * Take us off the mod-list and clear the modified bit. 933 */ 934 KKASSERT(io->mod_root != NULL); 935 if (io->mod_root == &io->hmp->volu_root || 936 io->mod_root == &io->hmp->meta_root) { 937 io->hmp->locked_dirty_space -= io->bytes; 938 atomic_add_long(&hammer_count_dirtybufspace, -io->bytes); 939 } 940 RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io); 941 io->mod_root = NULL; 942 io->modified = 0; 943 944 lwkt_reltoken(&hmp->io_token); 945 946 /* 947 * If this bit is not set there are no delayed adjustments. 948 */ 949 if (io->gencrc == 0) 950 return; 951 io->gencrc = 0; 952 953 /* 954 * Finalize requested CRCs. The NEEDSCRC flag also holds a reference 955 * on the node (& underlying buffer). Release the node after clearing 956 * the flag. 957 */ 958 if (io->type == HAMMER_STRUCTURE_META_BUFFER) { 959 hammer_buffer_t buffer = HAMMER_ITOB(io); 960 hammer_node_t node; 961 962 restart: 963 TAILQ_FOREACH(node, &buffer->node_list, entry) { 964 if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) 965 continue; 966 node->flags &= ~HAMMER_NODE_NEEDSCRC; 967 KKASSERT(node->ondisk); 968 if (inval == 0) 969 hammer_crc_set_btree(node->ondisk); 970 hammer_rel_node(node); 971 goto restart; 972 } 973 } 974 /* caller must still have ref on io */ 975 KKASSERT(hammer_isactive(&io->lock)); 976 } 977 978 /* 979 * Clear the IO's modify list. Even though the IO is no longer modified 980 * it may still be on the lose_root. This routine is called just before 981 * the governing hammer_buffer is destroyed. 982 * 983 * mod_root requires io_token protection. 
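 */
/*
 * Editor's illustrative sketch, not part of the driver: the check / lock /
 * re-check idiom used by hammer_io_clear_modify() above and by
 * hammer_io_clear_modlist() below.  The unlocked test is a cheap early out;
 * the test is repeated under io_token because another thread may clear the
 * state in the window.  The example_* helper is hypothetical.
 */
#if 0
static void
example_clear_flag(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	if (io->modified == 0)		/* opportunistic, unlocked test */
		return;
	lwkt_gettoken(&hmp->io_token);
	if (io->modified) {		/* re-test while serialized */
		/* ... remove from mod_root and clear io->modified ... */
	}
	lwkt_reltoken(&hmp->io_token);
}
#endif
/*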
984 */ 985 void 986 hammer_io_clear_modlist(struct hammer_io *io) 987 { 988 hammer_mount_t hmp = io->hmp; 989 990 KKASSERT(io->modified == 0); 991 if (io->mod_root) { 992 lwkt_gettoken(&hmp->io_token); 993 if (io->mod_root) { 994 KKASSERT(io->mod_root == &io->hmp->lose_root); 995 RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io); 996 io->mod_root = NULL; 997 } 998 lwkt_reltoken(&hmp->io_token); 999 } 1000 } 1001 1002 static void 1003 hammer_io_set_modlist(struct hammer_io *io) 1004 { 1005 struct hammer_mount *hmp = io->hmp; 1006 1007 lwkt_gettoken(&hmp->io_token); 1008 KKASSERT(io->mod_root == NULL); 1009 1010 switch(io->type) { 1011 case HAMMER_STRUCTURE_VOLUME: 1012 io->mod_root = &hmp->volu_root; 1013 hmp->locked_dirty_space += io->bytes; 1014 atomic_add_long(&hammer_count_dirtybufspace, io->bytes); 1015 break; 1016 case HAMMER_STRUCTURE_META_BUFFER: 1017 io->mod_root = &hmp->meta_root; 1018 hmp->locked_dirty_space += io->bytes; 1019 atomic_add_long(&hammer_count_dirtybufspace, io->bytes); 1020 break; 1021 case HAMMER_STRUCTURE_UNDO_BUFFER: 1022 io->mod_root = &hmp->undo_root; 1023 break; 1024 case HAMMER_STRUCTURE_DATA_BUFFER: 1025 io->mod_root = &hmp->data_root; 1026 break; 1027 case HAMMER_STRUCTURE_DUMMY: 1028 hpanic("bad io type"); 1029 break; /* NOT REACHED */ 1030 } 1031 if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) { 1032 hpanic("duplicate entry @ %d:%015jx", 1033 io->volume->vol_no, io->offset); 1034 /* NOT REACHED */ 1035 } 1036 lwkt_reltoken(&hmp->io_token); 1037 } 1038 1039 /************************************************************************ 1040 * HAMMER_BIOOPS * 1041 ************************************************************************ 1042 * 1043 */ 1044 1045 /* 1046 * Pre-IO initiation kernel callback - cluster build only 1047 * 1048 * bioops callback - hold io_token 1049 */ 1050 static void 1051 hammer_io_start(struct buf *bp) 1052 { 1053 /* nothing to do, so io_token not needed */ 1054 } 1055 1056 /* 1057 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT! 1058 * 1059 * NOTE: HAMMER may modify a data buffer after we have initiated write 1060 * I/O. 1061 * 1062 * NOTE: MPSAFE callback 1063 * 1064 * bioops callback - hold io_token 1065 */ 1066 static void 1067 hammer_io_complete(struct buf *bp) 1068 { 1069 hammer_io_t io = hammer_buf_peek_io(bp); 1070 struct hammer_mount *hmp = io->hmp; 1071 struct hammer_io *ionext; 1072 1073 lwkt_gettoken(&hmp->io_token); 1074 1075 KKASSERT(io->released == 1); 1076 1077 /* 1078 * Deal with people waiting for I/O to drain 1079 */ 1080 if (io->running) { 1081 /* 1082 * Deal with critical write errors. Once a critical error 1083 * has been flagged in hmp the UNDO FIFO will not be updated. 1084 * That way crash recover will give us a consistent 1085 * filesystem. 1086 * 1087 * Because of this we can throw away failed UNDO buffers. If 1088 * we throw away META or DATA buffers we risk corrupting 1089 * the now read-only version of the filesystem visible to 1090 * the user. Clear B_ERROR so the buffer is not re-dirtied 1091 * by the kernel and ref the io so it doesn't get thrown 1092 * away. 
1093 */ 1094 if (bp->b_flags & B_ERROR) { 1095 lwkt_gettoken(&hmp->fs_token); 1096 hammer_critical_error(hmp, NULL, bp->b_error, 1097 "while flushing meta-data"); 1098 lwkt_reltoken(&hmp->fs_token); 1099 1100 switch(io->type) { 1101 case HAMMER_STRUCTURE_UNDO_BUFFER: 1102 break; 1103 default: 1104 if (io->ioerror == 0) { 1105 io->ioerror = 1; 1106 hammer_ref(&io->lock); 1107 } 1108 break; 1109 } 1110 bp->b_flags &= ~B_ERROR; 1111 bundirty(bp); 1112 #if 0 1113 hammer_io_set_modlist(io); 1114 io->modified = 1; 1115 #endif 1116 } 1117 hammer_stats_disk_write += io->bytes; 1118 atomic_add_long(&hammer_count_io_running_write, -io->bytes); 1119 atomic_add_long(&hmp->io_running_space, -io->bytes); 1120 KKASSERT(hmp->io_running_space >= 0); 1121 io->running = 0; 1122 1123 /* 1124 * Remove from iorun list and wakeup any multi-io waiter(s). 1125 */ 1126 if (TAILQ_FIRST(&hmp->iorun_list) == io) { 1127 ionext = TAILQ_NEXT(io, iorun_entry); 1128 if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY) 1129 wakeup(ionext); 1130 } 1131 TAILQ_REMOVE(&hmp->iorun_list, io, iorun_entry); 1132 } else { 1133 hammer_stats_disk_read += io->bytes; 1134 } 1135 1136 if (io->waiting) { 1137 io->waiting = 0; 1138 wakeup(io); 1139 } 1140 1141 /* 1142 * If B_LOCKED is set someone wanted to deallocate the bp at some 1143 * point, try to do it now. The operation will fail if there are 1144 * refs or if hammer_io_deallocate() is unable to gain the 1145 * interlock. 1146 */ 1147 if (bp->b_flags & B_LOCKED) { 1148 atomic_add_int(&hammer_count_io_locked, -1); 1149 bp->b_flags &= ~B_LOCKED; 1150 hammer_io_deallocate(bp); 1151 /* structure may be dead now */ 1152 } 1153 lwkt_reltoken(&hmp->io_token); 1154 } 1155 1156 /* 1157 * Callback from kernel when it wishes to deallocate a passively 1158 * associated structure. This mostly occurs with clean buffers 1159 * but it may be possible for a holding structure to be marked dirty 1160 * while its buffer is passively associated. The caller owns the bp. 1161 * 1162 * If we cannot disassociate we set B_LOCKED to prevent the buffer 1163 * from getting reused. 1164 * 1165 * WARNING: Because this can be called directly by getnewbuf we cannot 1166 * recurse into the tree. If a bp cannot be immediately disassociated 1167 * our only recourse is to set B_LOCKED. 1168 * 1169 * WARNING: This may be called from an interrupt via hammer_io_complete() 1170 * 1171 * bioops callback - hold io_token 1172 */ 1173 static void 1174 hammer_io_deallocate(struct buf *bp) 1175 { 1176 hammer_io_t io = hammer_buf_peek_io(bp); 1177 hammer_mount_t hmp; 1178 1179 hmp = io->hmp; 1180 1181 lwkt_gettoken(&hmp->io_token); 1182 1183 KKASSERT((bp->b_flags & B_LOCKED) == 0 && io->running == 0); 1184 if (hammer_try_interlock_norefs(&io->lock) == 0) { 1185 /* 1186 * We cannot safely disassociate a bp from a referenced 1187 * or interlocked HAMMER structure. 1188 */ 1189 bp->b_flags |= B_LOCKED; 1190 atomic_add_int(&hammer_count_io_locked, 1); 1191 } else if (io->modified) { 1192 /* 1193 * It is not legal to disassociate a modified buffer. This 1194 * case really shouldn't ever occur. 1195 */ 1196 bp->b_flags |= B_LOCKED; 1197 atomic_add_int(&hammer_count_io_locked, 1); 1198 hammer_put_interlock(&io->lock, 0); 1199 } else { 1200 /* 1201 * Disassociate the BP. If the io has no refs left we 1202 * have to add it to the loose list. The kernel has 1203 * locked the buffer and therefore our io must be 1204 * in a released state. 
1205 */ 1206 hammer_io_disassociate(io); 1207 if (io->type != HAMMER_STRUCTURE_VOLUME) { 1208 KKASSERT(io->bp == NULL); 1209 KKASSERT(io->mod_root == NULL); 1210 io->mod_root = &hmp->lose_root; 1211 if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) { 1212 hpanic("duplicate entry @ %d:%015jx", 1213 io->volume->vol_no, io->offset); 1214 /* NOT REACHED */ 1215 } 1216 } 1217 hammer_put_interlock(&io->lock, 1); 1218 } 1219 lwkt_reltoken(&hmp->io_token); 1220 } 1221 1222 /* 1223 * bioops callback - hold io_token 1224 */ 1225 static int 1226 hammer_io_fsync(struct vnode *vp) 1227 { 1228 /* nothing to do, so io_token not needed */ 1229 return(0); 1230 } 1231 1232 /* 1233 * NOTE: will not be called unless we tell the kernel about the 1234 * bioops. Unused... we use the mount's VFS_SYNC instead. 1235 * 1236 * bioops callback - hold io_token 1237 */ 1238 static int 1239 hammer_io_sync(struct mount *mp) 1240 { 1241 /* nothing to do, so io_token not needed */ 1242 return(0); 1243 } 1244 1245 /* 1246 * bioops callback - hold io_token 1247 */ 1248 static void 1249 hammer_io_movedeps(struct buf *bp1, struct buf *bp2) 1250 { 1251 /* nothing to do, so io_token not needed */ 1252 } 1253 1254 /* 1255 * I/O pre-check for reading and writing. HAMMER only uses this for 1256 * B_CACHE buffers so checkread just shouldn't happen, but if it does 1257 * allow it. 1258 * 1259 * Writing is a different case. We don't want the kernel to try to write 1260 * out a buffer that HAMMER may be modifying passively or which has a 1261 * dependancy. In addition, kernel-demanded writes can only proceed for 1262 * certain types of buffers (i.e. UNDO and DATA types). Other dirty 1263 * buffer types can only be explicitly written by the flusher. 1264 * 1265 * checkwrite will only be called for bdwrite()n buffers. If we return 1266 * success the kernel is guaranteed to initiate the buffer write. 1267 * 1268 * bioops callback - hold io_token 1269 */ 1270 static int 1271 hammer_io_checkread(struct buf *bp) 1272 { 1273 /* nothing to do, so io_token not needed */ 1274 return(0); 1275 } 1276 1277 /* 1278 * The kernel is asking us whether it can write out a dirty buffer or not. 1279 * 1280 * bioops callback - hold io_token 1281 */ 1282 static int 1283 hammer_io_checkwrite(struct buf *bp) 1284 { 1285 hammer_io_t io = hammer_buf_peek_io(bp); 1286 hammer_mount_t hmp = io->hmp; 1287 1288 /* 1289 * This shouldn't happen under normal operation. 1290 */ 1291 lwkt_gettoken(&hmp->io_token); 1292 if (io->type == HAMMER_STRUCTURE_VOLUME || 1293 io->type == HAMMER_STRUCTURE_META_BUFFER) { 1294 if (!panicstr) 1295 hpanic("illegal buffer"); 1296 if ((bp->b_flags & B_LOCKED) == 0) { 1297 bp->b_flags |= B_LOCKED; 1298 atomic_add_int(&hammer_count_io_locked, 1); 1299 } 1300 lwkt_reltoken(&hmp->io_token); 1301 return(1); 1302 } 1303 1304 /* 1305 * We have to be able to interlock the IO to safely modify any 1306 * of its fields without holding the fs_token. If we can't lock 1307 * it then we are racing someone. 1308 * 1309 * Our ownership of the bp lock prevents the io from being ripped 1310 * out from under us. 1311 */ 1312 if (hammer_try_interlock_norefs(&io->lock) == 0) { 1313 bp->b_flags |= B_LOCKED; 1314 atomic_add_int(&hammer_count_io_locked, 1); 1315 lwkt_reltoken(&hmp->io_token); 1316 return(1); 1317 } 1318 1319 /* 1320 * The modified bit must be cleared prior to the initiation of 1321 * any IO (returning 0 initiates the IO). Because this is a 1322 * normal data buffer hammer_io_clear_modify() runs through a 1323 * simple degenerate case. 
1324 * 1325 * Return 0 will cause the kernel to initiate the IO, and we 1326 * must normally clear the modified bit before we begin. If 1327 * the io has modify_refs we do not clear the modified bit, 1328 * otherwise we may miss changes. 1329 * 1330 * Only data and undo buffers can reach here. These buffers do 1331 * not have terminal crc functions but we temporarily reference 1332 * the IO anyway, just in case. 1333 */ 1334 if (io->modify_refs == 0 && io->modified) { 1335 hammer_ref(&io->lock); 1336 hammer_io_clear_modify(io, 0); 1337 hammer_rel(&io->lock); 1338 } else if (io->modified) { 1339 KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER); 1340 } 1341 1342 /* 1343 * The kernel is going to start the IO, set io->running. 1344 */ 1345 KKASSERT(io->running == 0); 1346 io->running = 1; 1347 atomic_add_long(&io->hmp->io_running_space, io->bytes); 1348 atomic_add_long(&hammer_count_io_running_write, io->bytes); 1349 TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry); 1350 1351 hammer_put_interlock(&io->lock, 1); 1352 lwkt_reltoken(&hmp->io_token); 1353 1354 return(0); 1355 } 1356 1357 /* 1358 * Return non-zero if we wish to delay the kernel's attempt to flush 1359 * this buffer to disk. 1360 * 1361 * bioops callback - hold io_token 1362 */ 1363 static int 1364 hammer_io_countdeps(struct buf *bp, int n) 1365 { 1366 /* nothing to do, so io_token not needed */ 1367 return(0); 1368 } 1369 1370 static struct bio_ops hammer_bioops = { 1371 .io_start = hammer_io_start, 1372 .io_complete = hammer_io_complete, 1373 .io_deallocate = hammer_io_deallocate, 1374 .io_fsync = hammer_io_fsync, 1375 .io_sync = hammer_io_sync, 1376 .io_movedeps = hammer_io_movedeps, 1377 .io_countdeps = hammer_io_countdeps, 1378 .io_checkread = hammer_io_checkread, 1379 .io_checkwrite = hammer_io_checkwrite, 1380 }; 1381 1382 /************************************************************************ 1383 * DIRECT IO OPS * 1384 ************************************************************************ 1385 * 1386 * These functions operate directly on the buffer cache buffer associated 1387 * with a front-end vnode rather then a back-end device vnode. 1388 */ 1389 1390 /* 1391 * Read a buffer associated with a front-end vnode directly from the 1392 * disk media. The bio may be issued asynchronously. If leaf is non-NULL 1393 * we validate the CRC. 1394 * 1395 * We must check for the presence of a HAMMER buffer to handle the case 1396 * where the reblocker has rewritten the data (which it does via the HAMMER 1397 * buffer system, not via the high-level vnode buffer cache), but not yet 1398 * committed the buffer to the media. 1399 */ 1400 int 1401 hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio, 1402 hammer_btree_leaf_elm_t leaf) 1403 { 1404 hammer_off_t buf_offset; 1405 hammer_off_t zone2_offset; 1406 hammer_volume_t volume; 1407 struct buf *bp; 1408 struct bio *nbio; 1409 int vol_no; 1410 int error; 1411 1412 buf_offset = bio->bio_offset; 1413 KKASSERT(hammer_is_zone_large_data(buf_offset)); 1414 1415 /* 1416 * The buffer cache may have an aliased buffer (the reblocker can 1417 * write them). If it does we have to sync any dirty data before 1418 * we can build our direct-read. This is a non-critical code path. 1419 */ 1420 bp = bio->bio_buf; 1421 hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize); 1422 1423 /* 1424 * Resolve to a zone-2 offset. The conversion just requires 1425 * munging the top 4 bits but we want to abstract it anyway 1426 * so the blockmap code can verify the zone assignment. 
1427 */ 1428 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error); 1429 if (error) 1430 goto done; 1431 KKASSERT(hammer_is_zone_raw_buffer(zone2_offset)); 1432 1433 /* 1434 * Resolve volume and raw-offset for 3rd level bio. The 1435 * offset will be specific to the volume. 1436 */ 1437 vol_no = HAMMER_VOL_DECODE(zone2_offset); 1438 volume = hammer_get_volume(hmp, vol_no, &error); 1439 if (error == 0 && zone2_offset >= volume->maxbuf_off) 1440 error = EIO; 1441 1442 if (error == 0) { 1443 /* 1444 * 3rd level bio (the caller has already pushed once) 1445 */ 1446 nbio = push_bio(bio); 1447 nbio->bio_offset = hammer_xlate_to_phys(volume->ondisk, 1448 zone2_offset); 1449 hammer_stats_disk_read += bp->b_bufsize; 1450 vn_strategy(volume->devvp, nbio); 1451 } 1452 hammer_rel_volume(volume, 0); 1453 done: 1454 if (error) { 1455 hdkprintf("failed @ %016jx\n", (intmax_t)zone2_offset); 1456 bp->b_error = error; 1457 bp->b_flags |= B_ERROR; 1458 biodone(bio); 1459 } 1460 return(error); 1461 } 1462 1463 /* 1464 * This works similarly to hammer_io_direct_read() except instead of 1465 * directly reading from the device into the bio we instead indirectly 1466 * read through the device's buffer cache and then copy the data into 1467 * the bio. 1468 * 1469 * If leaf is non-NULL and validation is enabled, the CRC will be checked. 1470 * 1471 * This routine also executes asynchronously. It allows hammer strategy 1472 * calls to operate asynchronously when in double_buffer mode (in addition 1473 * to operating asynchronously when in normal mode). 1474 */ 1475 int 1476 hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio, 1477 hammer_btree_leaf_elm_t leaf) 1478 { 1479 hammer_off_t buf_offset; 1480 hammer_off_t zone2_offset; 1481 hammer_volume_t volume; 1482 struct buf *bp; 1483 int vol_no; 1484 int error; 1485 1486 buf_offset = bio->bio_offset; 1487 KKASSERT(hammer_is_zone_large_data(buf_offset)); 1488 1489 /* 1490 * The buffer cache may have an aliased buffer (the reblocker can 1491 * write them). If it does we have to sync any dirty data before 1492 * we can build our direct-read. This is a non-critical code path. 1493 */ 1494 bp = bio->bio_buf; 1495 hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize); 1496 1497 /* 1498 * Resolve to a zone-2 offset. The conversion just requires 1499 * munging the top 4 bits but we want to abstract it anyway 1500 * so the blockmap code can verify the zone assignment. 1501 */ 1502 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error); 1503 if (error) 1504 goto done; 1505 KKASSERT(hammer_is_zone_raw_buffer(zone2_offset)); 1506 1507 /* 1508 * Resolve volume and raw-offset for 3rd level bio. The 1509 * offset will be specific to the volume. 1510 */ 1511 vol_no = HAMMER_VOL_DECODE(zone2_offset); 1512 volume = hammer_get_volume(hmp, vol_no, &error); 1513 if (error == 0 && zone2_offset >= volume->maxbuf_off) 1514 error = EIO; 1515 1516 if (error == 0) { 1517 /* 1518 * Convert to the raw volume->devvp offset and acquire 1519 * the buf, issuing async I/O if necessary. 
1520 */ 1521 buf_offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset); 1522 1523 if (leaf && hammer_verify_data) { 1524 bio->bio_caller_info1.uvalue32 = leaf->data_crc; 1525 bio->bio_caller_info2.index = 1; 1526 } else { 1527 bio->bio_caller_info2.index = 0; 1528 } 1529 breadcb(volume->devvp, buf_offset, bp->b_bufsize, 1530 hammer_indirect_callback, bio); 1531 } 1532 hammer_rel_volume(volume, 0); 1533 done: 1534 if (error) { 1535 hdkprintf("failed @ %016jx\n", (intmax_t)zone2_offset); 1536 bp->b_error = error; 1537 bp->b_flags |= B_ERROR; 1538 biodone(bio); 1539 } 1540 return(error); 1541 } 1542 1543 /* 1544 * Indirect callback on completion. bio/bp specify the device-backed 1545 * buffer. bio->bio_caller_info1.ptr holds obio. 1546 * 1547 * obio/obp is the original regular file buffer. obio->bio_caller_info* 1548 * contains the crc specification. 1549 * 1550 * We are responsible for calling bpdone() and bqrelse() on bio/bp, and 1551 * for calling biodone() on obio. 1552 */ 1553 static void 1554 hammer_indirect_callback(struct bio *bio) 1555 { 1556 struct buf *bp = bio->bio_buf; 1557 struct buf *obp; 1558 struct bio *obio; 1559 1560 /* 1561 * If BIO_DONE is already set the device buffer was already 1562 * fully valid (B_CACHE). If it is not set then I/O was issued 1563 * and we have to run I/O completion as the last bio. 1564 * 1565 * Nobody is waiting for our device I/O to complete, we are 1566 * responsible for bqrelse()ing it which means we also have to do 1567 * the equivalent of biowait() and clear BIO_DONE (which breadcb() 1568 * may have set). 1569 * 1570 * Any preexisting device buffer should match the requested size, 1571 * but due to big-block recycling and other factors there is some 1572 * fragility there, so we assert that the device buffer covers 1573 * the request. 1574 */ 1575 if ((bio->bio_flags & BIO_DONE) == 0) 1576 bpdone(bp, 0); 1577 bio->bio_flags &= ~(BIO_DONE | BIO_SYNC); 1578 1579 obio = bio->bio_caller_info1.ptr; 1580 obp = obio->bio_buf; 1581 1582 if (bp->b_flags & B_ERROR) { 1583 obp->b_flags |= B_ERROR; 1584 obp->b_error = bp->b_error; 1585 } else if (obio->bio_caller_info2.index && 1586 obio->bio_caller_info1.uvalue32 != 1587 crc32(bp->b_data, bp->b_bufsize)) { 1588 obp->b_flags |= B_ERROR; 1589 obp->b_error = EIO; 1590 } else { 1591 KKASSERT(bp->b_bufsize >= obp->b_bufsize); 1592 bcopy(bp->b_data, obp->b_data, obp->b_bufsize); 1593 obp->b_resid = 0; 1594 obp->b_flags |= B_AGE; 1595 } 1596 biodone(obio); 1597 bqrelse(bp); 1598 } 1599 1600 /* 1601 * Write a buffer associated with a front-end vnode directly to the 1602 * disk media. The bio may be issued asynchronously. 1603 * 1604 * The BIO is associated with the specified record and RECG_DIRECT_IO 1605 * is set. The recorded is added to its object. 1606 */ 1607 int 1608 hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio, 1609 hammer_record_t record) 1610 { 1611 hammer_btree_leaf_elm_t leaf = &record->leaf; 1612 hammer_off_t buf_offset; 1613 hammer_off_t zone2_offset; 1614 hammer_volume_t volume; 1615 hammer_buffer_t buffer; 1616 struct buf *bp; 1617 struct bio *nbio; 1618 char *ptr; 1619 int vol_no; 1620 int error; 1621 1622 buf_offset = leaf->data_offset; 1623 1624 KKASSERT(hammer_is_zone2_mapped_index( 1625 HAMMER_ZONE_DECODE(buf_offset))); 1626 KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE); 1627 1628 /* 1629 * Issue or execute the I/O. 
The new memory record must replace 1630 * the old one before the I/O completes, otherwise a reaquisition of 1631 * the buffer will load the old media data instead of the new. 1632 */ 1633 if ((buf_offset & HAMMER_BUFMASK) == 0 && 1634 leaf->data_len >= HAMMER_BUFSIZE) { 1635 /* 1636 * We are using the vnode's bio to write directly to the 1637 * media, any hammer_buffer at the same zone-X offset will 1638 * now have stale data. 1639 */ 1640 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error); 1641 vol_no = HAMMER_VOL_DECODE(zone2_offset); 1642 volume = hammer_get_volume(hmp, vol_no, &error); 1643 1644 if (error == 0 && zone2_offset >= volume->maxbuf_off) 1645 error = EIO; 1646 if (error == 0) { 1647 bp = bio->bio_buf; 1648 KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0); 1649 /* 1650 hammer_del_buffers(hmp, buf_offset, 1651 zone2_offset, bp->b_bufsize); 1652 */ 1653 1654 /* 1655 * Second level bio - cached zone2 offset. 1656 * 1657 * (We can put our bio_done function in either the 1658 * 2nd or 3rd level). 1659 */ 1660 nbio = push_bio(bio); 1661 nbio->bio_offset = zone2_offset; 1662 nbio->bio_done = hammer_io_direct_write_complete; 1663 nbio->bio_caller_info1.ptr = record; 1664 record->zone2_offset = zone2_offset; 1665 record->gflags |= HAMMER_RECG_DIRECT_IO | 1666 HAMMER_RECG_DIRECT_INVAL; 1667 1668 /* 1669 * Third level bio - raw offset specific to the 1670 * correct volume. 1671 */ 1672 nbio = push_bio(nbio); 1673 nbio->bio_offset = hammer_xlate_to_phys(volume->ondisk, 1674 zone2_offset); 1675 hammer_stats_disk_write += bp->b_bufsize; 1676 hammer_ip_replace_bulk(hmp, record); 1677 vn_strategy(volume->devvp, nbio); 1678 hammer_io_flush_mark(volume); 1679 } 1680 hammer_rel_volume(volume, 0); 1681 } else { 1682 /* 1683 * Must fit in a standard HAMMER buffer. In this case all 1684 * consumers use the HAMMER buffer system and RECG_DIRECT_IO 1685 * does not need to be set-up. 1686 */ 1687 KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0); 1688 buffer = NULL; 1689 ptr = hammer_bread(hmp, buf_offset, &error, &buffer); 1690 if (error == 0) { 1691 bp = bio->bio_buf; 1692 bp->b_flags |= B_AGE; 1693 hammer_io_modify(&buffer->io, 1); 1694 bcopy(bp->b_data, ptr, leaf->data_len); 1695 hammer_io_modify_done(&buffer->io); 1696 hammer_rel_buffer(buffer, 0); 1697 bp->b_resid = 0; 1698 hammer_ip_replace_bulk(hmp, record); 1699 biodone(bio); 1700 } 1701 } 1702 if (error) { 1703 /* 1704 * Major suckage occured. Also note: The record was 1705 * never added to the tree so we do not have to worry 1706 * about the backend. 1707 */ 1708 hdkprintf("failed @ %016jx\n", (intmax_t)leaf->data_offset); 1709 bp = bio->bio_buf; 1710 bp->b_resid = 0; 1711 bp->b_error = EIO; 1712 bp->b_flags |= B_ERROR; 1713 biodone(bio); 1714 record->flags |= HAMMER_RECF_DELETED_FE; 1715 hammer_rel_mem_record(record); 1716 } 1717 return(error); 1718 } 1719 1720 /* 1721 * On completion of the BIO this callback must disconnect 1722 * it from the hammer_record and chain to the previous bio. 1723 * 1724 * An I/O error forces the mount to read-only. Data buffers 1725 * are not B_LOCKED like meta-data buffers are, so we have to 1726 * throw the buffer away to prevent the kernel from retrying. 1727 * 1728 * NOTE: MPSAFE callback, only modify fields we have explicit 1729 * access to (the bp and the record->gflags). 
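 */
/*
 * Editor's illustrative sketch, not part of the driver: the bio layering
 * hammer_io_direct_write() above sets up for a full-block write.  The
 * second level carries the cached zone-2 offset and the completion
 * callback, the third level carries the raw, volume-relative offset handed
 * to the device.  Record bookkeeping (zone2_offset, gflags) is omitted and
 * the example_* helper is hypothetical.
 */
#if 0
static void
example_layer_bios(hammer_volume_t volume, struct bio *bio,
		   hammer_record_t record, hammer_off_t zone2_offset)
{
	struct bio *nbio;

	nbio = push_bio(bio);		/* 2nd level - cached zone-2 offset */
	nbio->bio_offset = zone2_offset;
	nbio->bio_done = hammer_io_direct_write_complete;
	nbio->bio_caller_info1.ptr = record;

	nbio = push_bio(nbio);		/* 3rd level - raw volume offset */
	nbio->bio_offset = hammer_xlate_to_phys(volume->ondisk,
						zone2_offset);
	vn_strategy(volume->devvp, nbio);
}
#endif
/*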
1730 */ 1731 static 1732 void 1733 hammer_io_direct_write_complete(struct bio *nbio) 1734 { 1735 struct bio *obio; 1736 struct buf *bp; 1737 hammer_record_t record; 1738 hammer_mount_t hmp; 1739 1740 record = nbio->bio_caller_info1.ptr; 1741 KKASSERT(record != NULL); 1742 hmp = record->ip->hmp; 1743 1744 lwkt_gettoken(&hmp->io_token); 1745 1746 bp = nbio->bio_buf; 1747 obio = pop_bio(nbio); 1748 if (bp->b_flags & B_ERROR) { 1749 lwkt_gettoken(&hmp->fs_token); 1750 hammer_critical_error(hmp, record->ip, bp->b_error, 1751 "while writing bulk data"); 1752 lwkt_reltoken(&hmp->fs_token); 1753 bp->b_flags |= B_INVAL; 1754 } 1755 biodone(obio); 1756 1757 KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO); 1758 if (record->gflags & HAMMER_RECG_DIRECT_WAIT) { 1759 record->gflags &= ~(HAMMER_RECG_DIRECT_IO | 1760 HAMMER_RECG_DIRECT_WAIT); 1761 /* record can disappear once DIRECT_IO flag is cleared */ 1762 wakeup(&record->flags); 1763 } else { 1764 record->gflags &= ~HAMMER_RECG_DIRECT_IO; 1765 /* record can disappear once DIRECT_IO flag is cleared */ 1766 } 1767 lwkt_reltoken(&hmp->io_token); 1768 } 1769 1770 1771 /* 1772 * This is called before a record is either committed to the B-Tree 1773 * or destroyed, to resolve any associated direct-IO. 1774 * 1775 * (1) We must wait for any direct-IO related to the record to complete. 1776 * 1777 * (2) We must remove any buffer cache aliases for data accessed via 1778 * leaf->data_offset or zone2_offset so non-direct-IO consumers 1779 * (the mirroring and reblocking code) do not see stale data. 1780 */ 1781 void 1782 hammer_io_direct_wait(hammer_record_t record) 1783 { 1784 hammer_mount_t hmp = record->ip->hmp; 1785 1786 /* 1787 * Wait for I/O to complete 1788 */ 1789 if (record->gflags & HAMMER_RECG_DIRECT_IO) { 1790 lwkt_gettoken(&hmp->io_token); 1791 while (record->gflags & HAMMER_RECG_DIRECT_IO) { 1792 record->gflags |= HAMMER_RECG_DIRECT_WAIT; 1793 tsleep(&record->flags, 0, "hmdiow", 0); 1794 } 1795 lwkt_reltoken(&hmp->io_token); 1796 } 1797 1798 /* 1799 * Invalidate any related buffer cache aliases associated with the 1800 * backing device. This is needed because the buffer cache buffer 1801 * for file data is associated with the file vnode, not the backing 1802 * device vnode. 1803 * 1804 * XXX I do not think this case can occur any more now that 1805 * reservations ensure that all such buffers are removed before 1806 * an area can be reused. 1807 */ 1808 if (record->gflags & HAMMER_RECG_DIRECT_INVAL) { 1809 KKASSERT(record->leaf.data_offset); 1810 hammer_del_buffers(hmp, record->leaf.data_offset, 1811 record->zone2_offset, record->leaf.data_len, 1812 1); 1813 record->gflags &= ~HAMMER_RECG_DIRECT_INVAL; 1814 } 1815 } 1816 1817 /* 1818 * This is called to remove the second-level cached zone-2 offset from 1819 * frontend buffer cache buffers, now stale due to a data relocation. 1820 * These offsets are generated by cluster_read() via VOP_BMAP, or directly 1821 * by hammer_vop_strategy_read(). 1822 * 1823 * This is rather nasty because here we have something like the reblocker 1824 * scanning the raw B-Tree with no held references on anything, really, 1825 * other then a shared lock on the B-Tree node, and we have to access the 1826 * frontend's buffer cache to check for and clean out the association. 1827 * Specifically, if the reblocker is moving data on the disk, these cached 1828 * offsets will become invalid. 1829 * 1830 * Only data record types associated with the large-data zone are subject 1831 * to direct-io and need to be checked. 
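 */
/*
 * Editor's illustrative sketch, not part of the driver: the handshake
 * between hammer_io_direct_wait() and hammer_io_direct_write_complete()
 * above.  The waiter sets HAMMER_RECG_DIRECT_WAIT and sleeps on
 * &record->flags; completion clears HAMMER_RECG_DIRECT_IO and wakes that
 * address, after which the record may be torn down at any time.  The
 * example_* helper is hypothetical.
 */
#if 0
static void
example_wait_direct_io(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;

	lwkt_gettoken(&hmp->io_token);
	while (record->gflags & HAMMER_RECG_DIRECT_IO) {
		record->gflags |= HAMMER_RECG_DIRECT_WAIT;
		tsleep(&record->flags, 0, "hmdiow", 0);
	}
	lwkt_reltoken(&hmp->io_token);
}
#endif
/*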
1832 * 1833 */ 1834 void 1835 hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf) 1836 { 1837 struct hammer_inode_info iinfo; 1838 int zone; 1839 1840 if (leaf->base.rec_type != HAMMER_RECTYPE_DATA) 1841 return; 1842 zone = HAMMER_ZONE_DECODE(leaf->data_offset); 1843 if (zone != HAMMER_ZONE_LARGE_DATA_INDEX) 1844 return; 1845 iinfo.obj_id = leaf->base.obj_id; 1846 iinfo.obj_asof = 0; /* unused */ 1847 iinfo.obj_localization = leaf->base.localization & 1848 HAMMER_LOCALIZE_PSEUDOFS_MASK; 1849 iinfo.u.leaf = leaf; 1850 hammer_scan_inode_snapshots(hmp, &iinfo, 1851 hammer_io_direct_uncache_callback, 1852 leaf); 1853 } 1854 1855 static int 1856 hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data) 1857 { 1858 hammer_inode_info_t iinfo = data; 1859 hammer_off_t file_offset; 1860 struct vnode *vp; 1861 struct buf *bp; 1862 int blksize; 1863 1864 if (ip->vp == NULL) 1865 return(0); 1866 file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len; 1867 blksize = iinfo->u.leaf->data_len; 1868 KKASSERT((blksize & HAMMER_BUFMASK) == 0); 1869 1870 /* 1871 * Warning: FINDBLK_TEST return stable storage but not stable 1872 * contents. It happens to be ok in this case. 1873 */ 1874 hammer_ref(&ip->lock); 1875 if (hammer_get_vnode(ip, &vp) == 0) { 1876 if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL && 1877 bp->b_bio2.bio_offset != NOOFFSET) { 1878 bp = getblk(ip->vp, file_offset, blksize, 0, 0); 1879 bp->b_bio2.bio_offset = NOOFFSET; 1880 brelse(bp); 1881 } 1882 vput(vp); 1883 } 1884 hammer_rel_inode(ip, 0); 1885 return(0); 1886 } 1887 1888 1889 /* 1890 * This function is called when writes may have occured on the volume, 1891 * indicating that the device may be holding cached writes. 1892 */ 1893 static __inline void 1894 hammer_io_flush_mark(hammer_volume_t volume) 1895 { 1896 atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH); 1897 } 1898 1899 /* 1900 * This function ensures that the device has flushed any cached writes out. 1901 */ 1902 void 1903 hammer_io_flush_sync(hammer_mount_t hmp) 1904 { 1905 hammer_volume_t volume; 1906 struct buf *bp_base = NULL; 1907 struct buf *bp; 1908 1909 RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) { 1910 if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) { 1911 atomic_clear_int(&volume->vol_flags, 1912 HAMMER_VOLF_NEEDFLUSH); 1913 bp = getpbuf(NULL); 1914 bp->b_bio1.bio_offset = 0; 1915 bp->b_bufsize = 0; 1916 bp->b_bcount = 0; 1917 bp->b_cmd = BUF_CMD_FLUSH; 1918 bp->b_bio1.bio_caller_info1.cluster_head = bp_base; 1919 bp->b_bio1.bio_done = biodone_sync; 1920 bp->b_bio1.bio_flags |= BIO_SYNC; 1921 bp_base = bp; 1922 vn_strategy(volume->devvp, &bp->b_bio1); 1923 } 1924 } 1925 while ((bp = bp_base) != NULL) { 1926 bp_base = bp->b_bio1.bio_caller_info1.cluster_head; 1927 biowait(&bp->b_bio1, "hmrFLS"); 1928 relpbuf(bp, NULL); 1929 } 1930 } 1931 1932 /* 1933 * Limit the amount of backlog which we allow to build up 1934 */ 1935 void 1936 hammer_io_limit_backlog(hammer_mount_t hmp) 1937 { 1938 waitrunningbufspace(); 1939 } 1940
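
/*
 * Editor's illustrative sketch, not part of the driver: the essence of the
 * chained device flush hammer_io_flush_sync() performs above.  One pbuf per
 * volume is turned into a BUF_CMD_FLUSH command; the buffers are chained
 * through bio_caller_info1.cluster_head so they can all be issued first and
 * then waited on as a group with biowait().  The example_* helper is
 * hypothetical.
 */
#if 0
static void
example_issue_flush(hammer_volume_t volume, struct buf **chainp)
{
	struct buf *bp = getpbuf(NULL);

	bp->b_bio1.bio_offset = 0;
	bp->b_bufsize = 0;
	bp->b_bcount = 0;
	bp->b_cmd = BUF_CMD_FLUSH;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	bp->b_bio1.bio_caller_info1.cluster_head = *chainp;
	*chainp = bp;			/* caller later biowait()s the chain */
	vn_strategy(volume->devvp, &bp->b_bio1);
}
#endif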