1 /*
2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34 /*
35 * IO Primitives and buffer cache management
36 *
37 * All major data-tracking structures in HAMMER contain a struct hammer_io
38 * which is used to manage their backing store. We use filesystem buffers
39 * for backing store and we leave them passively associated with their
40 * HAMMER structures.
41 *
42 * If the kernel tries to destroy a passively associated buf which we cannot
43 * yet let go we set B_LOCKED in the buffer and then actively release it
44 * later when we can.
45 *
46 * The io_token is required for anything which might race bioops and bio_done
47 * callbacks, with one exception: a successful hammer_try_interlock_norefs().
48 * The fs_token will be held in all other cases.
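 *
 * As a rough illustration of that rule (a sketch, not a verbatim excerpt
 * from any routine below), code touching state which a bioops or bio_done
 * callback can also touch brackets the access with the io_token:
 *
 *      lwkt_gettoken(&hmp->io_token);
 *      ... examine or adjust io->running, io->waiting, hmp->iorun_list ...
 *      lwkt_reltoken(&hmp->io_token);
 *
 * while fields protected by the fs_token are only touched from frontend
 * or flusher context which already holds that token.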
49 */ 50 51 #include "hammer.h" 52 #include <sys/fcntl.h> 53 #include <sys/nlookup.h> 54 #include <sys/buf.h> 55 56 #include <sys/buf2.h> 57 58 static void hammer_io_modify(hammer_io_t io, int count); 59 static void hammer_io_deallocate(struct buf *bp); 60 static void hammer_indirect_callback(struct bio *bio); 61 #if 0 62 static void hammer_io_direct_read_complete(struct bio *nbio); 63 #endif 64 static void hammer_io_direct_write_complete(struct bio *nbio); 65 static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data); 66 static void hammer_io_set_modlist(struct hammer_io *io); 67 static void hammer_io_flush_mark(hammer_volume_t volume); 68 69 static int 70 hammer_mod_rb_compare(hammer_io_t io1, hammer_io_t io2) 71 { 72 hammer_off_t io1_offset; 73 hammer_off_t io2_offset; 74 75 io1_offset = ((io1->offset & HAMMER_OFF_SHORT_MASK) << 8) | 76 io1->volume->vol_no; 77 io2_offset = ((io2->offset & HAMMER_OFF_SHORT_MASK) << 8) | 78 io2->volume->vol_no; 79 80 if (io1_offset < io2_offset) 81 return(-1); 82 if (io1_offset > io2_offset) 83 return(1); 84 return(0); 85 } 86 87 RB_GENERATE(hammer_mod_rb_tree, hammer_io, rb_node, hammer_mod_rb_compare); 88 89 /* 90 * Initialize a new, already-zero'd hammer_io structure, or reinitialize 91 * an existing hammer_io structure which may have switched to another type. 92 */ 93 void 94 hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type) 95 { 96 io->volume = volume; 97 io->hmp = volume->io.hmp; 98 io->type = type; 99 } 100 101 /* 102 * Helper routine to disassociate a buffer cache buffer from an I/O 103 * structure. The io must be interlocked and marked appropriately for 104 * reclamation. 105 * 106 * The io must be in a released state with the io->bp owned and 107 * locked by the caller of this function. When not called from an 108 * io_deallocate() this cannot race an io_deallocate() since the 109 * kernel would be unable to get the buffer lock in that case. 110 * (The released state in this case means we own the bp, not the 111 * hammer_io structure). 112 * 113 * The io may have 0 or 1 references depending on who called us. The 114 * caller is responsible for dealing with the refs. 115 * 116 * This call can only be made when no action is required on the buffer. 117 * 118 * This function is guaranteed not to race against anything because we 119 * own both the io lock and the bp lock and are interlocked with no 120 * references. 121 */ 122 static void 123 hammer_io_disassociate(hammer_io_structure_t iou) 124 { 125 struct buf *bp = iou->io.bp; 126 127 KKASSERT(iou->io.released); 128 KKASSERT(iou->io.modified == 0); 129 KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou); 130 buf_dep_init(bp); 131 iou->io.bp = NULL; 132 133 /* 134 * If the buffer was locked someone wanted to get rid of it. 
135 */ 136 if (bp->b_flags & B_LOCKED) { 137 atomic_add_int(&hammer_count_io_locked, -1); 138 bp->b_flags &= ~B_LOCKED; 139 } 140 if (iou->io.reclaim) { 141 bp->b_flags |= B_NOCACHE|B_RELBUF; 142 iou->io.reclaim = 0; 143 } 144 145 switch(iou->io.type) { 146 case HAMMER_STRUCTURE_VOLUME: 147 iou->volume.ondisk = NULL; 148 break; 149 case HAMMER_STRUCTURE_DATA_BUFFER: 150 case HAMMER_STRUCTURE_META_BUFFER: 151 case HAMMER_STRUCTURE_UNDO_BUFFER: 152 iou->buffer.ondisk = NULL; 153 break; 154 case HAMMER_STRUCTURE_DUMMY: 155 panic("hammer_io_disassociate: bad io type"); 156 break; 157 } 158 } 159 160 /* 161 * Wait for any physical IO to complete 162 * 163 * XXX we aren't interlocked against a spinlock or anything so there 164 * is a small window in the interlock / io->running == 0 test. 165 */ 166 void 167 hammer_io_wait(hammer_io_t io) 168 { 169 if (io->running) { 170 hammer_mount_t hmp = io->hmp; 171 172 lwkt_gettoken(&hmp->io_token); 173 while (io->running) { 174 io->waiting = 1; 175 tsleep_interlock(io, 0); 176 if (io->running) 177 tsleep(io, PINTERLOCKED, "hmrflw", hz); 178 } 179 lwkt_reltoken(&hmp->io_token); 180 } 181 } 182 183 /* 184 * Wait for all currently queued HAMMER-initiated I/Os to complete. 185 * 186 * This is not supposed to count direct I/O's but some can leak 187 * through (for non-full-sized direct I/Os). 188 */ 189 void 190 hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush) 191 { 192 struct hammer_io iodummy; 193 hammer_io_t io; 194 195 /* 196 * Degenerate case, no I/O is running 197 */ 198 lwkt_gettoken(&hmp->io_token); 199 if (TAILQ_EMPTY(&hmp->iorun_list)) { 200 lwkt_reltoken(&hmp->io_token); 201 if (doflush) 202 hammer_io_flush_sync(hmp); 203 return; 204 } 205 bzero(&iodummy, sizeof(iodummy)); 206 iodummy.type = HAMMER_STRUCTURE_DUMMY; 207 208 /* 209 * Add placemarker and then wait until it becomes the head of 210 * the list. 211 */ 212 TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry); 213 while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) { 214 tsleep(&iodummy, 0, ident, 0); 215 } 216 217 /* 218 * Chain in case several placemarkers are present. 219 */ 220 TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry); 221 io = TAILQ_FIRST(&hmp->iorun_list); 222 if (io && io->type == HAMMER_STRUCTURE_DUMMY) 223 wakeup(io); 224 lwkt_reltoken(&hmp->io_token); 225 226 if (doflush) 227 hammer_io_flush_sync(hmp); 228 } 229 230 /* 231 * Clear a flagged error condition on a I/O buffer. The caller must hold 232 * its own ref on the buffer. 233 */ 234 void 235 hammer_io_clear_error(struct hammer_io *io) 236 { 237 hammer_mount_t hmp = io->hmp; 238 239 lwkt_gettoken(&hmp->io_token); 240 if (io->ioerror) { 241 io->ioerror = 0; 242 hammer_rel(&io->lock); 243 KKASSERT(hammer_isactive(&io->lock)); 244 } 245 lwkt_reltoken(&hmp->io_token); 246 } 247 248 void 249 hammer_io_clear_error_noassert(struct hammer_io *io) 250 { 251 hammer_mount_t hmp = io->hmp; 252 253 lwkt_gettoken(&hmp->io_token); 254 if (io->ioerror) { 255 io->ioerror = 0; 256 hammer_rel(&io->lock); 257 } 258 lwkt_reltoken(&hmp->io_token); 259 } 260 261 /* 262 * This is an advisory function only which tells the buffer cache 263 * the bp is not a meta-data buffer, even though it is backed by 264 * a block device. 265 * 266 * This is used by HAMMER's reblocking code to avoid trying to 267 * swapcache the filesystem's data when it is read or written 268 * by the reblocking code. 269 * 270 * The caller has a ref on the buffer preventing the bp from 271 * being disassociated from it. 
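 *
 * A minimal sketch of the intended call pattern (illustrative only; the
 * actual consumer is the reblocker, which lives outside this file):
 *
 *      data_buffer = NULL;
 *      ptr = hammer_bread(hmp, data_offset, &error, &data_buffer);
 *      if (error == 0) {
 *              hammer_io_notmeta(data_buffer);
 *              ... relocate or copy the data ...
 *      }
 *      if (data_buffer)
 *              hammer_rel_buffer(data_buffer, 0);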
272 */ 273 void 274 hammer_io_notmeta(hammer_buffer_t buffer) 275 { 276 if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) { 277 hammer_mount_t hmp = buffer->io.hmp; 278 279 lwkt_gettoken(&hmp->io_token); 280 buffer->io.bp->b_flags |= B_NOTMETA; 281 lwkt_reltoken(&hmp->io_token); 282 } 283 } 284 285 /* 286 * Load bp for a HAMMER structure. The io must be exclusively locked by 287 * the caller. 288 * 289 * This routine is mostly used on meta-data and small-data blocks. Generally 290 * speaking HAMMER assumes some locality of reference and will cluster. 291 * 292 * Note that the caller (hammer_ondisk.c) may place further restrictions 293 * on clusterability via the limit (in bytes). Typically large-data 294 * zones cannot be clustered due to their mixed buffer sizes. This is 295 * not an issue since such clustering occurs in hammer_vnops at the 296 * regular file layer, whereas this is the buffered block device layer. 297 * 298 * No I/O callbacks can occur while we hold the buffer locked. 299 */ 300 int 301 hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit) 302 { 303 struct buf *bp; 304 int error; 305 306 if ((bp = io->bp) == NULL) { 307 atomic_add_long(&hammer_count_io_running_read, io->bytes); 308 if (hammer_cluster_enable && limit > io->bytes) { 309 error = cluster_read(devvp, io->offset + limit, 310 io->offset, io->bytes, 311 HAMMER_CLUSTER_SIZE, 312 HAMMER_CLUSTER_SIZE, 313 &io->bp); 314 } else { 315 error = bread(devvp, io->offset, io->bytes, &io->bp); 316 } 317 hammer_stats_disk_read += io->bytes; 318 atomic_add_long(&hammer_count_io_running_read, -io->bytes); 319 320 /* 321 * The code generally assumes b_ops/b_dep has been set-up, 322 * even if we error out here. 323 */ 324 bp = io->bp; 325 if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) { 326 const char *metatype; 327 328 switch(io->type) { 329 case HAMMER_STRUCTURE_VOLUME: 330 metatype = "volume"; 331 break; 332 case HAMMER_STRUCTURE_META_BUFFER: 333 switch(((struct hammer_buffer *)io)-> 334 zoneX_offset & HAMMER_OFF_ZONE_MASK) { 335 case HAMMER_ZONE_BTREE: 336 metatype = "btree"; 337 break; 338 case HAMMER_ZONE_META: 339 metatype = "meta"; 340 break; 341 case HAMMER_ZONE_FREEMAP: 342 metatype = "freemap"; 343 break; 344 default: 345 metatype = "meta?"; 346 break; 347 } 348 break; 349 case HAMMER_STRUCTURE_DATA_BUFFER: 350 metatype = "data"; 351 break; 352 case HAMMER_STRUCTURE_UNDO_BUFFER: 353 metatype = "undo"; 354 break; 355 default: 356 metatype = "unknown"; 357 break; 358 } 359 kprintf("doff %016jx %s\n", 360 (intmax_t)bp->b_bio2.bio_offset, 361 metatype); 362 } 363 bp->b_flags &= ~B_IODEBUG; 364 bp->b_ops = &hammer_bioops; 365 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 366 367 /* io->worklist is locked by the io lock */ 368 LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node); 369 BUF_KERNPROC(bp); 370 KKASSERT(io->modified == 0); 371 KKASSERT(io->running == 0); 372 KKASSERT(io->waiting == 0); 373 io->released = 0; /* we hold an active lock on bp */ 374 } else { 375 error = 0; 376 } 377 return(error); 378 } 379 380 /* 381 * Similar to hammer_io_read() but returns a zero'd out buffer instead. 382 * Must be called with the IO exclusively locked. 383 * 384 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background 385 * I/O by forcing the buffer to not be in a released state before calling 386 * it. 387 * 388 * This function will also mark the IO as modified but it will not 389 * increment the modify_refs count. 390 * 391 * No I/O callbacks can occur while we hold the buffer locked. 
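 *
 * In both the read and the new case the caller pattern is roughly the
 * same (a sketch of the expected hammer_ondisk.c usage, not a verbatim
 * excerpt):
 *
 *      error = hammer_io_read(volume->devvp, &buffer->io, limit);
 *      (or)  error = hammer_io_new(volume->devvp, &buffer->io);
 *      if (error == 0)
 *              buffer->ondisk = (void *)buffer->io.bp->b_data;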
392 */ 393 int 394 hammer_io_new(struct vnode *devvp, struct hammer_io *io) 395 { 396 struct buf *bp; 397 398 if ((bp = io->bp) == NULL) { 399 io->bp = getblk(devvp, io->offset, io->bytes, 0, 0); 400 bp = io->bp; 401 bp->b_ops = &hammer_bioops; 402 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 403 404 /* io->worklist is locked by the io lock */ 405 LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node); 406 io->released = 0; 407 KKASSERT(io->running == 0); 408 io->waiting = 0; 409 BUF_KERNPROC(bp); 410 } else { 411 if (io->released) { 412 regetblk(bp); 413 BUF_KERNPROC(bp); 414 io->released = 0; 415 } 416 } 417 hammer_io_modify(io, 0); 418 vfs_bio_clrbuf(bp); 419 return(0); 420 } 421 422 /* 423 * Advance the activity count on the underlying buffer because 424 * HAMMER does not getblk/brelse on every access. 425 * 426 * The io->bp cannot go away while the buffer is referenced. 427 */ 428 void 429 hammer_io_advance(struct hammer_io *io) 430 { 431 if (io->bp) 432 buf_act_advance(io->bp); 433 } 434 435 /* 436 * Remove potential device level aliases against buffers managed by high level 437 * vnodes. Aliases can also be created due to mixed buffer sizes or via 438 * direct access to the backing store device. 439 * 440 * This is nasty because the buffers are also VMIO-backed. Even if a buffer 441 * does not exist its backing VM pages might, and we have to invalidate 442 * those as well or a getblk() will reinstate them. 443 * 444 * Buffer cache buffers associated with hammer_buffers cannot be 445 * invalidated. 446 */ 447 int 448 hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset) 449 { 450 hammer_io_structure_t iou; 451 hammer_mount_t hmp; 452 hammer_off_t phys_offset; 453 struct buf *bp; 454 int error; 455 456 hmp = volume->io.hmp; 457 lwkt_gettoken(&hmp->io_token); 458 459 /* 460 * If a device buffer already exists for the specified physical 461 * offset use that, otherwise instantiate a buffer to cover any 462 * related VM pages, set BNOCACHE, and brelse(). 463 */ 464 phys_offset = volume->ondisk->vol_buf_beg + 465 (zone2_offset & HAMMER_OFF_SHORT_MASK); 466 if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL) 467 bremfree(bp); 468 else 469 bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0); 470 471 if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) { 472 #if 0 473 hammer_ref(&iou->io.lock); 474 hammer_io_clear_modify(&iou->io, 1); 475 bundirty(bp); 476 iou->io.released = 0; 477 BUF_KERNPROC(bp); 478 iou->io.reclaim = 1; 479 iou->io.waitdep = 1; /* XXX this is a fs_token field */ 480 KKASSERT(hammer_isactive(&iou->io.lock) == 1); 481 hammer_rel_buffer(&iou->buffer, 0); 482 /*hammer_io_deallocate(bp);*/ 483 #endif 484 bqrelse(bp); 485 error = EAGAIN; 486 } else { 487 KKASSERT((bp->b_flags & B_LOCKED) == 0); 488 bundirty(bp); 489 bp->b_flags |= B_NOCACHE|B_RELBUF; 490 brelse(bp); 491 error = 0; 492 } 493 lwkt_reltoken(&hmp->io_token); 494 return(error); 495 } 496 497 /* 498 * This routine is called on the last reference to a hammer structure. 499 * The io must be interlocked with a refcount of zero. The hammer structure 500 * will remain interlocked on return. 501 * 502 * This routine may return a non-NULL bp to the caller for dispoal. 503 * The caller typically brelse()'s the bp. 504 * 505 * The bp may or may not still be passively associated with the IO. It 506 * will remain passively associated if it is unreleasable (e.g. a modified 507 * meta-data buffer). 
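 *
 * The typical caller pattern is therefore (sketch only):
 *
 *      bp = hammer_io_release(&buffer->io, flush);
 *      if (bp)
 *              brelse(bp);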
508 * 509 * The only requirement here is that modified meta-data and volume-header 510 * buffer may NOT be disassociated from the IO structure, and consequently 511 * we also leave such buffers actively associated with the IO if they already 512 * are (since the kernel can't do anything with them anyway). Only the 513 * flusher is allowed to write such buffers out. Modified pure-data and 514 * undo buffers are returned to the kernel but left passively associated 515 * so we can track when the kernel writes the bp out. 516 */ 517 struct buf * 518 hammer_io_release(struct hammer_io *io, int flush) 519 { 520 union hammer_io_structure *iou = (void *)io; 521 struct buf *bp; 522 523 if ((bp = io->bp) == NULL) 524 return(NULL); 525 526 /* 527 * Try to flush a dirty IO to disk if asked to by the 528 * caller or if the kernel tried to flush the buffer in the past. 529 * 530 * Kernel-initiated flushes are only allowed for pure-data buffers. 531 * meta-data and volume buffers can only be flushed explicitly 532 * by HAMMER. 533 */ 534 if (io->modified) { 535 if (flush) { 536 hammer_io_flush(io, 0); 537 } else if (bp->b_flags & B_LOCKED) { 538 switch(io->type) { 539 case HAMMER_STRUCTURE_DATA_BUFFER: 540 hammer_io_flush(io, 0); 541 break; 542 case HAMMER_STRUCTURE_UNDO_BUFFER: 543 hammer_io_flush(io, hammer_undo_reclaim(io)); 544 break; 545 default: 546 break; 547 } 548 } /* else no explicit request to flush the buffer */ 549 } 550 551 /* 552 * Wait for the IO to complete if asked to. This occurs when 553 * the buffer must be disposed of definitively during an umount 554 * or buffer invalidation. 555 */ 556 if (io->waitdep && io->running) { 557 hammer_io_wait(io); 558 } 559 560 /* 561 * Return control of the buffer to the kernel (with the provisio 562 * that our bioops can override kernel decisions with regards to 563 * the buffer). 564 */ 565 if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) { 566 /* 567 * Always disassociate the bp if an explicit flush 568 * was requested and the IO completed with no error 569 * (so unmount can really clean up the structure). 570 */ 571 if (io->released) { 572 regetblk(bp); 573 BUF_KERNPROC(bp); 574 } else { 575 io->released = 1; 576 } 577 hammer_io_disassociate((hammer_io_structure_t)io); 578 /* return the bp */ 579 } else if (io->modified) { 580 /* 581 * Only certain IO types can be released to the kernel if 582 * the buffer has been modified. 583 * 584 * volume and meta-data IO types may only be explicitly 585 * flushed by HAMMER. 586 */ 587 switch(io->type) { 588 case HAMMER_STRUCTURE_DATA_BUFFER: 589 case HAMMER_STRUCTURE_UNDO_BUFFER: 590 if (io->released == 0) { 591 io->released = 1; 592 bp->b_flags |= B_CLUSTEROK; 593 bdwrite(bp); 594 } 595 break; 596 default: 597 break; 598 } 599 bp = NULL; /* bp left associated */ 600 } else if (io->released == 0) { 601 /* 602 * Clean buffers can be generally released to the kernel. 603 * We leave the bp passively associated with the HAMMER 604 * structure and use bioops to disconnect it later on 605 * if the kernel wants to discard the buffer. 606 * 607 * We can steal the structure's ownership of the bp. 608 */ 609 io->released = 1; 610 if (bp->b_flags & B_LOCKED) { 611 hammer_io_disassociate(iou); 612 /* return the bp */ 613 } else { 614 if (io->reclaim) { 615 hammer_io_disassociate(iou); 616 /* return the bp */ 617 } else { 618 /* return the bp (bp passively associated) */ 619 } 620 } 621 } else { 622 /* 623 * A released buffer is passively associate with our 624 * hammer_io structure. 
The kernel cannot destroy it 625 * without making a bioops call. If the kernel (B_LOCKED) 626 * or we (reclaim) requested that the buffer be destroyed 627 * we destroy it, otherwise we do a quick get/release to 628 * reset its position in the kernel's LRU list. 629 * 630 * Leaving the buffer passively associated allows us to 631 * use the kernel's LRU buffer flushing mechanisms rather 632 * then rolling our own. 633 * 634 * XXX there are two ways of doing this. We can re-acquire 635 * and passively release to reset the LRU, or not. 636 */ 637 if (io->running == 0) { 638 regetblk(bp); 639 if ((bp->b_flags & B_LOCKED) || io->reclaim) { 640 hammer_io_disassociate(iou); 641 /* return the bp */ 642 } else { 643 /* return the bp (bp passively associated) */ 644 } 645 } else { 646 /* 647 * bp is left passively associated but we do not 648 * try to reacquire it. Interactions with the io 649 * structure will occur on completion of the bp's 650 * I/O. 651 */ 652 bp = NULL; 653 } 654 } 655 return(bp); 656 } 657 658 /* 659 * This routine is called with a locked IO when a flush is desired and 660 * no other references to the structure exists other then ours. This 661 * routine is ONLY called when HAMMER believes it is safe to flush a 662 * potentially modified buffer out. 663 * 664 * The locked io or io reference prevents a flush from being initiated 665 * by the kernel. 666 */ 667 void 668 hammer_io_flush(struct hammer_io *io, int reclaim) 669 { 670 struct buf *bp; 671 hammer_mount_t hmp; 672 673 /* 674 * Degenerate case - nothing to flush if nothing is dirty. 675 */ 676 if (io->modified == 0) 677 return; 678 679 KKASSERT(io->bp); 680 KKASSERT(io->modify_refs <= 0); 681 682 /* 683 * Acquire ownership of the bp, particularly before we clear our 684 * modified flag. 685 * 686 * We are going to bawrite() this bp. Don't leave a window where 687 * io->released is set, we actually own the bp rather then our 688 * buffer. 689 * 690 * The io_token should not be required here as only 691 */ 692 hmp = io->hmp; 693 bp = io->bp; 694 if (io->released) { 695 regetblk(bp); 696 /* BUF_KERNPROC(io->bp); */ 697 /* io->released = 0; */ 698 KKASSERT(io->released); 699 KKASSERT(io->bp == bp); 700 } else { 701 io->released = 1; 702 } 703 704 if (reclaim) { 705 io->reclaim = 1; 706 if ((bp->b_flags & B_LOCKED) == 0) { 707 bp->b_flags |= B_LOCKED; 708 atomic_add_int(&hammer_count_io_locked, 1); 709 } 710 } 711 712 /* 713 * Acquire exclusive access to the bp and then clear the modified 714 * state of the buffer prior to issuing I/O to interlock any 715 * modifications made while the I/O is in progress. This shouldn't 716 * happen anyway but losing data would be worse. The modified bit 717 * will be rechecked after the IO completes. 718 * 719 * NOTE: This call also finalizes the buffer's content (inval == 0). 720 * 721 * This is only legal when lock.refs == 1 (otherwise we might clear 722 * the modified bit while there are still users of the cluster 723 * modifying the data). 724 * 725 * Do this before potentially blocking so any attempt to modify the 726 * ondisk while we are blocked blocks waiting for us. 727 */ 728 hammer_ref(&io->lock); 729 hammer_io_clear_modify(io, 0); 730 hammer_rel(&io->lock); 731 732 if (hammer_debug_io & 0x0002) 733 kprintf("hammer io_write %016jx\n", bp->b_bio1.bio_offset); 734 735 /* 736 * Transfer ownership to the kernel and initiate I/O. 737 * 738 * NOTE: We do not hold io_token so an atomic op is required to 739 * update io_running_space. 
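 *
 *	 The accounting is reversed by hammer_io_complete(), which runs
 *	 with the io_token held (sketch of the symmetry):
 *
 *	     here:                   io->running = 1;
 *	                             io_running_space += io->bytes;
 *	     hammer_io_complete():   io->running = 0;
 *	                             io_running_space -= io->bytes;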
740 */ 741 io->running = 1; 742 atomic_add_long(&hmp->io_running_space, io->bytes); 743 atomic_add_long(&hammer_count_io_running_write, io->bytes); 744 lwkt_gettoken(&hmp->io_token); 745 TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry); 746 lwkt_reltoken(&hmp->io_token); 747 cluster_awrite(bp); 748 hammer_io_flush_mark(io->volume); 749 } 750 751 /************************************************************************ 752 * BUFFER DIRTYING * 753 ************************************************************************ 754 * 755 * These routines deal with dependancies created when IO buffers get 756 * modified. The caller must call hammer_modify_*() on a referenced 757 * HAMMER structure prior to modifying its on-disk data. 758 * 759 * Any intent to modify an IO buffer acquires the related bp and imposes 760 * various write ordering dependancies. 761 */ 762 763 /* 764 * Mark a HAMMER structure as undergoing modification. Meta-data buffers 765 * are locked until the flusher can deal with them, pure data buffers 766 * can be written out. 767 * 768 * The referenced io prevents races. 769 */ 770 static 771 void 772 hammer_io_modify(hammer_io_t io, int count) 773 { 774 /* 775 * io->modify_refs must be >= 0 776 */ 777 while (io->modify_refs < 0) { 778 io->waitmod = 1; 779 tsleep(io, 0, "hmrmod", 0); 780 } 781 782 /* 783 * Shortcut if nothing to do. 784 */ 785 KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL); 786 io->modify_refs += count; 787 if (io->modified && io->released == 0) 788 return; 789 790 /* 791 * NOTE: It is important not to set the modified bit 792 * until after we have acquired the bp or we risk 793 * racing against checkwrite. 794 */ 795 hammer_lock_ex(&io->lock); 796 if (io->released) { 797 regetblk(io->bp); 798 BUF_KERNPROC(io->bp); 799 io->released = 0; 800 } 801 if (io->modified == 0) { 802 hammer_io_set_modlist(io); 803 io->modified = 1; 804 } 805 hammer_unlock(&io->lock); 806 } 807 808 static __inline 809 void 810 hammer_io_modify_done(hammer_io_t io) 811 { 812 KKASSERT(io->modify_refs > 0); 813 --io->modify_refs; 814 if (io->modify_refs == 0 && io->waitmod) { 815 io->waitmod = 0; 816 wakeup(io); 817 } 818 } 819 820 /* 821 * The write interlock blocks other threads trying to modify a buffer 822 * (they block in hammer_io_modify()) after us, or blocks us while other 823 * threads are in the middle of modifying a buffer. 824 * 825 * The caller also has a ref on the io, however if we are not careful 826 * we will race bioops callbacks (checkwrite). To deal with this 827 * we must at least acquire and release the io_token, and it is probably 828 * better to hold it through the setting of modify_refs. 829 */ 830 void 831 hammer_io_write_interlock(hammer_io_t io) 832 { 833 hammer_mount_t hmp = io->hmp; 834 835 lwkt_gettoken(&hmp->io_token); 836 while (io->modify_refs != 0) { 837 io->waitmod = 1; 838 tsleep(io, 0, "hmrmod", 0); 839 } 840 io->modify_refs = -1; 841 lwkt_reltoken(&hmp->io_token); 842 } 843 844 void 845 hammer_io_done_interlock(hammer_io_t io) 846 { 847 KKASSERT(io->modify_refs == -1); 848 io->modify_refs = 0; 849 if (io->waitmod) { 850 io->waitmod = 0; 851 wakeup(io); 852 } 853 } 854 855 /* 856 * Caller intends to modify a volume's ondisk structure. 857 * 858 * This is only allowed if we are the flusher or we have a ref on the 859 * sync_lock. 
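 *
 * The canonical caller pattern is modify / mutate / done, for example
 * (the field is chosen for illustration only):
 *
 *      hammer_modify_volume(trans, volume,
 *                           &volume->ondisk->vol0_stat_freebigblocks,
 *                           sizeof(volume->ondisk->vol0_stat_freebigblocks));
 *      ++volume->ondisk->vol0_stat_freebigblocks;
 *      hammer_modify_volume_done(volume);
 *
 * which generates UNDO covering the modified range before the in-place
 * edit is made.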
860 */ 861 void 862 hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume, 863 void *base, int len) 864 { 865 KKASSERT (trans == NULL || trans->sync_lock_refs > 0); 866 867 hammer_io_modify(&volume->io, 1); 868 if (len) { 869 intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk; 870 KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0); 871 hammer_generate_undo(trans, 872 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset), 873 base, len); 874 } 875 } 876 877 /* 878 * Caller intends to modify a buffer's ondisk structure. 879 * 880 * This is only allowed if we are the flusher or we have a ref on the 881 * sync_lock. 882 */ 883 void 884 hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer, 885 void *base, int len) 886 { 887 KKASSERT (trans == NULL || trans->sync_lock_refs > 0); 888 889 hammer_io_modify(&buffer->io, 1); 890 if (len) { 891 intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk; 892 KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0); 893 hammer_generate_undo(trans, 894 buffer->zone2_offset + rel_offset, 895 base, len); 896 } 897 } 898 899 void 900 hammer_modify_volume_done(hammer_volume_t volume) 901 { 902 hammer_io_modify_done(&volume->io); 903 } 904 905 void 906 hammer_modify_buffer_done(hammer_buffer_t buffer) 907 { 908 hammer_io_modify_done(&buffer->io); 909 } 910 911 /* 912 * Mark an entity as not being dirty any more and finalize any 913 * delayed adjustments to the buffer. 914 * 915 * Delayed adjustments are an important performance enhancement, allowing 916 * us to avoid recalculating B-Tree node CRCs over and over again when 917 * making bulk-modifications to the B-Tree. 918 * 919 * If inval is non-zero delayed adjustments are ignored. 920 * 921 * This routine may dereference related btree nodes and cause the 922 * buffer to be dereferenced. The caller must own a reference on io. 923 */ 924 void 925 hammer_io_clear_modify(struct hammer_io *io, int inval) 926 { 927 hammer_mount_t hmp; 928 929 /* 930 * io_token is needed to avoid races on mod_root 931 */ 932 if (io->modified == 0) 933 return; 934 hmp = io->hmp; 935 lwkt_gettoken(&hmp->io_token); 936 if (io->modified == 0) { 937 lwkt_reltoken(&hmp->io_token); 938 return; 939 } 940 941 /* 942 * Take us off the mod-list and clear the modified bit. 943 */ 944 KKASSERT(io->mod_root != NULL); 945 if (io->mod_root == &io->hmp->volu_root || 946 io->mod_root == &io->hmp->meta_root) { 947 io->hmp->locked_dirty_space -= io->bytes; 948 atomic_add_long(&hammer_count_dirtybufspace, -io->bytes); 949 } 950 RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io); 951 io->mod_root = NULL; 952 io->modified = 0; 953 954 lwkt_reltoken(&hmp->io_token); 955 956 /* 957 * If this bit is not set there are no delayed adjustments. 958 */ 959 if (io->gencrc == 0) 960 return; 961 io->gencrc = 0; 962 963 /* 964 * Finalize requested CRCs. The NEEDSCRC flag also holds a reference 965 * on the node (& underlying buffer). Release the node after clearing 966 * the flag. 
967 */ 968 if (io->type == HAMMER_STRUCTURE_META_BUFFER) { 969 hammer_buffer_t buffer = (void *)io; 970 hammer_node_t node; 971 972 restart: 973 TAILQ_FOREACH(node, &buffer->clist, entry) { 974 if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) 975 continue; 976 node->flags &= ~HAMMER_NODE_NEEDSCRC; 977 KKASSERT(node->ondisk); 978 if (inval == 0) 979 node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE); 980 hammer_rel_node(node); 981 goto restart; 982 } 983 } 984 /* caller must still have ref on io */ 985 KKASSERT(hammer_isactive(&io->lock)); 986 } 987 988 /* 989 * Clear the IO's modify list. Even though the IO is no longer modified 990 * it may still be on the lose_root. This routine is called just before 991 * the governing hammer_buffer is destroyed. 992 * 993 * mod_root requires io_token protection. 994 */ 995 void 996 hammer_io_clear_modlist(struct hammer_io *io) 997 { 998 hammer_mount_t hmp = io->hmp; 999 1000 KKASSERT(io->modified == 0); 1001 if (io->mod_root) { 1002 lwkt_gettoken(&hmp->io_token); 1003 if (io->mod_root) { 1004 KKASSERT(io->mod_root == &io->hmp->lose_root); 1005 RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io); 1006 io->mod_root = NULL; 1007 } 1008 lwkt_reltoken(&hmp->io_token); 1009 } 1010 } 1011 1012 static void 1013 hammer_io_set_modlist(struct hammer_io *io) 1014 { 1015 struct hammer_mount *hmp = io->hmp; 1016 1017 lwkt_gettoken(&hmp->io_token); 1018 KKASSERT(io->mod_root == NULL); 1019 1020 switch(io->type) { 1021 case HAMMER_STRUCTURE_VOLUME: 1022 io->mod_root = &hmp->volu_root; 1023 hmp->locked_dirty_space += io->bytes; 1024 atomic_add_long(&hammer_count_dirtybufspace, io->bytes); 1025 break; 1026 case HAMMER_STRUCTURE_META_BUFFER: 1027 io->mod_root = &hmp->meta_root; 1028 hmp->locked_dirty_space += io->bytes; 1029 atomic_add_long(&hammer_count_dirtybufspace, io->bytes); 1030 break; 1031 case HAMMER_STRUCTURE_UNDO_BUFFER: 1032 io->mod_root = &hmp->undo_root; 1033 break; 1034 case HAMMER_STRUCTURE_DATA_BUFFER: 1035 io->mod_root = &hmp->data_root; 1036 break; 1037 case HAMMER_STRUCTURE_DUMMY: 1038 panic("hammer_io_set_modlist: bad io type"); 1039 break; /* NOT REACHED */ 1040 } 1041 if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) { 1042 panic("hammer_io_set_modlist: duplicate entry"); 1043 /* NOT REACHED */ 1044 } 1045 lwkt_reltoken(&hmp->io_token); 1046 } 1047 1048 /************************************************************************ 1049 * HAMMER_BIOOPS * 1050 ************************************************************************ 1051 * 1052 */ 1053 1054 /* 1055 * Pre-IO initiation kernel callback - cluster build only 1056 * 1057 * bioops callback - hold io_token 1058 */ 1059 static void 1060 hammer_io_start(struct buf *bp) 1061 { 1062 /* nothing to do, so io_token not needed */ 1063 } 1064 1065 /* 1066 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT! 1067 * 1068 * NOTE: HAMMER may modify a data buffer after we have initiated write 1069 * I/O. 1070 * 1071 * NOTE: MPSAFE callback 1072 * 1073 * bioops callback - hold io_token 1074 */ 1075 static void 1076 hammer_io_complete(struct buf *bp) 1077 { 1078 union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep); 1079 struct hammer_mount *hmp = iou->io.hmp; 1080 struct hammer_io *ionext; 1081 1082 lwkt_gettoken(&hmp->io_token); 1083 1084 KKASSERT(iou->io.released == 1); 1085 1086 /* 1087 * Deal with people waiting for I/O to drain 1088 */ 1089 if (iou->io.running) { 1090 /* 1091 * Deal with critical write errors. 
Once a critical error 1092 * has been flagged in hmp the UNDO FIFO will not be updated. 1093 * That way crash recover will give us a consistent 1094 * filesystem. 1095 * 1096 * Because of this we can throw away failed UNDO buffers. If 1097 * we throw away META or DATA buffers we risk corrupting 1098 * the now read-only version of the filesystem visible to 1099 * the user. Clear B_ERROR so the buffer is not re-dirtied 1100 * by the kernel and ref the io so it doesn't get thrown 1101 * away. 1102 */ 1103 if (bp->b_flags & B_ERROR) { 1104 lwkt_gettoken(&hmp->fs_token); 1105 hammer_critical_error(hmp, NULL, bp->b_error, 1106 "while flushing meta-data"); 1107 lwkt_reltoken(&hmp->fs_token); 1108 1109 switch(iou->io.type) { 1110 case HAMMER_STRUCTURE_UNDO_BUFFER: 1111 break; 1112 default: 1113 if (iou->io.ioerror == 0) { 1114 iou->io.ioerror = 1; 1115 hammer_ref(&iou->io.lock); 1116 } 1117 break; 1118 } 1119 bp->b_flags &= ~B_ERROR; 1120 bundirty(bp); 1121 #if 0 1122 hammer_io_set_modlist(&iou->io); 1123 iou->io.modified = 1; 1124 #endif 1125 } 1126 hammer_stats_disk_write += iou->io.bytes; 1127 atomic_add_long(&hammer_count_io_running_write, -iou->io.bytes); 1128 atomic_add_long(&hmp->io_running_space, -iou->io.bytes); 1129 KKASSERT(hmp->io_running_space >= 0); 1130 iou->io.running = 0; 1131 1132 /* 1133 * Remove from iorun list and wakeup any multi-io waiter(s). 1134 */ 1135 if (TAILQ_FIRST(&hmp->iorun_list) == &iou->io) { 1136 ionext = TAILQ_NEXT(&iou->io, iorun_entry); 1137 if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY) 1138 wakeup(ionext); 1139 } 1140 TAILQ_REMOVE(&hmp->iorun_list, &iou->io, iorun_entry); 1141 } else { 1142 hammer_stats_disk_read += iou->io.bytes; 1143 } 1144 1145 if (iou->io.waiting) { 1146 iou->io.waiting = 0; 1147 wakeup(iou); 1148 } 1149 1150 /* 1151 * If B_LOCKED is set someone wanted to deallocate the bp at some 1152 * point, try to do it now. The operation will fail if there are 1153 * refs or if hammer_io_deallocate() is unable to gain the 1154 * interlock. 1155 */ 1156 if (bp->b_flags & B_LOCKED) { 1157 atomic_add_int(&hammer_count_io_locked, -1); 1158 bp->b_flags &= ~B_LOCKED; 1159 hammer_io_deallocate(bp); 1160 /* structure may be dead now */ 1161 } 1162 lwkt_reltoken(&hmp->io_token); 1163 } 1164 1165 /* 1166 * Callback from kernel when it wishes to deallocate a passively 1167 * associated structure. This mostly occurs with clean buffers 1168 * but it may be possible for a holding structure to be marked dirty 1169 * while its buffer is passively associated. The caller owns the bp. 1170 * 1171 * If we cannot disassociate we set B_LOCKED to prevent the buffer 1172 * from getting reused. 1173 * 1174 * WARNING: Because this can be called directly by getnewbuf we cannot 1175 * recurse into the tree. If a bp cannot be immediately disassociated 1176 * our only recourse is to set B_LOCKED. 1177 * 1178 * WARNING: This may be called from an interrupt via hammer_io_complete() 1179 * 1180 * bioops callback - hold io_token 1181 */ 1182 static void 1183 hammer_io_deallocate(struct buf *bp) 1184 { 1185 hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep); 1186 hammer_mount_t hmp; 1187 1188 hmp = iou->io.hmp; 1189 1190 lwkt_gettoken(&hmp->io_token); 1191 1192 KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0); 1193 if (hammer_try_interlock_norefs(&iou->io.lock) == 0) { 1194 /* 1195 * We cannot safely disassociate a bp from a referenced 1196 * or interlocked HAMMER structure. 
1197 */ 1198 bp->b_flags |= B_LOCKED; 1199 atomic_add_int(&hammer_count_io_locked, 1); 1200 } else if (iou->io.modified) { 1201 /* 1202 * It is not legal to disassociate a modified buffer. This 1203 * case really shouldn't ever occur. 1204 */ 1205 bp->b_flags |= B_LOCKED; 1206 atomic_add_int(&hammer_count_io_locked, 1); 1207 hammer_put_interlock(&iou->io.lock, 0); 1208 } else { 1209 /* 1210 * Disassociate the BP. If the io has no refs left we 1211 * have to add it to the loose list. The kernel has 1212 * locked the buffer and therefore our io must be 1213 * in a released state. 1214 */ 1215 hammer_io_disassociate(iou); 1216 if (iou->io.type != HAMMER_STRUCTURE_VOLUME) { 1217 KKASSERT(iou->io.bp == NULL); 1218 KKASSERT(iou->io.mod_root == NULL); 1219 iou->io.mod_root = &hmp->lose_root; 1220 if (RB_INSERT(hammer_mod_rb_tree, iou->io.mod_root, 1221 &iou->io)) { 1222 panic("hammer_io_deallocate: duplicate entry"); 1223 } 1224 } 1225 hammer_put_interlock(&iou->io.lock, 1); 1226 } 1227 lwkt_reltoken(&hmp->io_token); 1228 } 1229 1230 /* 1231 * bioops callback - hold io_token 1232 */ 1233 static int 1234 hammer_io_fsync(struct vnode *vp) 1235 { 1236 /* nothing to do, so io_token not needed */ 1237 return(0); 1238 } 1239 1240 /* 1241 * NOTE: will not be called unless we tell the kernel about the 1242 * bioops. Unused... we use the mount's VFS_SYNC instead. 1243 * 1244 * bioops callback - hold io_token 1245 */ 1246 static int 1247 hammer_io_sync(struct mount *mp) 1248 { 1249 /* nothing to do, so io_token not needed */ 1250 return(0); 1251 } 1252 1253 /* 1254 * bioops callback - hold io_token 1255 */ 1256 static void 1257 hammer_io_movedeps(struct buf *bp1, struct buf *bp2) 1258 { 1259 /* nothing to do, so io_token not needed */ 1260 } 1261 1262 /* 1263 * I/O pre-check for reading and writing. HAMMER only uses this for 1264 * B_CACHE buffers so checkread just shouldn't happen, but if it does 1265 * allow it. 1266 * 1267 * Writing is a different case. We don't want the kernel to try to write 1268 * out a buffer that HAMMER may be modifying passively or which has a 1269 * dependancy. In addition, kernel-demanded writes can only proceed for 1270 * certain types of buffers (i.e. UNDO and DATA types). Other dirty 1271 * buffer types can only be explicitly written by the flusher. 1272 * 1273 * checkwrite will only be called for bdwrite()n buffers. If we return 1274 * success the kernel is guaranteed to initiate the buffer write. 1275 * 1276 * bioops callback - hold io_token 1277 */ 1278 static int 1279 hammer_io_checkread(struct buf *bp) 1280 { 1281 /* nothing to do, so io_token not needed */ 1282 return(0); 1283 } 1284 1285 /* 1286 * The kernel is asking us whether it can write out a dirty buffer or not. 1287 * 1288 * bioops callback - hold io_token 1289 */ 1290 static int 1291 hammer_io_checkwrite(struct buf *bp) 1292 { 1293 hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep); 1294 hammer_mount_t hmp = io->hmp; 1295 1296 /* 1297 * This shouldn't happen under normal operation. 1298 */ 1299 lwkt_gettoken(&hmp->io_token); 1300 if (io->type == HAMMER_STRUCTURE_VOLUME || 1301 io->type == HAMMER_STRUCTURE_META_BUFFER) { 1302 if (!panicstr) 1303 panic("hammer_io_checkwrite: illegal buffer"); 1304 if ((bp->b_flags & B_LOCKED) == 0) { 1305 bp->b_flags |= B_LOCKED; 1306 atomic_add_int(&hammer_count_io_locked, 1); 1307 } 1308 lwkt_reltoken(&hmp->io_token); 1309 return(1); 1310 } 1311 1312 /* 1313 * We have to be able to interlock the IO to safely modify any 1314 * of its fields without holding the fs_token. 
If we can't lock 1315 * it then we are racing someone. 1316 * 1317 * Our ownership of the bp lock prevents the io from being ripped 1318 * out from under us. 1319 */ 1320 if (hammer_try_interlock_norefs(&io->lock) == 0) { 1321 bp->b_flags |= B_LOCKED; 1322 atomic_add_int(&hammer_count_io_locked, 1); 1323 lwkt_reltoken(&hmp->io_token); 1324 return(1); 1325 } 1326 1327 /* 1328 * The modified bit must be cleared prior to the initiation of 1329 * any IO (returning 0 initiates the IO). Because this is a 1330 * normal data buffer hammer_io_clear_modify() runs through a 1331 * simple degenerate case. 1332 * 1333 * Return 0 will cause the kernel to initiate the IO, and we 1334 * must normally clear the modified bit before we begin. If 1335 * the io has modify_refs we do not clear the modified bit, 1336 * otherwise we may miss changes. 1337 * 1338 * Only data and undo buffers can reach here. These buffers do 1339 * not have terminal crc functions but we temporarily reference 1340 * the IO anyway, just in case. 1341 */ 1342 if (io->modify_refs == 0 && io->modified) { 1343 hammer_ref(&io->lock); 1344 hammer_io_clear_modify(io, 0); 1345 hammer_rel(&io->lock); 1346 } else if (io->modified) { 1347 KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER); 1348 } 1349 1350 /* 1351 * The kernel is going to start the IO, set io->running. 1352 */ 1353 KKASSERT(io->running == 0); 1354 io->running = 1; 1355 atomic_add_long(&io->hmp->io_running_space, io->bytes); 1356 atomic_add_long(&hammer_count_io_running_write, io->bytes); 1357 TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry); 1358 1359 hammer_put_interlock(&io->lock, 1); 1360 lwkt_reltoken(&hmp->io_token); 1361 1362 return(0); 1363 } 1364 1365 /* 1366 * Return non-zero if we wish to delay the kernel's attempt to flush 1367 * this buffer to disk. 1368 * 1369 * bioops callback - hold io_token 1370 */ 1371 static int 1372 hammer_io_countdeps(struct buf *bp, int n) 1373 { 1374 /* nothing to do, so io_token not needed */ 1375 return(0); 1376 } 1377 1378 struct bio_ops hammer_bioops = { 1379 .io_start = hammer_io_start, 1380 .io_complete = hammer_io_complete, 1381 .io_deallocate = hammer_io_deallocate, 1382 .io_fsync = hammer_io_fsync, 1383 .io_sync = hammer_io_sync, 1384 .io_movedeps = hammer_io_movedeps, 1385 .io_countdeps = hammer_io_countdeps, 1386 .io_checkread = hammer_io_checkread, 1387 .io_checkwrite = hammer_io_checkwrite, 1388 }; 1389 1390 /************************************************************************ 1391 * DIRECT IO OPS * 1392 ************************************************************************ 1393 * 1394 * These functions operate directly on the buffer cache buffer associated 1395 * with a front-end vnode rather then a back-end device vnode. 1396 */ 1397 1398 /* 1399 * Read a buffer associated with a front-end vnode directly from the 1400 * disk media. The bio may be issued asynchronously. If leaf is non-NULL 1401 * we validate the CRC. 1402 * 1403 * We must check for the presence of a HAMMER buffer to handle the case 1404 * where the reblocker has rewritten the data (which it does via the HAMMER 1405 * buffer system, not via the high-level vnode buffer cache), but not yet 1406 * committed the buffer to the media. 
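 *
 * The translation performed below can be summarized as (sketch):
 *
 *      zone-X large-data offset     bio->bio_offset
 *              | hammer_blockmap_lookup()
 *      zone-2 raw-buffer offset     zone2_offset
 *              | HAMMER_VOL_DECODE() + vol_buf_beg
 *      volume-relative byte offset  pushed onto a 3rd level bio and
 *                                   issued via vn_strategy(volume->devvp, nbio)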
1407 */ 1408 int 1409 hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio, 1410 hammer_btree_leaf_elm_t leaf) 1411 { 1412 hammer_off_t buf_offset; 1413 hammer_off_t zone2_offset; 1414 hammer_volume_t volume; 1415 struct buf *bp; 1416 struct bio *nbio; 1417 int vol_no; 1418 int error; 1419 1420 buf_offset = bio->bio_offset; 1421 KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) == 1422 HAMMER_ZONE_LARGE_DATA); 1423 1424 /* 1425 * The buffer cache may have an aliased buffer (the reblocker can 1426 * write them). If it does we have to sync any dirty data before 1427 * we can build our direct-read. This is a non-critical code path. 1428 */ 1429 bp = bio->bio_buf; 1430 hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize); 1431 1432 /* 1433 * Resolve to a zone-2 offset. The conversion just requires 1434 * munging the top 4 bits but we want to abstract it anyway 1435 * so the blockmap code can verify the zone assignment. 1436 */ 1437 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error); 1438 if (error) 1439 goto done; 1440 KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) == 1441 HAMMER_ZONE_RAW_BUFFER); 1442 1443 /* 1444 * Resolve volume and raw-offset for 3rd level bio. The 1445 * offset will be specific to the volume. 1446 */ 1447 vol_no = HAMMER_VOL_DECODE(zone2_offset); 1448 volume = hammer_get_volume(hmp, vol_no, &error); 1449 if (error == 0 && zone2_offset >= volume->maxbuf_off) 1450 error = EIO; 1451 1452 if (error == 0) { 1453 /* 1454 * 3rd level bio 1455 */ 1456 nbio = push_bio(bio); 1457 nbio->bio_offset = volume->ondisk->vol_buf_beg + 1458 (zone2_offset & HAMMER_OFF_SHORT_MASK); 1459 #if 0 1460 /* 1461 * XXX disabled - our CRC check doesn't work if the OS 1462 * does bogus_page replacement on the direct-read. 1463 */ 1464 if (leaf && hammer_verify_data) { 1465 nbio->bio_done = hammer_io_direct_read_complete; 1466 nbio->bio_caller_info1.uvalue32 = leaf->data_crc; 1467 } 1468 #endif 1469 hammer_stats_disk_read += bp->b_bufsize; 1470 vn_strategy(volume->devvp, nbio); 1471 } 1472 hammer_rel_volume(volume, 0); 1473 done: 1474 if (error) { 1475 kprintf("hammer_direct_read: failed @ %016llx\n", 1476 (long long)zone2_offset); 1477 bp->b_error = error; 1478 bp->b_flags |= B_ERROR; 1479 biodone(bio); 1480 } 1481 return(error); 1482 } 1483 1484 /* 1485 * This works similarly to hammer_io_direct_read() except instead of 1486 * directly reading from the device into the bio we instead indirectly 1487 * read through the device's buffer cache and then copy the data into 1488 * the bio. 1489 * 1490 * If leaf is non-NULL and validation is enabled, the CRC will be checked. 1491 * 1492 * This routine also executes asynchronously. It allows hammer strategy 1493 * calls to operate asynchronously when in double_buffer mode (in addition 1494 * to operating asynchronously when in normal mode). 1495 */ 1496 int 1497 hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio, 1498 hammer_btree_leaf_elm_t leaf) 1499 { 1500 hammer_off_t buf_offset; 1501 hammer_off_t zone2_offset; 1502 hammer_volume_t volume; 1503 struct buf *bp; 1504 int vol_no; 1505 int error; 1506 1507 buf_offset = bio->bio_offset; 1508 KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) == 1509 HAMMER_ZONE_LARGE_DATA); 1510 1511 /* 1512 * The buffer cache may have an aliased buffer (the reblocker can 1513 * write them). If it does we have to sync any dirty data before 1514 * we can build our direct-read. This is a non-critical code path. 
1515 */ 1516 bp = bio->bio_buf; 1517 hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize); 1518 1519 /* 1520 * Resolve to a zone-2 offset. The conversion just requires 1521 * munging the top 4 bits but we want to abstract it anyway 1522 * so the blockmap code can verify the zone assignment. 1523 */ 1524 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error); 1525 if (error) 1526 goto done; 1527 KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) == 1528 HAMMER_ZONE_RAW_BUFFER); 1529 1530 /* 1531 * Resolve volume and raw-offset for 3rd level bio. The 1532 * offset will be specific to the volume. 1533 */ 1534 vol_no = HAMMER_VOL_DECODE(zone2_offset); 1535 volume = hammer_get_volume(hmp, vol_no, &error); 1536 if (error == 0 && zone2_offset >= volume->maxbuf_off) 1537 error = EIO; 1538 1539 if (error == 0) { 1540 /* 1541 * Convert to the raw volume->devvp offset and acquire 1542 * the buf, issuing async I/O if necessary. 1543 */ 1544 buf_offset = volume->ondisk->vol_buf_beg + 1545 (zone2_offset & HAMMER_OFF_SHORT_MASK); 1546 1547 if (leaf && hammer_verify_data) { 1548 bio->bio_caller_info1.uvalue32 = leaf->data_crc; 1549 bio->bio_caller_info2.index = 1; 1550 } else { 1551 bio->bio_caller_info2.index = 0; 1552 } 1553 breadcb(volume->devvp, buf_offset, bp->b_bufsize, 1554 hammer_indirect_callback, bio); 1555 } 1556 hammer_rel_volume(volume, 0); 1557 done: 1558 if (error) { 1559 kprintf("hammer_direct_read: failed @ %016llx\n", 1560 (long long)zone2_offset); 1561 bp->b_error = error; 1562 bp->b_flags |= B_ERROR; 1563 biodone(bio); 1564 } 1565 return(error); 1566 } 1567 1568 /* 1569 * Indirect callback on completion. bio/bp specify the device-backed 1570 * buffer. bio->bio_caller_info1.ptr holds obio. 1571 * 1572 * obio/obp is the original regular file buffer. obio->bio_caller_info* 1573 * contains the crc specification. 1574 * 1575 * We are responsible for calling bpdone() and bqrelse() on bio/bp, and 1576 * for calling biodone() on obio. 1577 */ 1578 static void 1579 hammer_indirect_callback(struct bio *bio) 1580 { 1581 struct buf *bp = bio->bio_buf; 1582 struct buf *obp; 1583 struct bio *obio; 1584 1585 /* 1586 * If BIO_DONE is already set the device buffer was already 1587 * fully valid (B_CACHE). If it is not set then I/O was issued 1588 * and we have to run I/O completion as the last bio. 1589 * 1590 * Nobody is waiting for our device I/O to complete, we are 1591 * responsible for bqrelse()ing it which means we also have to do 1592 * the equivalent of biowait() and clear BIO_DONE (which breadcb() 1593 * may have set). 1594 * 1595 * Any preexisting device buffer should match the requested size, 1596 * but due to bigblock recycling and other factors there is some 1597 * fragility there, so we assert that the device buffer covers 1598 * the request. 
1599 */ 1600 if ((bio->bio_flags & BIO_DONE) == 0) 1601 bpdone(bp, 0); 1602 bio->bio_flags &= ~(BIO_DONE | BIO_SYNC); 1603 1604 obio = bio->bio_caller_info1.ptr; 1605 obp = obio->bio_buf; 1606 1607 if (bp->b_flags & B_ERROR) { 1608 obp->b_flags |= B_ERROR; 1609 obp->b_error = bp->b_error; 1610 } else if (obio->bio_caller_info2.index && 1611 obio->bio_caller_info1.uvalue32 != 1612 crc32(bp->b_data, bp->b_bufsize)) { 1613 obp->b_flags |= B_ERROR; 1614 obp->b_error = EIO; 1615 } else { 1616 KKASSERT(bp->b_bufsize >= obp->b_bufsize); 1617 bcopy(bp->b_data, obp->b_data, obp->b_bufsize); 1618 obp->b_resid = 0; 1619 obp->b_flags |= B_AGE; 1620 } 1621 biodone(obio); 1622 bqrelse(bp); 1623 } 1624 1625 #if 0 1626 /* 1627 * On completion of the BIO this callback must check the data CRC 1628 * and chain to the previous bio. 1629 * 1630 * MPSAFE - since we do not modify and hammer_records we do not need 1631 * io_token. 1632 * 1633 * NOTE: MPSAFE callback 1634 */ 1635 static 1636 void 1637 hammer_io_direct_read_complete(struct bio *nbio) 1638 { 1639 struct bio *obio; 1640 struct buf *bp; 1641 u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32; 1642 1643 bp = nbio->bio_buf; 1644 if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) { 1645 kprintf("HAMMER: data_crc error @%016llx/%d\n", 1646 nbio->bio_offset, bp->b_bufsize); 1647 if (hammer_debug_critical) 1648 Debugger("data_crc on read"); 1649 bp->b_flags |= B_ERROR; 1650 bp->b_error = EIO; 1651 } 1652 obio = pop_bio(nbio); 1653 biodone(obio); 1654 } 1655 #endif 1656 1657 /* 1658 * Write a buffer associated with a front-end vnode directly to the 1659 * disk media. The bio may be issued asynchronously. 1660 * 1661 * The BIO is associated with the specified record and RECG_DIRECT_IO 1662 * is set. The recorded is added to its object. 1663 */ 1664 int 1665 hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio, 1666 hammer_record_t record) 1667 { 1668 hammer_btree_leaf_elm_t leaf = &record->leaf; 1669 hammer_off_t buf_offset; 1670 hammer_off_t zone2_offset; 1671 hammer_volume_t volume; 1672 hammer_buffer_t buffer; 1673 struct buf *bp; 1674 struct bio *nbio; 1675 char *ptr; 1676 int vol_no; 1677 int error; 1678 1679 buf_offset = leaf->data_offset; 1680 1681 KKASSERT(buf_offset > HAMMER_ZONE_BTREE); 1682 KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE); 1683 1684 /* 1685 * Issue or execute the I/O. The new memory record must replace 1686 * the old one before the I/O completes, otherwise a reaquisition of 1687 * the buffer will load the old media data instead of the new. 1688 */ 1689 if ((buf_offset & HAMMER_BUFMASK) == 0 && 1690 leaf->data_len >= HAMMER_BUFSIZE) { 1691 /* 1692 * We are using the vnode's bio to write directly to the 1693 * media, any hammer_buffer at the same zone-X offset will 1694 * now have stale data. 1695 */ 1696 zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error); 1697 vol_no = HAMMER_VOL_DECODE(zone2_offset); 1698 volume = hammer_get_volume(hmp, vol_no, &error); 1699 1700 if (error == 0 && zone2_offset >= volume->maxbuf_off) 1701 error = EIO; 1702 if (error == 0) { 1703 bp = bio->bio_buf; 1704 KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0); 1705 /* 1706 hammer_del_buffers(hmp, buf_offset, 1707 zone2_offset, bp->b_bufsize); 1708 */ 1709 1710 /* 1711 * Second level bio - cached zone2 offset. 1712 * 1713 * (We can put our bio_done function in either the 1714 * 2nd or 3rd level). 
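 *
 * The resulting bio stack looks like (sketch):
 *
 *      bio    - frontend vnode buffer, zone-X file data offset
 *      nbio   - 2nd level, cached zone-2 offset, bio_done set to
 *               hammer_io_direct_write_complete, caller_info1 -> record
 *      nbio'  - 3rd level, raw volume-relative offset, handed to
 *               vn_strategy(volume->devvp, ...)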
1715 */ 1716 nbio = push_bio(bio); 1717 nbio->bio_offset = zone2_offset; 1718 nbio->bio_done = hammer_io_direct_write_complete; 1719 nbio->bio_caller_info1.ptr = record; 1720 record->zone2_offset = zone2_offset; 1721 record->gflags |= HAMMER_RECG_DIRECT_IO | 1722 HAMMER_RECG_DIRECT_INVAL; 1723 1724 /* 1725 * Third level bio - raw offset specific to the 1726 * correct volume. 1727 */ 1728 zone2_offset &= HAMMER_OFF_SHORT_MASK; 1729 nbio = push_bio(nbio); 1730 nbio->bio_offset = volume->ondisk->vol_buf_beg + 1731 zone2_offset; 1732 hammer_stats_disk_write += bp->b_bufsize; 1733 hammer_ip_replace_bulk(hmp, record); 1734 vn_strategy(volume->devvp, nbio); 1735 hammer_io_flush_mark(volume); 1736 } 1737 hammer_rel_volume(volume, 0); 1738 } else { 1739 /* 1740 * Must fit in a standard HAMMER buffer. In this case all 1741 * consumers use the HAMMER buffer system and RECG_DIRECT_IO 1742 * does not need to be set-up. 1743 */ 1744 KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0); 1745 buffer = NULL; 1746 ptr = hammer_bread(hmp, buf_offset, &error, &buffer); 1747 if (error == 0) { 1748 bp = bio->bio_buf; 1749 bp->b_flags |= B_AGE; 1750 hammer_io_modify(&buffer->io, 1); 1751 bcopy(bp->b_data, ptr, leaf->data_len); 1752 hammer_io_modify_done(&buffer->io); 1753 hammer_rel_buffer(buffer, 0); 1754 bp->b_resid = 0; 1755 hammer_ip_replace_bulk(hmp, record); 1756 biodone(bio); 1757 } 1758 } 1759 if (error) { 1760 /* 1761 * Major suckage occured. Also note: The record was 1762 * never added to the tree so we do not have to worry 1763 * about the backend. 1764 */ 1765 kprintf("hammer_direct_write: failed @ %016llx\n", 1766 (long long)leaf->data_offset); 1767 bp = bio->bio_buf; 1768 bp->b_resid = 0; 1769 bp->b_error = EIO; 1770 bp->b_flags |= B_ERROR; 1771 biodone(bio); 1772 record->flags |= HAMMER_RECF_DELETED_FE; 1773 hammer_rel_mem_record(record); 1774 } 1775 return(error); 1776 } 1777 1778 /* 1779 * On completion of the BIO this callback must disconnect 1780 * it from the hammer_record and chain to the previous bio. 1781 * 1782 * An I/O error forces the mount to read-only. Data buffers 1783 * are not B_LOCKED like meta-data buffers are, so we have to 1784 * throw the buffer away to prevent the kernel from retrying. 1785 * 1786 * NOTE: MPSAFE callback, only modify fields we have explicit 1787 * access to (the bp and the record->gflags). 
1788 */ 1789 static 1790 void 1791 hammer_io_direct_write_complete(struct bio *nbio) 1792 { 1793 struct bio *obio; 1794 struct buf *bp; 1795 hammer_record_t record; 1796 hammer_mount_t hmp; 1797 1798 record = nbio->bio_caller_info1.ptr; 1799 KKASSERT(record != NULL); 1800 hmp = record->ip->hmp; 1801 1802 lwkt_gettoken(&hmp->io_token); 1803 1804 bp = nbio->bio_buf; 1805 obio = pop_bio(nbio); 1806 if (bp->b_flags & B_ERROR) { 1807 lwkt_gettoken(&hmp->fs_token); 1808 hammer_critical_error(hmp, record->ip, 1809 bp->b_error, 1810 "while writing bulk data"); 1811 lwkt_reltoken(&hmp->fs_token); 1812 bp->b_flags |= B_INVAL; 1813 } 1814 biodone(obio); 1815 1816 KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO); 1817 if (record->gflags & HAMMER_RECG_DIRECT_WAIT) { 1818 record->gflags &= ~(HAMMER_RECG_DIRECT_IO | 1819 HAMMER_RECG_DIRECT_WAIT); 1820 /* record can disappear once DIRECT_IO flag is cleared */ 1821 wakeup(&record->flags); 1822 } else { 1823 record->gflags &= ~HAMMER_RECG_DIRECT_IO; 1824 /* record can disappear once DIRECT_IO flag is cleared */ 1825 } 1826 lwkt_reltoken(&hmp->io_token); 1827 } 1828 1829 1830 /* 1831 * This is called before a record is either committed to the B-Tree 1832 * or destroyed, to resolve any associated direct-IO. 1833 * 1834 * (1) We must wait for any direct-IO related to the record to complete. 1835 * 1836 * (2) We must remove any buffer cache aliases for data accessed via 1837 * leaf->data_offset or zone2_offset so non-direct-IO consumers 1838 * (the mirroring and reblocking code) do not see stale data. 1839 */ 1840 void 1841 hammer_io_direct_wait(hammer_record_t record) 1842 { 1843 hammer_mount_t hmp = record->ip->hmp; 1844 1845 /* 1846 * Wait for I/O to complete 1847 */ 1848 if (record->gflags & HAMMER_RECG_DIRECT_IO) { 1849 lwkt_gettoken(&hmp->io_token); 1850 while (record->gflags & HAMMER_RECG_DIRECT_IO) { 1851 record->gflags |= HAMMER_RECG_DIRECT_WAIT; 1852 tsleep(&record->flags, 0, "hmdiow", 0); 1853 } 1854 lwkt_reltoken(&hmp->io_token); 1855 } 1856 1857 /* 1858 * Invalidate any related buffer cache aliases associated with the 1859 * backing device. This is needed because the buffer cache buffer 1860 * for file data is associated with the file vnode, not the backing 1861 * device vnode. 1862 * 1863 * XXX I do not think this case can occur any more now that 1864 * reservations ensure that all such buffers are removed before 1865 * an area can be reused. 1866 */ 1867 if (record->gflags & HAMMER_RECG_DIRECT_INVAL) { 1868 KKASSERT(record->leaf.data_offset); 1869 hammer_del_buffers(hmp, record->leaf.data_offset, 1870 record->zone2_offset, record->leaf.data_len, 1871 1); 1872 record->gflags &= ~HAMMER_RECG_DIRECT_INVAL; 1873 } 1874 } 1875 1876 /* 1877 * This is called to remove the second-level cached zone-2 offset from 1878 * frontend buffer cache buffers, now stale due to a data relocation. 1879 * These offsets are generated by cluster_read() via VOP_BMAP, or directly 1880 * by hammer_vop_strategy_read(). 1881 * 1882 * This is rather nasty because here we have something like the reblocker 1883 * scanning the raw B-Tree with no held references on anything, really, 1884 * other then a shared lock on the B-Tree node, and we have to access the 1885 * frontend's buffer cache to check for and clean out the association. 1886 * Specifically, if the reblocker is moving data on the disk, these cached 1887 * offsets will become invalid. 1888 * 1889 * Only data record types associated with the large-data zone are subject 1890 * to direct-io and need to be checked. 
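 *
 * The callback below reconstructs the frontend buffer offset as (sketch):
 *
 *      file_offset = leaf->base.key - leaf->data_len;
 *
 * and, if the buffer carries a cached zone-2 offset in b_bio2, clears it.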
1891 * 1892 */ 1893 void 1894 hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf) 1895 { 1896 struct hammer_inode_info iinfo; 1897 int zone; 1898 1899 if (leaf->base.rec_type != HAMMER_RECTYPE_DATA) 1900 return; 1901 zone = HAMMER_ZONE_DECODE(leaf->data_offset); 1902 if (zone != HAMMER_ZONE_LARGE_DATA_INDEX) 1903 return; 1904 iinfo.obj_id = leaf->base.obj_id; 1905 iinfo.obj_asof = 0; /* unused */ 1906 iinfo.obj_localization = leaf->base.localization & 1907 HAMMER_LOCALIZE_PSEUDOFS_MASK; 1908 iinfo.u.leaf = leaf; 1909 hammer_scan_inode_snapshots(hmp, &iinfo, 1910 hammer_io_direct_uncache_callback, 1911 leaf); 1912 } 1913 1914 static int 1915 hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data) 1916 { 1917 hammer_inode_info_t iinfo = data; 1918 hammer_off_t file_offset; 1919 struct vnode *vp; 1920 struct buf *bp; 1921 int blksize; 1922 1923 if (ip->vp == NULL) 1924 return(0); 1925 file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len; 1926 blksize = iinfo->u.leaf->data_len; 1927 KKASSERT((blksize & HAMMER_BUFMASK) == 0); 1928 1929 /* 1930 * Warning: FINDBLK_TEST return stable storage but not stable 1931 * contents. It happens to be ok in this case. 1932 */ 1933 hammer_ref(&ip->lock); 1934 if (hammer_get_vnode(ip, &vp) == 0) { 1935 if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL && 1936 bp->b_bio2.bio_offset != NOOFFSET) { 1937 bp = getblk(ip->vp, file_offset, blksize, 0, 0); 1938 bp->b_bio2.bio_offset = NOOFFSET; 1939 brelse(bp); 1940 } 1941 vput(vp); 1942 } 1943 hammer_rel_inode(ip, 0); 1944 return(0); 1945 } 1946 1947 1948 /* 1949 * This function is called when writes may have occured on the volume, 1950 * indicating that the device may be holding cached writes. 1951 */ 1952 static void 1953 hammer_io_flush_mark(hammer_volume_t volume) 1954 { 1955 atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH); 1956 } 1957 1958 /* 1959 * This function ensures that the device has flushed any cached writes out. 1960 */ 1961 void 1962 hammer_io_flush_sync(hammer_mount_t hmp) 1963 { 1964 hammer_volume_t volume; 1965 struct buf *bp_base = NULL; 1966 struct buf *bp; 1967 1968 RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) { 1969 if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) { 1970 atomic_clear_int(&volume->vol_flags, 1971 HAMMER_VOLF_NEEDFLUSH); 1972 bp = getpbuf(NULL); 1973 bp->b_bio1.bio_offset = 0; 1974 bp->b_bufsize = 0; 1975 bp->b_bcount = 0; 1976 bp->b_cmd = BUF_CMD_FLUSH; 1977 bp->b_bio1.bio_caller_info1.cluster_head = bp_base; 1978 bp->b_bio1.bio_done = biodone_sync; 1979 bp->b_bio1.bio_flags |= BIO_SYNC; 1980 bp_base = bp; 1981 vn_strategy(volume->devvp, &bp->b_bio1); 1982 } 1983 } 1984 while ((bp = bp_base) != NULL) { 1985 bp_base = bp->b_bio1.bio_caller_info1.cluster_head; 1986 biowait(&bp->b_bio1, "hmrFLS"); 1987 relpbuf(bp, NULL); 1988 } 1989 } 1990 1991 /* 1992 * Limit the amount of backlog which we allow to build up 1993 */ 1994 void 1995 hammer_io_limit_backlog(hammer_mount_t hmp) 1996 { 1997 waitrunningbufspace(); 1998 } 1999
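/*
 * Reference sketch, kept under #if 0 so it is not compiled: how the 64-bit
 * HAMMER offsets manipulated above decompose into zone, volume number and
 * volume-relative short offset.  The shift and mask values here are an
 * illustration of the layout; the authoritative definitions are the
 * HAMMER_ZONE_DECODE(), HAMMER_VOL_DECODE() and HAMMER_OFF_SHORT_MASK
 * macros.
 */
#if 0
static void
hammer_off_breakdown(hammer_off_t off)
{
	int zone;		/* top 4 bits - zone index */
	int vol_no;		/* next 8 bits - volume number */
	hammer_off_t short_off;	/* low 52 bits - volume-relative offset */

	zone      = (int)(off >> 60);
	vol_no    = (int)((off >> 52) & 0xff);
	short_off = off & 0x000FFFFFFFFFFFFFULL;

	kprintf("zone=%d vol=%d offset=%016jx\n",
		zone, vol_no, (intmax_t)short_off);
}
#endif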