/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>

#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
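
/*
 * Illustrative sketch: because the buffer tree is keyed on zoneX_offset,
 * a cached buffer must be looked up with the untranslated zone-X offset
 * (this is exactly what hammer_get_buffer() below does):
 *
 *	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
 *			   buf_offset & ~HAMMER_BUFMASK64);
 */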

/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time;
 * hammer_get_volume() will not load a new volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes the
 * ref (typically by associating it with the volume structure).
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE, NULL);
		vn_unlock(volume->devvp);
		hammer_free_volume(volume);
	}
	return (error);
}
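
/*
 * Illustrative sketch (assumed caller, normally the mount code;
 * volnames[] stands in for the mount's device path list): each volume
 * is installed in turn and the resulting volume set is then validated:
 *
 *	for (i = 0; error == 0 && i < nvolumes; ++i)
 *		error = hammer_install_volume(hmp, volnames[i], NULL);
 *	if (error == 0)
 *		error = hammer_mountcheck_volumes(hmp);
 */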

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD, NULL);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}
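
/*
 * Illustrative sketch (assumed caller): the mount-update path applies
 * the mode change to every volume via an RB_SCAN over the volume tree:
 *
 *	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
 *		hammer_adjust_volume_mode, NULL);
 */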

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;

	/*
	 * Clean up the persistent ref ioerror might have on the volume.
	 */
	if (volume->io.ioerror)
		hammer_io_clear_error_noassert(&volume->io);

	/*
	 * This should release the bp.  Releasing the volume with flush set
	 * implies the interlock is set.
	 */
	hammer_ref_interlock_true(&volume->io.lock);
	hammer_rel_volume(volume, 1);
	KKASSERT(volume->io.bp == NULL);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(hammer_norefs(&volume->io.lock));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD, NULL);
			vn_unlock(volume->devvp);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
			vn_unlock(volume->devvp);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}

	/*
	 * Reference the volume, load/check the data on the 0->1 transition.
	 * hammer_load_volume() will dispose of the interlock on return,
	 * and also clean up the ref count on error.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}
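
/*
 * Illustrative sketch: a consumer typically holds the volume reference
 * only for the duration of an operation and then releases it:
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... access volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */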

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		error = hammer_load_volume(volume);
	} else {
		KKASSERT(volume->ondisk);
		error = 0;
	}
	return (error);
}

/*
 * May be called without fs_token
 */
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		lwkt_gettoken(&volume->io.hmp->fs_token);
		*errorp = hammer_load_volume(volume);
		lwkt_reltoken(&volume->io.hmp->fs_token);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       HAMMER_BUFSIZE);
		if (error == 0) {
			volume->ondisk = (void *)volume->io.bp->b_data;
			hammer_ref_interlock_done(&volume->io.lock);
		} else {
			hammer_rel_volume(volume, 1);
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 *
 * May be called without fs_token
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
	struct buf *bp;

	if (hammer_rel_interlock(&volume->io.lock, locked)) {
		lwkt_gettoken(&volume->io.hmp->fs_token);
		volume->ondisk = NULL;
		bp = hammer_io_release(&volume->io, locked);
		lwkt_reltoken(&volume->io.hmp->fs_token);
		hammer_rel_interlock_done(&volume->io.lock, locked);
		if (bp)
			brelse(bp);
	}
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */

/*
 * Helper function returns whether a zone offset can be directly translated
 * to a raw buffer index or not.  Really only the volume and undo zones
 * can't be directly translated.  Volumes are special-cased and undo zones
 * shouldn't be alias-accessed in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
 */
static __inline int
hammer_direct_zone(hammer_off_t buf_offset)
{
	switch(HAMMER_ZONE_DECODE(buf_offset)) {
	case HAMMER_ZONE_RAW_BUFFER_INDEX:
	case HAMMER_ZONE_FREEMAP_INDEX:
	case HAMMER_ZONE_BTREE_INDEX:
	case HAMMER_ZONE_META_INDEX:
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		return(1);
	default:
		return(0);
	}
	/* NOT REACHED */
}
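
/*
 * Illustrative sketch: for a direct zone the translation to a zone-2
 * offset amounts to re-tagging the zone bits, which is what
 * hammer_xlate_to_zone2() (used below for alias detection) does:
 *
 *	if (hammer_direct_zone(buf_offset))
 *		zone2_offset = hammer_xlate_to_zone2(buf_offset);
 */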

hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.  Shortcut the operation if the
		 * ondisk structure is valid.
		 */
found_aliased:
		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
			hammer_io_advance(&buffer->io);
			KKASSERT(buffer->ondisk);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * 0->1 transition or deferred 0->1 transition (CHECK),
		 * interlock now held.  Shortcut if ondisk is already
		 * assigned.
		 */
		atomic_add_int(&hammer_count_refedbufs, 1);
		if (buffer->ondisk) {
			hammer_io_advance(&buffer->io);
			hammer_ref_interlock_done(&buffer->io.lock);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt
		 * so the io_token must be held.
		 */
		if (buffer->io.mod_root == &hmp->lose_root) {
			lwkt_gettoken(&hmp->io_token);
			if (buffer->io.mod_root == &hmp->lose_root) {
				RB_REMOVE(hammer_mod_rb_tree,
					  buffer->io.mod_root, &buffer->io);
				buffer->io.mod_root = NULL;
				KKASSERT(buffer->io.modified == 0);
			}
			lwkt_reltoken(&hmp->io_token);
		}
		goto found;
	} else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
		/*
		 * If this is a read-only mount there could be an alias
		 * in the raw-zone.  If there is we use that buffer instead.
		 *
		 * rw mounts will not have aliases.  Also note when going
		 * from ro -> rw the recovered raw buffers are flushed and
		 * reclaimed, so again there will not be any aliases once
		 * the mount is rw.
		 */
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   hammer_xlate_to_zone2(buf_offset));
		if (buffer) {
			if (hammer_debug_general & 0x0001) {
				krateprintf(&hmp->kdiag,
					    "HAMMER: recovered "
					    "aliased %016jx\n",
					    (intmax_t)buf_offset);
			}
			goto found_aliased;
		}
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE2_MAPPED_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref_interlock_true(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;			/* safety */
		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
			hammer_rel_interlock_done(&buffer->io.lock, 1);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	atomic_add_int(&hammer_count_refedbufs, 1);
found:

	/*
	 * The buffer is referenced and interlocked.  Load the buffer
	 * if necessary.  hammer_load_buffer() deals with the interlock
	 * and, if an error is returned, also deals with the ref.
	 */
	if (buffer->ondisk == NULL) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp)
			buffer = NULL;
	} else {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
		*errorp = 0;
	}
	return(buffer);
}
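
/*
 * Illustrative sketch: most consumers go through hammer_bread()/
 * hammer_bnew() below; a direct caller pairs hammer_get_buffer() with
 * hammer_rel_buffer():
 *
 *	buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE,
 *				   0, &error);
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */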

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (hammer_debug_general & 0x20000) {
				kprintf("hammer: delbufr %016jx "
					"rerr=%d 1ref=%d\n",
					(intmax_t)buffer->zoneX_offset,
					error,
					hammer_oneref(&buffer->io.lock));
			}
			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				krateprintf(&hmp->kdiag,
					"hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p "
					"rep=%d lkrefs=%08x\n",
					(long long)base_offset,
					buffer, report_conflicts,
					(buffer ? buffer->io.lock.refs : -1));
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}
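
/*
 * Illustrative sketch (assumed caller): when a big-block's worth of
 * storage is freed the covering buffers are invalidated, e.g.
 *
 *	error = hammer_del_buffers(hmp, base_offset, zone2_offset,
 *				   HAMMER_BIGBLOCK_SIZE, 0);
 */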

/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;

	if (hammer_debug_io & 0x0004) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			(long long)buffer->zoneX_offset,
			(long long)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		/*
		 * Issue the read or generate a new buffer.  When reading
		 * the limit argument controls any read-ahead clustering
		 * hammer_io_read() is allowed to do.
		 *
		 * We cannot read-ahead in the large-data zone and we cannot
		 * cross a big-block boundary as the next big-block might
		 * use a different buffer size.
		 */
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
			   HAMMER_ZONE_LARGE_DATA) {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       buffer->io.bytes);
		} else {
			hammer_off_t limit;

			limit = (buffer->zone2_offset +
				 HAMMER_BIGBLOCK_MASK64) &
				~HAMMER_BIGBLOCK_MASK64;
			limit -= buffer->zone2_offset;
			error = hammer_io_read(volume->devvp, &buffer->io,
					       limit);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0) {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
	} else {
		hammer_rel_buffer(buffer, 1);
	}
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	struct hammer_volume *volume = (struct hammer_volume *) data;

	/*
	 * If volume != NULL we are only interested in unloading buffers
	 * associated with a particular volume.
	 */
	if (volume != NULL && volume != buffer->io.volume)
		return 0;

	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref.  Expect a 0->1 transition.
	 */
	if (buffer->io.ioerror) {
		hammer_io_clear_error_noassert(&buffer->io);
		atomic_add_int(&hammer_count_refedbufs, -1);
	}
	hammer_ref_interlock_true(&buffer->io.lock);
	atomic_add_int(&hammer_count_refedbufs, 1);

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 1);
	return(0);
}
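
/*
 * Illustrative sketch (assumed caller): the unmount path drives
 * hammer_unload_buffer() from an RB_SCAN over the buffer tree,
 * optionally restricted to one volume:
 *
 *	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
 *		hammer_unload_buffer, volume);
 */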

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	hammer_mount_t hmp;
	int error;
	int locked;

	/*
	 * Acquire a ref, plus the buffer will be interlocked on the
	 * 0->1 transition.
	 */
	locked = hammer_ref_interlock(&buffer->io.lock);
	hmp = buffer->io.hmp;

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.  lose_list requires the io_token.
	 */
	if (buffer->io.mod_root == &hmp->lose_root) {
		lwkt_gettoken(&hmp->io_token);
		if (buffer->io.mod_root == &hmp->lose_root) {
			RB_REMOVE(hammer_mod_rb_tree,
				  buffer->io.mod_root, &buffer->io);
			buffer->io.mod_root = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}

	if (locked) {
		atomic_add_int(&hammer_count_refedbufs, 1);
		error = hammer_load_buffer(buffer, 0);
		/* NOTE: on error the buffer pointer is stale */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
		return;

	/*
	 * hammer_count_refedbufs accounting.  Decrement if we are in
	 * the error path or if CHECK is clear.
	 *
	 * If we are not in the error path and CHECK is set the caller
	 * probably just did a hammer_ref() and didn't account for it,
	 * so we don't account for the loss here.
	 */
	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
		atomic_add_int(&hammer_count_refedbufs, -1);

	/*
	 * If the caller locked us, or the normal release transitioned
	 * from 1->0 (and acquired the interlock), attempt to release the
	 * io.  If the caller locked us we tell hammer_io_release()
	 * to flush (which would be the unload or failure path).
	 */
	bp = hammer_io_release(&buffer->io, locked);

	/*
	 * If the buffer has no bp association and no refs we can destroy
	 * it.
	 *
	 * NOTE: It is impossible for any associated B-Tree nodes to have
	 * refs if the buffer has no additional refs.
	 */
	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
		RB_REMOVE(hammer_buf_rb_tree,
			  &buffer->io.hmp->rb_bufs_root,
			  buffer);
		volume = buffer->io.volume;
		buffer->io.volume = NULL;	/* sanity */
		hammer_rel_volume(volume, 0);
		hammer_io_clear_modlist(&buffer->io);
		hammer_flush_buffer_nodes(buffer);
		KKASSERT(TAILQ_EMPTY(&buffer->clist));
		freeme = 1;
	}

	/*
	 * Cleanup
	 */
	hammer_rel_interlock_done(&buffer->io.lock, locked);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
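
/*
 * Illustrative sketch: a caller reuses the cached *bufferp across
 * successive hammer_bread() calls and releases it once when done:
 *
 *	hammer_buffer_t buffer = NULL;
 *	void *data;
 *
 *	data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	... more hammer_bread() calls reusing &buffer ...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */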

/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;
	int doload;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc,
			       M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
		doload = hammer_ref_interlock_true(&node->lock);
	} else {
		doload = hammer_ref_interlock(&node->lock);
	}
	if (doload) {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
		if (*errorp)
			node = NULL;
	} else {
		KKASSERT(node->ondisk);
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	}
	return(node);
}

/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
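
/*
 * Illustrative sketch: B-Tree traversal acquires a node, works on
 * node->ondisk, and releases it before moving on:
 *
 *	node = hammer_get_node(trans, node_offset, 0, &error);
 *	if (node) {
 *		... examine node->ondisk ...
 *		hammer_rel_node(node);
 *	}
 */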

/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	if (error) {
		_hammer_rel_node(node, 1);
	} else {
		hammer_ref_interlock_done(&node->lock);
	}
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;
	int doload;

	node = cache->node;
	if (node != NULL) {
		doload = hammer_ref_interlock(&node->lock);
		if (doload) {
			*errorp = hammer_load_node(trans, node, 0);
			if (*errorp)
				node = NULL;
		} else {
			KKASSERT(node->ondisk);
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
				_hammer_rel_node(node, 0);
				node = NULL;
			} else {
				*errorp = 0;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
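
/*
 * Illustrative sketch: a passively cached node pointer (e.g. one of an
 * inode's hammer_node_cache structures) is revalidated through
 * hammer_ref_node_safe() rather than dereferenced directly:
 *
 *	node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
 *	if (node == NULL)
 *		... fall back to a full B-Tree lookup ...
 */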

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
void
_hammer_rel_node(hammer_node_t node, int locked)
{
	hammer_buffer_t buffer;

	/*
	 * Deref the node.  If this isn't the 1->0 transition we're basically
	 * done.  If locked is non-zero this function will just deref the
	 * locked node and return TRUE, otherwise it will deref the locked
	 * node and either lock and return TRUE on the 1->0 transition or
	 * not lock and return FALSE.
	 */
	if (hammer_rel_interlock(&node->lock, locked) == 0)
		return;

	/*
	 * Either locked was non-zero and we are interlocked, or the
	 * hammer_rel_interlock() call returned non-zero and we are
	 * interlocked.
	 *
	 * The ref-count must still be decremented if locked != 0 so
	 * the cleanup required still varies a bit.
	 *
	 * hammer_flush_node() when called with 1 or 2 will dispose of
	 * the lock and possible ref-count.
	 */
	if (node->ondisk == NULL) {
		hammer_flush_node(node, locked + 1);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC) {
		hammer_rel_interlock_done(&node->lock, locked);
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		/*
		 * Normal release.
		 */
		hammer_rel_interlock_done(&node->lock, locked);
	} else {
		/*
		 * Destroy the node.
		 */
		hammer_flush_node(node, locked + 1);
		/* node is stale */

	}
	hammer_rel_buffer(buffer, 0);
}

void
hammer_rel_node(hammer_node_t node)
{
	_hammer_rel_node(node, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node, 0);
	}
}
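
/*
 * Illustrative sketch: a lookup caches the node it found so the next
 * lookup can short-cut via hammer_ref_node_safe() above; the cache is
 * torn down with hammer_uncache_node():
 *
 *	hammer_cache_node(&ip->cache[1], node);
 *	hammer_rel_node(node);
 *	...
 *	hammer_uncache_node(&ip->cache[1]);
 */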

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 *	locked == 0	Normal unlocked operation
 *	locked == 1	Call hammer_rel_interlock_done(..., 0);
 *	locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;
	int dofree;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}

	/*
	 * NOTE: refs is predisposed if another thread is blocking and
	 *	 will be larger than 0 in that case.  We aren't MPSAFE
	 *	 here.
	 */
	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		dofree = 1;
	} else {
		dofree = 0;
	}

	/*
	 * Deal with the interlock if locked == 1 or locked == 2.
	 */
	if (locked)
		hammer_rel_interlock_done(&node->lock, locked - 1);

	/*
	 * Destroy if requested
	 */
	if (dofree) {
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (hammer_try_interlock_norefs(&node->lock)) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			_hammer_rel_node(node, 1);
		} else {
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}


/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			/*
			 * This is an exceptional case.  HAMMER usually
			 * uses HAMMER_ZONE_LARGE_DATA when the data length
			 * is >=HAMMER_BUFSIZE, but not 1/2 of that.  The
			 * mirror-write code seems to be the only case that
			 * allocates HAMMER_RECTYPE_DATA via this function.
			 *
			 * When data_len is >HAMMER_BUFSIZE/2 it uses
			 * HAMMER_ZONE_LARGE_DATA, but data_len is also
			 * rounded up so it doesn't make much difference
			 * from the normal way of using this zone.
			 *
			 * Also note hammer_vop_strategy_write() could have
			 * rounded up the storage allocation size of the
			 * original mirror source to the fs block size when
			 * it was written, if the file size was
			 * >HAMMER_BUFSIZE/2.
			 */
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}
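
/*
 * Illustrative sketch: record creation allocates backing store, copies
 * the payload through the returned pointer under a hammer_modify*()
 * section, and releases the data buffer when done:
 *
 *	struct hammer_buffer *data_buffer = NULL;
 *	void *data;
 *
 *	data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DIRENTRY,
 *				 &data_offset, &data_buffer, hint, &error);
 *	if (error == 0) {
 *		hammer_modify_buffer(trans, data_buffer, NULL, 0);
 *		bcopy(src, data, data_len);
 *		hammer_modify_buffer_done(data_buffer);
 *	}
 *	if (data_buffer)
 *		hammer_rel_buffer(data_buffer, 0);
 */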

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS,
			  hammer_sync_scan2, &info);
	} else {
		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS | VMSC_NOWAIT,
			  hammer_sync_scan2, &info);
	}
	return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 *
 * If doing a lazy sync make just one pass on the vnode list, ignoring
 * any new vnodes added to the list while the sync is in progress.
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;
	int flags;

	flags = VMSC_GETVP;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vsyncscan(hmp->mp, flags | VMSC_NOWAIT, hammer_sync_scan2, &info);

	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		info.waitfor = waitfor;
		vsyncscan(hmp->mp, flags, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (ip == NULL)
		return(0);
	if (vp->v_type == VNON || vp->v_type == VBAD) {
		vclrisdirty(vp);
		return(0);
	}
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrisdirty(vp);
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}