/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include <sys/nlookup.h>
#include <sys/buf2.h>

#include "hammer.h"

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time or via the hammer
 * volume-add command; hammer_get_volume() will not load a new volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes the
 * ref (typically by associating it with the volume structure).
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(hammer_mount_t hmp, const char *volname,
		      struct vnode *devvp, void *data)
{
	struct mount *mp;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	hammer_volume_ondisk_t img;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;
	int i;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;

	/*
	 * Initialize the volume header with data if the data is specified.
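	 * The supplied data is a template hammer_volume_ondisk image which
	 * is copied over the header, but only if the device does not
	 * already carry a valid HAMMER signature (formatting an existing
	 * valid volume is refused below).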
	 */
	if (ronly == 0 && data) {
		img = (hammer_volume_ondisk_t)data;
		if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
			hkprintf("Formatting of valid HAMMER volume "
				"%s denied.  Erase with dd!\n", volname);
			error = EFTYPE;
			goto late_failure;
		}
		bcopy(img, ondisk, sizeof(*img));
	}

	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		hkprintf("volume %s has an invalid header\n", volume->vol_name);
		for (i = 0; i < (int)sizeof(ondisk->vol_signature); i++) {
			kprintf("%02x", ((char*)&ondisk->vol_signature)[i] & 0xFF);
			if (i != (int)sizeof(ondisk->vol_signature) - 1)
				kprintf(" ");
		}
		kprintf("\n");
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->vol_flags = ondisk->vol_flags;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    HAMMER_VOL_BUF_SIZE(ondisk));

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		hkprintf("volume %s's fsid does not match other volumes\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		hkprintf("volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	if (error == 0)
		hammer_volume_number_add(hmp, volume);

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			HAMMER_BUFFERS_PER_BIGBLOCK;
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			HAMMER_BUFFERS_PER_BIGBLOCK;
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE, NULL);
		vn_unlock(volume->devvp);
		hammer_free_volume(volume);
	}
	return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD, NULL);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan,
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data)
{
	hammer_mount_t hmp = volume->io.hmp;
	struct buf *bp = NULL;
	hammer_volume_ondisk_t img;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	int error;

	/*
	 * Clear the volume header with data if the data is specified.
	 */
	if (ronly == 0 && data && volume->devvp) {
		img = (hammer_volume_ondisk_t)data;
		error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
		if (error || bp->b_bcount < sizeof(*img)) {
			hmkprintf(hmp, "Failed to read volume header: %d\n", error);
			brelse(bp);
		} else {
			bcopy(img, bp->b_data, sizeof(*img));
			error = bwrite(bp);
			if (error)
				hmkprintf(hmp, "Failed to clear volume header: %d\n",
					error);
		}
	}

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;

	/*
	 * Clean up the persistent ref that ioerror might have on the volume.
	 */
	if (volume->io.ioerror)
		hammer_io_clear_error_noassert(&volume->io);

	/*
	 * This should release the bp.  Releasing the volume with flush set
	 * implies the interlock is set.
	 */
	hammer_ref_interlock_true(&volume->io.lock);
	hammer_rel_volume(volume, 1);
	KKASSERT(volume->io.bp == NULL);

	/*
	 * There should be no references on the volume.
	 */
	KKASSERT(hammer_norefs(&volume->io.lock));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD, NULL);
			vn_unlock(volume->devvp);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
			vn_unlock(volume->devvp);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_volume_number_del(hmp, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(hammer_mount_t hmp, int32_t vol_no, int *errorp)
{
	hammer_volume_t volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}

	/*
	 * Reference the volume, load/check the data on the 0->1 transition.
	 * hammer_load_volume() will dispose of the interlock on return,
	 * and also clean up the ref count on error.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		error = hammer_load_volume(volume);
	} else {
		KKASSERT(volume->ondisk);
		error = 0;
	}
	return (error);
}

/*
 * May be called without fs_token
 */
hammer_volume_t
hammer_get_root_volume(hammer_mount_t hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		lwkt_gettoken(&volume->io.hmp->fs_token);
		*errorp = hammer_load_volume(volume);
		lwkt_reltoken(&volume->io.hmp->fs_token);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       HAMMER_BUFSIZE);
		if (error == 0) {
			volume->ondisk = (void *)volume->io.bp->b_data;
			hammer_ref_interlock_done(&volume->io.lock);
		} else {
			hammer_rel_volume(volume, 1);
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 *
 * May be called without fs_token
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
	struct buf *bp;

	if (hammer_rel_interlock(&volume->io.lock, locked)) {
		lwkt_gettoken(&volume->io.hmp->fs_token);
		volume->ondisk = NULL;
		bp = hammer_io_release(&volume->io, locked);
		lwkt_reltoken(&volume->io.hmp->fs_token);
		hammer_rel_interlock_done(&volume->io.lock, locked);
		if (bp)
			brelse(bp);
	}
}

int
hammer_mountcheck_volumes(hammer_mount_t hmp)
{
	hammer_volume_t vol;
	int i;

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}

int
hammer_get_installed_volumes(hammer_mount_t hmp)
{
	int i, ret = 0;

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i)
		ret++;
	return(ret);
}

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */

/*
 * Helper function returns whether a zone offset can be directly translated
 * to a raw buffer index or not.  Really only the volume and undo zones
 * can't be directly translated.  Volumes are special-cased and undo zones
 * shouldn't be accessed via aliases in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
 */
static __inline int
hammer_direct_zone(hammer_off_t buf_offset)
{
	int zone = HAMMER_ZONE_DECODE(buf_offset);

	return(hammer_is_direct_mapped_index(zone));
}

hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.  Shortcut the operation if the
		 * ondisk structure is valid.
		 */
found_aliased:
		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
			hammer_io_advance(&buffer->io);
			KKASSERT(buffer->ondisk);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * 0->1 transition or deferred 0->1 transition (CHECK),
		 * interlock now held.  Shortcut if ondisk is already
		 * assigned.
		 */
		atomic_add_int(&hammer_count_refedbufs, 1);
		if (buffer->ondisk) {
			hammer_io_advance(&buffer->io);
			hammer_ref_interlock_done(&buffer->io.lock);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_root can be modified via a biodone() interrupt
		 * so the io_token must be held.
		 */
		if (buffer->io.mod_root == &hmp->lose_root) {
			lwkt_gettoken(&hmp->io_token);
			if (buffer->io.mod_root == &hmp->lose_root) {
				RB_REMOVE(hammer_mod_rb_tree,
					  buffer->io.mod_root, &buffer->io);
				buffer->io.mod_root = NULL;
				KKASSERT(buffer->io.modified == 0);
			}
			lwkt_reltoken(&hmp->io_token);
		}
		goto found;
	} else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
		/*
		 * If this is a read-only mount there could be an alias
		 * in the raw-zone.  If there is we use that buffer instead.
		 *
		 * rw mounts will not have aliases.  Also note when going
		 * from ro -> rw the recovered raw buffers are flushed and
		 * reclaimed, so again there will not be any aliases once
		 * the mount is rw.
		 */
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   hammer_xlate_to_zone2(buf_offset));
		if (buffer) {
			if (hammer_debug_general & 0x0001) {
				hkrateprintf(&hmp->kdiag,
					    "recovered aliased %016jx\n",
					    (intmax_t)buf_offset);
			}
			goto found_aliased;
		}
	}

	/*
	 * What is the buffer class?
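	 * The zone encoded in the offset determines how the underlying
	 * hammer_io will be flushed: as ordinary data, as part of the
	 * UNDO FIFO, or as meta-data.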
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (hammer_is_zone2_mapped_index(zone)) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		/* Must be zone-2 (not 1 or 4 or 15) */
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT(hammer_is_zone_raw_buffer(zone2_offset));
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->node_list);
	hammer_ref_interlock_true(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;			/* safety */
		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
			hammer_rel_interlock_done(&buffer->io.lock, 1);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	atomic_add_int(&hammer_count_refedbufs, 1);
found:

	/*
	 * The buffer is referenced and interlocked.  Load the buffer
	 * if necessary.  hammer_load_buffer() deals with the interlock
	 * and, if an error is returned, also deals with the ref.
	 */
	if (buffer->ondisk == NULL) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp)
			buffer = NULL;
	} else {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
		*errorp = 0;
	}
	return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or
 * write-running hammer buffers must be fully synced to disk before we can
 * issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT(hammer_is_zone_large_data(base_offset));

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (hammer_debug_general & 0x20000) {
				hkprintf("delbufr %016jx rerr=%d 1ref=%d\n",
					(intmax_t)buffer->zoneX_offset,
					error,
					hammer_oneref(&buffer->io.lock));
			}
			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				krateprintf(&hmp->kdiag,
					"hammer_del_buffers: unable to "
					"invalidate %016jx buffer=%p "
					"rep=%d lkrefs=%08x\n",
					(intmax_t)base_offset,
					buffer, report_conflicts,
					(buffer ? buffer->io.lock.refs : -1));
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}

/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;

	if (hammer_debug_io & 0x0004) {
		hdkprintf("load_buffer %016jx %016jx isnew=%d od=%p\n",
			(intmax_t)buffer->zoneX_offset,
			(intmax_t)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		/*
		 * Issue the read or generate a new buffer.  When reading
		 * the limit argument controls any read-ahead clustering
		 * hammer_io_read() is allowed to do.
		 *
		 * We cannot read-ahead in the large-data zone and we cannot
		 * cross a big-block boundary as the next big-block might
		 * use a different buffer size.
		 */
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else if (hammer_is_zone_large_data(buffer->zoneX_offset)) {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       buffer->io.bytes);
		} else {
			hammer_off_t limit;

			limit = (buffer->zone2_offset +
				 HAMMER_BIGBLOCK_MASK64) &
				~HAMMER_BIGBLOCK_MASK64;
			limit -= buffer->zone2_offset;
			error = hammer_io_read(volume->devvp, &buffer->io,
					       limit);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0) {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
	} else {
		hammer_rel_buffer(buffer, 1);
	}
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	hammer_volume_t volume = (hammer_volume_t)data;

	/*
	 * If volume != NULL we are only interested in unloading buffers
	 * associated with a particular volume.
	 */
	if (volume != NULL && volume != buffer->io.volume)
		return 0;

	/*
	 * Clean up the persistent ref that ioerror might have on the buffer
	 * and acquire a ref.  Expect a 0->1 transition.
	 */
	if (buffer->io.ioerror) {
		hammer_io_clear_error_noassert(&buffer->io);
		atomic_add_int(&hammer_count_refedbufs, -1);
	}
	hammer_ref_interlock_true(&buffer->io.lock);
	atomic_add_int(&hammer_count_refedbufs, 1);

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 1);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	hammer_mount_t hmp;
	int error;
	int locked;

	/*
	 * Acquire a ref, plus the buffer will be interlocked on the
	 * 0->1 transition.
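	 * hammer_ref_interlock() returns non-zero in that case, which is
	 * our cue to load the buffer's ondisk data below.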
	 */
	locked = hammer_ref_interlock(&buffer->io.lock);
	hmp = buffer->io.hmp;

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_root can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.  lose_root requires the io_token.
	 */
	if (buffer->io.mod_root == &hmp->lose_root) {
		lwkt_gettoken(&hmp->io_token);
		if (buffer->io.mod_root == &hmp->lose_root) {
			RB_REMOVE(hammer_mod_rb_tree,
				  buffer->io.mod_root, &buffer->io);
			buffer->io.mod_root = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}

	if (locked) {
		atomic_add_int(&hammer_count_refedbufs, 1);
		error = hammer_load_buffer(buffer, 0);
		/* NOTE: on error the buffer pointer is stale */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
		return;

	/*
	 * hammer_count_refedbufs accounting.  Decrement if we are in
	 * the error path or if CHECK is clear.
	 *
	 * If we are not in the error path and CHECK is set the caller
	 * probably just did a hammer_ref() and didn't account for it,
	 * so we don't account for the loss here.
	 */
	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
		atomic_add_int(&hammer_count_refedbufs, -1);

	/*
	 * If the caller locked us or the normal release transitions
	 * from 1->0 (and acquired the lock), attempt to release the
	 * io.  If the caller locked us we tell hammer_io_release()
	 * to flush (which would be the unload or failure path).
	 */
	bp = hammer_io_release(&buffer->io, locked);

	/*
	 * If the buffer has no bp association and no refs we can destroy
	 * it.
	 *
	 * NOTE: It is impossible for any associated B-Tree nodes to have
	 * refs if the buffer has no additional refs.
	 */
	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
		RB_REMOVE(hammer_buf_rb_tree,
			  &buffer->io.hmp->rb_bufs_root,
			  buffer);
		volume = buffer->io.volume;
		buffer->io.volume = NULL;			/* sanity */
		hammer_rel_volume(volume, 0);
		hammer_io_clear_modlist(&buffer->io);
		hammer_flush_buffer_nodes(buffer);
		KKASSERT(TAILQ_EMPTY(&buffer->node_list));
		freeme = 1;
	}

	/*
	 * Cleanup
	 */
	hammer_rel_interlock_done(&buffer->io.lock, locked);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
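 *
 * A typical caller holds a single cached *bufferp across a series of
 * reads and releases it with hammer_rel_buffer() when done.  Sketch only
 * (hmp, buf_offset and error are placeholders, not a verbatim caller):
 *
 *	hammer_buffer_t buffer = NULL;
 *	void *ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);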
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 *	 passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int isnew, int *errorp, hammer_buffer_t *bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT(HAMMER_ZONE(buf_offset) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, isnew, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, hammer_buffer_t *bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 0, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, hammer_buffer_t *bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, 0, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, hammer_buffer_t *bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 1, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, hammer_buffer_t *bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, 1, errorp, bufferp));
}

/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * the B-Tree.
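 *
 * A typical acquire/release sequence, sketched from the functions in this
 * file rather than copied from a real caller:
 *
 *	node = hammer_get_node(trans, node_offset, 0, &error);
 *	if (node) {
 *		... examine node->ondisk ...
 *		hammer_rel_node(node);
 *	}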
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;
	int doload;

	KKASSERT(hammer_is_zone_btree(node_offset));

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
		doload = hammer_ref_interlock_true(&node->lock);
	} else {
		doload = hammer_ref_interlock(&node->lock);
	}
	if (doload) {
		*errorp = hammer_load_node(trans, node, isnew);
		if (*errorp)
			node = NULL;
	} else {
		KKASSERT(node->ondisk);
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	}
	return(node);
}

/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
	hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's node_list and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
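		 * A good CRC latches HAMMER_NODE_CRCGOOD so the test is not
		 * repeated on later reacquisitions; a failure latches
		 * HAMMER_NODE_CRCBAD, which is converted into an error for
		 * the caller below.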
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				hdkprintf("CRC B-TREE NODE @ %016jx/%lu FAILED\n",
					(intmax_t)node->node_offset,
					sizeof(*node->ondisk));
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	if (error) {
		_hammer_rel_node(node, 1);
	} else {
		hammer_ref_interlock_done(&node->lock);
	}
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;
	int doload;

	node = cache->node;
	if (node != NULL) {
		doload = hammer_ref_interlock(&node->lock);
		if (doload) {
			*errorp = hammer_load_node(trans, node, 0);
			if (*errorp)
				node = NULL;
		} else {
			KKASSERT(node->ondisk);
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
				_hammer_rel_node(node, 0);
				node = NULL;
			} else {
				*errorp = 0;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
void
_hammer_rel_node(hammer_node_t node, int locked)
{
	hammer_buffer_t buffer;

	/*
	 * Deref the node.  If this isn't the 1->0 transition we're basically
	 * done.  If locked is non-zero this function will just deref the
	 * locked node and return 1, otherwise it will deref the locked
	 * node and either lock and return 1 on the 1->0 transition or
	 * not lock and return 0.
	 */
	if (hammer_rel_interlock(&node->lock, locked) == 0)
		return;

	/*
	 * Either locked was non-zero and we are interlocked, or the
	 * hammer_rel_interlock() call returned non-zero and we are
	 * interlocked.
	 *
	 * The ref-count must still be decremented if locked != 0 so
	 * the cleanup required still varies a bit.
	 *
	 * hammer_flush_node() when called with 1 or 2 will dispose of
	 * the lock and possible ref-count.
	 */
	if (node->ondisk == NULL) {
		hammer_flush_node(node, locked + 1);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC) {
		hammer_rel_interlock_done(&node->lock, locked);
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
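	 * HAMMER_NODE_FLUSH selects destruction; otherwise the node remains
	 * in the RB tree for passive reuse.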
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		/*
		 * Normal release.
		 */
		hammer_rel_interlock_done(&node->lock, locked);
	} else {
		/*
		 * Destroy the node.
		 */
		hammer_flush_node(node, locked + 1);
		/* node is stale */

	}
	hammer_rel_buffer(buffer, 0);
}

void
hammer_rel_node(hammer_node_t node)
{
	_hammer_rel_node(node, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node, 0);
	}
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 *	locked == 0	Normal unlocked operation
 *	locked == 1	Call hammer_rel_interlock_done(..., 0);
 *	locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;
	int dofree;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}

	/*
	 * NOTE: refs is predisposed if another thread is blocking and
	 *	 will be larger than 0 in that case.  We aren't MPSAFE
	 *	 here.
	 */
	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->node_list, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		dofree = 1;
	} else {
		dofree = 0;
	}

	/*
	 * Deal with the interlock if locked == 1 or locked == 2.
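	 * (locked - 1 maps directly onto the hammer_rel_interlock_done()
	 * argument, matching the table in the function comment above.)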
	 */
	if (locked)
		hammer_rel_interlock_done(&node->lock, locked - 1);

	/*
	 * Destroy if requested
	 */
	if (dofree) {
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->node_list)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (hammer_try_interlock_norefs(&node->lock)) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			_hammer_rel_node(node, 1);
		} else {
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->node_list, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}


/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  uint16_t rec_type, hammer_off_t *data_offsetp,
		  hammer_buffer_t *data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data directly from blockmap.
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			/*
			 * Only mirror-write comes here.
			 * Regular allocation path uses blockmap reservation.
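			 * Large-data allocations are rounded up to a full
			 * HAMMER_BUFSIZE buffer below.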
			 */
			zone = hammer_data_zone_index(data_len);
			if (zone == HAMMER_ZONE_LARGE_DATA_INDEX) {
				/* round up */
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
			}
			break;
		default:
			hpanic("rec_type %04x unknown", rec_type);
			zone = HAMMER_ZONE_UNAVAIL_INDEX; /* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}

	data = NULL;
	if (*errorp == 0 && data_bufferp && data_len)
		data = hammer_bread_ext(trans->hmp, *data_offsetp, data_len,
					errorp, data_bufferp);
	return(data);
}

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

struct hammer_sync_info {
	int error;
};

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	if (waitfor == MNT_WAIT) {
		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS,
			  hammer_sync_scan2, &info);
	} else {
		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS | VMSC_NOWAIT,
			  hammer_sync_scan2, &info);
	}
	return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 *
 * If doing a lazy sync make just one pass on the vnode list, ignoring
 * any new vnodes added to the list while the sync is in progress.
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;
	int flags;

	flags = VMSC_GETVP;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	info.error = 0;
	vsyncscan(hmp->mp, flags | VMSC_NOWAIT, hammer_sync_scan2, &info);

	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		vsyncscan(hmp->mp, flags, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	hammer_inode_t ip;
	int error;

	ip = VTOI(vp);
	if (ip == NULL)
		return(0);
	if (vp->v_type == VNON || vp->v_type == VBAD) {
		vclrisdirty(vp);
		return(0);
	}
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrisdirty(vp);
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}