/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include <sys/nlookup.h>
#include <sys/buf2.h>

#include "hammer.h"

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
                hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);
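
/*
 * Illustrative sketch (not part of the code below): all three trees above
 * key off a single 64-bit value because a hammer_off_t packs its routing
 * fields into the top bits.  Assuming the encoding described in
 * hammer_disk.h:
 *
 *      zone   = HAMMER_ZONE_DECODE(off);       zone index, bits 60-63
 *      vol_no = HAMMER_VOL_DECODE(off);        volume number, bits 52-59
 *
 * so a raw 64-bit compare on the offset also orders entries by zone and
 * volume.
 */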

/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time or via the
 * hammer volume-add command; hammer_get_volume() will not load a new
 * volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes the
 * ref (typically by associating it with the volume structure).
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(hammer_mount_t hmp, const char *volname,
                      struct vnode *devvp, void *data)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct hammer_volume_ondisk *img;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;
        int i;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, hmp->m_misc);
        volume->io.hmp = hmp;   /* bootstrap */
        hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        if (devvp == NULL) {
                error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
                nlookup_done(&nd);
        } else {
                error = 0;
                volume->devvp = devvp;
        }

        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 && vcount(volume->devvp) > 0)
                error = EBUSY;
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;

        /*
         * Initialize the volume header with data if the data is specified.
         */
        if (ronly == 0 && data) {
                img = (struct hammer_volume_ondisk *)data;
                if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
                        hkprintf("Formatting of valid HAMMER volume "
                                 "%s denied.  Erase with dd!\n", volname);
                        error = EFTYPE;
                        goto late_failure;
                }
                bcopy(img, ondisk, sizeof(*img));
        }

        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                hkprintf("volume %s has an invalid header\n",
                         volume->vol_name);
                for (i = 0; i < (int)sizeof(ondisk->vol_signature); i++) {
                        kprintf("%02x", ((char*)&ondisk->vol_signature)[i] & 0xFF);
                        if (i != (int)sizeof(ondisk->vol_signature) - 1)
                                kprintf(" ");
                }
                kprintf("\n");
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                hkprintf("volume %s's fsid does not match other volumes\n",
                         volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                hkprintf("volume %s has a duplicate vol_no %d\n",
                         volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        if (error == 0)
                HAMMER_VOLUME_NUMBER_ADD(hmp, volume);

        /*
         * Set the root volume.  HAMMER special-cases the root volume
         * structure.  We do not hold a ref because this would prevent
         * related I/O from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                                              HAMMER_BUFFERS_PER_BIGBLOCK;
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                                              HAMMER_BUFFERS_PER_BIGBLOCK;
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE, NULL);
                vn_unlock(volume->devvp);
                hammer_free_volume(volume);
        }
        return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD, NULL);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so errors are ignored.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data)
{
        hammer_mount_t hmp = volume->io.hmp;
        struct buf *bp = NULL;
        struct hammer_volume_ondisk *img;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        int error;

        /*
         * Clear the volume header with the supplied data, if specified.
         */
        if (ronly == 0 && data && volume->devvp) {
                img = (struct hammer_volume_ondisk *)data;
                error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
                if (error || bp->b_bcount < sizeof(*img)) {
                        hmkprintf(hmp, "Failed to read volume header: %d\n", error);
                        brelse(bp);
                } else {
                        bcopy(img, bp->b_data, sizeof(*img));
                        error = bwrite(bp);
                        if (error)
                                hmkprintf(hmp, "Failed to clear volume header: %d\n",
                                        error);
                }
        }

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&volume->io, 1);
        volume->io.waitdep = 1;

        /*
         * Clean up the persistent ref ioerror might have on the volume.
         */
        if (volume->io.ioerror)
                hammer_io_clear_error_noassert(&volume->io);

        /*
         * This should release the bp.  Releasing the volume with flush set
         * implies the interlock is set.
         */
        hammer_ref_interlock_true(&volume->io.lock);
        hammer_rel_volume(volume, 1);
        KKASSERT(volume->io.bp == NULL);

        /*
         * There should be no references on the volume.
         */
        KKASSERT(hammer_norefs(&volume->io.lock));

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        /*
                         * Make sure we don't sync anything to disk if we
                         * are in read-only mode (1) or critically-errored
                         * (2).  Note that there may be dirty buffers in
                         * normal read-only mode from crash recovery.
                         */
                        vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD, NULL);
                        vn_unlock(volume->devvp);
                } else {
                        /*
                         * Normal termination, save any dirty buffers
                         * (XXX there really shouldn't be any).
                         */
                        vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
                        vn_unlock(volume->devvp);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        HAMMER_VOLUME_NUMBER_DEL(hmp, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        hammer_mount_t hmp = volume->io.hmp;

        if (volume->vol_name) {
                kfree(volume->vol_name, hmp->m_misc);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(hammer_mount_t hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }

        /*
         * Reference the volume, load/check the data on the 0->1 transition.
         * hammer_load_volume() will dispose of the interlock on return,
         * and also clean up the ref count on error.
         */
        if (hammer_ref_interlock(&volume->io.lock)) {
                *errorp = hammer_load_volume(volume);
                if (*errorp)
                        volume = NULL;
        } else {
                KKASSERT(volume->ondisk);
                *errorp = 0;
        }
        return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        /*
         * Reference the volume and deal with the check condition used to
         * load its ondisk info.
         */
        if (hammer_ref_interlock(&volume->io.lock)) {
                error = hammer_load_volume(volume);
        } else {
                KKASSERT(volume->ondisk);
                error = 0;
        }
        return (error);
}

/*
 * May be called without fs_token
 */
hammer_volume_t
hammer_get_root_volume(hammer_mount_t hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);

        /*
         * Reference the volume and deal with the check condition used to
         * load its ondisk info.
         */
        if (hammer_ref_interlock(&volume->io.lock)) {
                lwkt_gettoken(&volume->io.hmp->fs_token);
                *errorp = hammer_load_volume(volume);
                lwkt_reltoken(&volume->io.hmp->fs_token);
                if (*errorp)
                        volume = NULL;
        } else {
                KKASSERT(volume->ondisk);
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       HAMMER_BUFSIZE);
                if (error == 0) {
                        volume->ondisk = (void *)volume->io.bp->b_data;
                        hammer_ref_interlock_done(&volume->io.lock);
                } else {
                        hammer_rel_volume(volume, 1);
                }
        } else {
                error = 0;
        }
        return(error);
}
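
/*
 * A minimal sketch of the ref/interlock idiom used by the volume, buffer
 * and node code throughout this file ("elm" and load() are placeholders):
 *
 *      if (hammer_ref_interlock(&elm->io.lock)) {
 *              error = load(elm);
 *      } else {
 *              KKASSERT(elm->ondisk);
 *              error = 0;
 *      }
 *
 * hammer_ref_interlock() returns non-zero only on the 0->1 (or deferred
 * CHECK) transition, with the interlock held.  The loader must then either
 * call hammer_ref_interlock_done() on success or release with the locked
 * flag set on error, which also drops the newly acquired ref.
 */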

/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 *
 * May be called without fs_token
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
        struct buf *bp;

        if (hammer_rel_interlock(&volume->io.lock, locked)) {
                lwkt_gettoken(&volume->io.hmp->fs_token);
                volume->ondisk = NULL;
                bp = hammer_io_release(&volume->io, locked);
                lwkt_reltoken(&volume->io.hmp->fs_token);
                hammer_rel_interlock_done(&volume->io.lock, locked);
                if (bp)
                        brelse(bp);
        }
}

int
hammer_mountcheck_volumes(hammer_mount_t hmp)
{
        hammer_volume_t vol;
        int i;

        HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

int
hammer_get_installed_volumes(hammer_mount_t hmp)
{
        int i, ret = 0;

        HAMMER_VOLUME_NUMBER_FOREACH(hmp, i)
                ret++;
        return(ret);
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */

/*
 * Helper function returns whether a zone offset can be directly translated
 * to a raw buffer index or not.  Really only the volume and undo zones
 * can't be directly translated.  Volumes are special-cased and undo zones
 * shouldn't be alias-accessed in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
 */
static __inline int
hammer_direct_zone(hammer_off_t buf_offset)
{
        switch(HAMMER_ZONE_DECODE(buf_offset)) {
        case HAMMER_ZONE_RAW_BUFFER_INDEX:
        case HAMMER_ZONE_FREEMAP_INDEX:
        case HAMMER_ZONE_BTREE_INDEX:
        case HAMMER_ZONE_META_INDEX:
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                return(1);
        default:
                return(0);
        }
        /* NOT REACHED */
}
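
/*
 * For the zones accepted above, the zone-X to zone-2 translation is pure
 * bit surgery on the offset; a sketch of what hammer_xlate_to_zone2()
 * effectively does (assuming the standard encoding):
 *
 *      zone2 = (off & ~HAMMER_OFF_ZONE_MASK) | HAMMER_ZONE_RAW_BUFFER;
 *
 * Only the 4-bit zone field changes; the volume and offset bits are
 * preserved, which is what makes read-only aliases possible at all.
 */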

hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                /*
                 * Once refed the ondisk field will not be cleared by
                 * any other action.  Shortcut the operation if the
                 * ondisk structure is valid.
                 */
found_aliased:
                if (hammer_ref_interlock(&buffer->io.lock) == 0) {
                        hammer_io_advance(&buffer->io);
                        KKASSERT(buffer->ondisk);
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * 0->1 transition or deferred 0->1 transition (CHECK),
                 * interlock now held.  Shortcut if ondisk is already
                 * assigned.
                 */
                atomic_add_int(&hammer_count_refedbufs, 1);
                if (buffer->ondisk) {
                        hammer_io_advance(&buffer->io);
                        hammer_ref_interlock_done(&buffer->io.lock);
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_root can be modified via a biodone() interrupt
                 * so the io_token must be held.
                 */
                if (buffer->io.mod_root == &hmp->lose_root) {
                        lwkt_gettoken(&hmp->io_token);
                        if (buffer->io.mod_root == &hmp->lose_root) {
                                RB_REMOVE(hammer_mod_rb_tree,
                                          buffer->io.mod_root, &buffer->io);
                                buffer->io.mod_root = NULL;
                                KKASSERT(buffer->io.modified == 0);
                        }
                        lwkt_reltoken(&hmp->io_token);
                }
                goto found;
        } else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
                /*
                 * If this is a read-only mount there could be an alias
                 * in the raw-zone.  If there is we use that buffer instead.
                 *
                 * rw mounts will not have aliases.  Also note when going
                 * from ro -> rw the recovered raw buffers are flushed and
                 * reclaimed, so again there will not be any aliases once
                 * the mount is rw.
                 */
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   hammer_xlate_to_zone2(buf_offset));
                if (buffer) {
                        if (hammer_debug_general & 0x0001) {
                                hkrateprintf(&hmp->kdiag,
                                             "recovered aliased %016jx\n",
                                             (intmax_t)buf_offset);
                        }
                        goto found_aliased;
                }
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE2_MAPPED_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;

        hammer_io_init(&buffer->io, volume, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref_interlock_true(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_rel_volume(volume, 0);
                buffer->io.volume = NULL;                       /* safety */
                if (hammer_rel_interlock(&buffer->io.lock, 1))  /* safety */
                        hammer_rel_interlock_done(&buffer->io.lock, 1);
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
                goto again;
        }
        atomic_add_int(&hammer_count_refedbufs, 1);
found:

        /*
         * The buffer is referenced and interlocked.  Load the buffer
         * if necessary.  hammer_load_buffer() deals with the interlock
         * and, if an error is returned, also deals with the ref.
         */
        if (buffer->ondisk == NULL) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp)
                        buffer = NULL;
        } else {
                hammer_io_advance(&buffer->io);
                hammer_ref_interlock_done(&buffer->io.lock);
                *errorp = 0;
        }
        return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && (buffer->io.modified || buffer->io.running)) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
                                hammer_io_wait(&buffer->io);
                                if (buffer->io.modified) {
                                        hammer_io_write_interlock(&buffer->io);
                                        hammer_io_flush(&buffer->io, 0);
                                        hammer_io_done_interlock(&buffer->io);
                                        hammer_io_wait(&buffer->io);
                                }
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes,
                   int report_conflicts)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;
        int ret_error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &ret_error);
        KKASSERT(ret_error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (hammer_debug_general & 0x20000) {
                                hkprintf("delbufr %016jx rerr=%d 1ref=%d\n",
                                         (intmax_t)buffer->zoneX_offset,
                                         error,
                                         hammer_oneref(&buffer->io.lock));
                        }
                        if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
                                error = EAGAIN;
                                hammer_rel_buffer(buffer, 0);
                        }
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                buffer->io.waitdep = 1;
                                KKASSERT(buffer->io.volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        error = hammer_io_inval(volume, zone2_offset);
                }
                if (error) {
                        ret_error = error;
                        if (report_conflicts ||
                            (hammer_debug_general & 0x8000)) {
                                krateprintf(&hmp->kdiag,
                                        "hammer_del_buffers: unable to "
                                        "invalidate %016llx buffer=%p "
                                        "rep=%d lkrefs=%08x\n",
                                        (long long)base_offset,
                                        buffer, report_conflicts,
                                        (buffer ? buffer->io.lock.refs : -1));
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
        return (ret_error);
}

/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->io.volume;

        if (hammer_debug_io & 0x0004) {
                hdkprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        (long long)buffer->zoneX_offset,
                        (long long)buffer->zone2_offset,
                        isnew, buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                /*
                 * Issue the read or generate a new buffer.  When reading
                 * the limit argument controls any read-ahead clustering
                 * hammer_io_read() is allowed to do.
                 *
                 * We cannot read-ahead in the large-data zone and we cannot
                 * cross a big-block boundary as the next big-block might
                 * use a different buffer size.
                 */
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
                           HAMMER_ZONE_LARGE_DATA) {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               buffer->io.bytes);
                } else {
                        hammer_off_t limit;

                        limit = (buffer->zone2_offset +
                                 HAMMER_BIGBLOCK_MASK64) &
                                ~HAMMER_BIGBLOCK_MASK64;
                        limit -= buffer->zone2_offset;
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               limit);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        if (error == 0) {
                hammer_io_advance(&buffer->io);
                hammer_ref_interlock_done(&buffer->io.lock);
        } else {
                hammer_rel_buffer(buffer, 1);
        }
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
        struct hammer_volume *volume = (struct hammer_volume *) data;

        /*
         * If volume != NULL we are only interested in unloading buffers
         * associated with a particular volume.
         */
        if (volume != NULL && volume != buffer->io.volume)
                return 0;

        /*
         * Clean up the persistent ref ioerror might have on the buffer
         * and acquire a ref.  Expect a 0->1 transition.
         */
        if (buffer->io.ioerror) {
                hammer_io_clear_error_noassert(&buffer->io);
                atomic_add_int(&hammer_count_refedbufs, -1);
        }
        hammer_ref_interlock_true(&buffer->io.lock);
        atomic_add_int(&hammer_count_refedbufs, 1);

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         *
         * We must set waitdep to ensure that a running buffer is waited
         * on and released prior to us trying to unload the volume.
         */
        hammer_io_clear_modify(&buffer->io, 1);
        hammer_flush_buffer_nodes(buffer);
        buffer->io.waitdep = 1;
        hammer_rel_buffer(buffer, 1);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        hammer_mount_t hmp;
        int error;
        int locked;

        /*
         * Acquire a ref, plus the buffer will be interlocked on the
         * 0->1 transition.
         */
        locked = hammer_ref_interlock(&buffer->io.lock);
        hmp = buffer->io.hmp;

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_root can be modified via
         * a biodone() interrupt.
         *
         * No longer loose.  lose_root requires the io_token.
         */
        if (buffer->io.mod_root == &hmp->lose_root) {
                lwkt_gettoken(&hmp->io_token);
                if (buffer->io.mod_root == &hmp->lose_root) {
                        RB_REMOVE(hammer_mod_rb_tree,
                                  buffer->io.mod_root, &buffer->io);
                        buffer->io.mod_root = NULL;
                }
                lwkt_reltoken(&hmp->io_token);
        }

        if (locked) {
                atomic_add_int(&hammer_count_refedbufs, 1);
                error = hammer_load_buffer(buffer, 0);
                /* NOTE: on error the buffer pointer is stale */
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
        hammer_volume_t volume;
        hammer_mount_t hmp;
        struct buf *bp = NULL;
        int freeme = 0;

        hmp = buffer->io.hmp;

        if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
                return;

        /*
         * hammer_count_refedbufs accounting.  Decrement if we are in
         * the error path or if CHECK is clear.
         *
         * If we are not in the error path and CHECK is set the caller
         * probably just did a hammer_ref() and didn't account for it,
         * so we don't account for the loss here.
         */
        if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
                atomic_add_int(&hammer_count_refedbufs, -1);

        /*
         * If the caller locked us or the normal released transitions
         * from 1->0 (and acquired the lock) attempt to release the
         * io.  If the caller locked us we tell hammer_io_release()
         * to flush (which would be the unload or failure path).
         */
        bp = hammer_io_release(&buffer->io, locked);

        /*
         * If the buffer has no bp association and no refs we can destroy
         * it.
         *
         * NOTE: It is impossible for any associated B-Tree nodes to have
         * refs if the buffer has no additional refs.
         */
        if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
                RB_REMOVE(hammer_buf_rb_tree,
                          &buffer->io.hmp->rb_bufs_root,
                          buffer);
                volume = buffer->io.volume;
                buffer->io.volume = NULL;                       /* sanity */
                hammer_rel_volume(volume, 0);
                hammer_io_clear_modlist(&buffer->io);
                hammer_flush_buffer_nodes(buffer);
                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                freeme = 1;
        }

        /*
         * Cleanup
         */
        hammer_rel_interlock_done(&buffer->io.lock, locked);
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
              int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
            int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
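
/*
 * Typical consumer pattern for the accessors above (illustrative sketch,
 * not a function in this file):
 *
 *      hammer_buffer_t buffer = NULL;
 *      int error;
 *
 *      ptr1 = hammer_bread(hmp, offset1, &error, &buffer);
 *      ...
 *      ptr2 = hammer_bread(hmp, offset2, &error, &buffer);
 *      ...
 *      if (buffer)
 *              hammer_rel_buffer(buffer, 0);
 *
 * Successive calls reuse the cached *bufferp when the new offset falls
 * within the same buffer, avoiding an RB tree lookup and ref cycle, and
 * automatically release/replace it when it does not.
 */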

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * the B-Tree.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_node_t node;
        int doload;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), hmp->m_misc,
                               M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, hmp->m_misc);
                        goto again;
                }
                doload = hammer_ref_interlock_true(&node->lock);
        } else {
                doload = hammer_ref_interlock(&node->lock);
        }
        if (doload) {
                *errorp = hammer_load_node(trans, node, isnew);
                trans->flags |= HAMMER_TRANSF_DIDIO;
                if (*errorp)
                        node = NULL;
        } else {
                KKASSERT(node->ondisk);
                *errorp = 0;
                hammer_io_advance(&node->buffer->io);
        }
        return(node);
}

/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist, node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist, node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));

                /*
                 * Check CRC.  NOTE: Neither flag is set and the CRC is not
                 * generated on new B-Tree nodes.
                 */
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCANY) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0) {
                                hdkprintf("CRC B-TREE NODE @ %016llx/%lu FAILED\n",
                                        (long long)node->node_offset,
                                        sizeof(*node->ondisk));
                                if (hammer_debug_critical)
                                        Debugger("CRC FAILED: B-TREE NODE");
                                node->flags |= HAMMER_NODE_CRCBAD;
                        } else {
                                node->flags |= HAMMER_NODE_CRCGOOD;
                        }
                }
        }
        if (node->flags & HAMMER_NODE_CRCBAD) {
                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                        error = EDOM;
                else
                        error = EIO;
        }
failed:
        if (error) {
                _hammer_rel_node(node, 1);
        } else {
                hammer_ref_interlock_done(&node->lock);
        }
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;
        int doload;

        node = cache->node;
        if (node != NULL) {
                doload = hammer_ref_interlock(&node->lock);
                if (doload) {
                        *errorp = hammer_load_node(trans, node, 0);
                        if (*errorp)
                                node = NULL;
                } else {
                        KKASSERT(node->ondisk);
                        if (node->flags & HAMMER_NODE_CRCBAD) {
                                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                                        *errorp = EDOM;
                                else
                                        *errorp = EIO;
                                _hammer_rel_node(node, 0);
                                node = NULL;
                        } else {
                                *errorp = 0;
                        }
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}
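
/*
 * Illustrative round trip for the passive node cache (sketch only;
 * ip->cache[0] stands in for any hammer_node_cache embedded in another
 * structure):
 *
 *      hammer_cache_node(&ip->cache[0], node);     while holding a ref
 *      hammer_rel_node(node);
 *      ...
 *      node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
 *
 * The cache pointer is cleared automatically if the node is flushed, so
 * an ENOENT from hammer_ref_node_safe() simply means the caller must
 * re-lookup the node via the B-Tree.
 */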

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
void
_hammer_rel_node(hammer_node_t node, int locked)
{
        hammer_buffer_t buffer;

        /*
         * Deref the node.  If this isn't the 1->0 transition we're basically
         * done.  If locked is non-zero this function will just deref the
         * locked node and return 1, otherwise it will deref the locked
         * node and either lock and return 1 on the 1->0 transition or
         * not lock and return 0.
         */
        if (hammer_rel_interlock(&node->lock, locked) == 0)
                return;

        /*
         * Either locked was non-zero and we are interlocked, or the
         * hammer_rel_interlock() call returned non-zero and we are
         * interlocked.
         *
         * The ref-count must still be decremented if locked != 0 so
         * the cleanup required still varies a bit.
         *
         * hammer_flush_node() when called with 1 or 2 will dispose of
         * the lock and possible ref-count.
         */
        if (node->ondisk == NULL) {
                hammer_flush_node(node, locked + 1);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC) {
                hammer_rel_interlock_done(&node->lock, locked);
                return;
        }

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                /*
                 * Normal release.
                 */
                hammer_rel_interlock_done(&node->lock, locked);
        } else {
                /*
                 * Destroy the node.
                 */
                hammer_flush_node(node, locked + 1);
                /* node is stale */

        }
        hammer_rel_buffer(buffer, 0);
}

void
hammer_rel_node(hammer_node_t node)
{
        _hammer_rel_node(node, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node doesn't exist, or is being deleted, don't cache it!
         *
         * The node can only ever be NULL in the I/O failure path.
         */
        if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node, 0);
        }
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 *      locked == 0     Normal unlocked operation
 *      locked == 1     Call hammer_rel_interlock_done(..., 0);
 *      locked == 2     Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 * is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;
        hammer_mount_t hmp = node->hmp;
        int dofree;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }

        /*
         * NOTE: refs is predisposed if another thread is blocking and
         * will be larger than 0 in that case.  We aren't MPSAFE here.
         */
        if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                dofree = 1;
        } else {
                dofree = 0;
        }

        /*
         * Deal with the interlock if locked == 1 or locked == 2.
         */
        if (locked)
                hammer_rel_interlock_done(&node->lock, locked - 1);

        /*
         * Destroy if requested
         */
        if (dofree) {
                --hammer_count_nodes;
                kfree(node, hmp->m_misc);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (hammer_try_interlock_norefs(&node->lock)) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        _hammer_rel_node(node, 1);
                } else {
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}


/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            hint, errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}
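
/*
 * Sketch of the modify bracket used above and expected of callers of
 * hammer_alloc_data() (illustrative; src/ptr/len are placeholders):
 *
 *      hammer_modify_buffer(trans, buffer, ptr, len);
 *      bcopy(src, ptr, len);
 *      hammer_modify_buffer_done(buffer);
 *
 * The bracket marks the buffer modified and generates UNDO for the
 * covered range; the _noundo variant used above skips UNDO generation,
 * which is safe for freshly allocated, never-before-visible media.
 */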

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp,
                  hammer_off_t hint, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                case HAMMER_RECTYPE_PFS:
                case HAMMER_RECTYPE_SNAPSHOT:
                case HAMMER_RECTYPE_CONFIG:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        /*
                         * Only mirror-write comes here.
                         */
                        zone = hammer_data_zone_index(data_len);
                        if (zone == HAMMER_ZONE_LARGE_DATA_INDEX) {
                                /* round up */
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                        }
                        break;
                default:
                        hpanic("rec_type %04x unknown", rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
                                                      hint, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        return(data);
}

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS,
                          hammer_sync_scan2, &info);
        } else {
                vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS | VMSC_NOWAIT,
                          hammer_sync_scan2, &info);
        }
        return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 *
 * If doing a lazy sync make just one pass on the vnode list, ignoring
 * any new vnodes added to the list while the sync is in progress.
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;
        int flags;

        flags = VMSC_GETVP;
        if (waitfor & MNT_LAZY)
                flags |= VMSC_ONEPASS;

        info.error = 0;
        info.waitfor = MNT_NOWAIT;
        vsyncscan(hmp->mp, flags | VMSC_NOWAIT, hammer_sync_scan2, &info);

        if (info.error == 0 && (waitfor & MNT_WAIT)) {
                info.waitfor = waitfor;
                vsyncscan(hmp->mp, flags, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp, NULL);
                hammer_flusher_async(hmp, NULL);
        }
        return(info.error);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (ip == NULL)
                return(0);
        if (vp->v_type == VNON || vp->v_type == VBAD) {
                vclrisdirty(vp);
                return(0);
        }
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
            RB_EMPTY(&vp->v_rbdirty_tree)) {
                vclrisdirty(vp);
                return(0);
        }
        error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
        if (error)
                info->error = error;
        return(0);
}