/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>

#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
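 *
 * The RB-tree compare routine that follows therefore keys on the
 * zoneX_offset field.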
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes the
 * ref (typically by associating it with the volume structure).
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the root volume's
	 * structure.  We do not hold a ref because this would prevent
	 * related I/O from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the scan;
 * returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
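	 *
	 * hammer_io_clear_modify() below discards the dirty state rather
	 * than writing it back.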
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;

	/*
	 * Clean up the persistent ref ioerror might have on the volume
	 */
	if (volume->io.ioerror)
		hammer_io_clear_error_noassert(&volume->io);

	/*
	 * This should release the bp.  Releasing the volume with flush set
	 * implies the interlock is set.
	 */
	hammer_ref_interlock_true(&volume->io.lock);
	hammer_rel_volume(volume, 1);
	KKASSERT(volume->io.bp == NULL);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(hammer_norefs(&volume->io.lock));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}

	/*
	 * Reference the volume, load/check the data on the 0->1 transition.
	 * hammer_load_volume() will dispose of the interlock on return,
	 * and also clean up the ref count on error.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		error = hammer_load_volume(volume);
	} else {
		KKASSERT(volume->ondisk);
		error = 0;
	}
	return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       HAMMER_BUFSIZE);
		if (error == 0) {
			volume->ondisk = (void *)volume->io.bp->b_data;
			hammer_ref_interlock_done(&volume->io.lock);
		} else {
			hammer_rel_volume(volume, 1);
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
	struct buf *bp;

	if (hammer_rel_interlock(&volume->io.lock, locked)) {
		volume->ondisk = NULL;
		bp = hammer_io_release(&volume->io, locked);
		hammer_rel_interlock_done(&volume->io.lock, locked);
		if (bp)
			brelse(bp);
	}
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */

/*
 * Helper function returns whether a zone offset can be directly translated
 * to a raw buffer index or not.  Really only the volume and undo zones
 * can't be directly translated.  Volumes are special-cased and undo zones
 * shouldn't be accessed via an alias in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
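 *
 * The zone field of the offset is extracted with HAMMER_ZONE_DECODE()
 * in the switch below.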
 */
static __inline int
hammer_direct_zone(hammer_off_t buf_offset)
{
	switch(HAMMER_ZONE_DECODE(buf_offset)) {
	case HAMMER_ZONE_RAW_BUFFER_INDEX:
	case HAMMER_ZONE_FREEMAP_INDEX:
	case HAMMER_ZONE_BTREE_INDEX:
	case HAMMER_ZONE_META_INDEX:
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		return(1);
	default:
		return(0);
	}
	/* NOT REACHED */
}

hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.  Shortcut the operation if the
		 * ondisk structure is valid.
		 */
found_aliased:
		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
			hammer_io_advance(&buffer->io);
			KKASSERT(buffer->ondisk);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * 0->1 transition or deferred 0->1 transition (CHECK),
		 * interlock now held.  Shortcut if ondisk is already
		 * assigned.
		 */
		++hammer_count_refedbufs;
		if (buffer->ondisk) {
			hammer_io_advance(&buffer->io);
			hammer_ref_interlock_done(&buffer->io.lock);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt
		 * so the io_token must be held.
		 */
		if (buffer->io.mod_root == &hmp->lose_root) {
			lwkt_gettoken(&hmp->io_token);
			if (buffer->io.mod_root == &hmp->lose_root) {
				RB_REMOVE(hammer_mod_rb_tree,
					  buffer->io.mod_root, &buffer->io);
				buffer->io.mod_root = NULL;
				KKASSERT(buffer->io.modified == 0);
			}
			lwkt_reltoken(&hmp->io_token);
		}
		goto found;
	} else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
		/*
		 * If this is a read-only mount there could be an alias
		 * in the raw-zone.  If there is we use that buffer instead.
		 *
		 * rw mounts will not have aliases.  Also note when going
		 * from ro -> rw the recovered raw buffers are flushed and
		 * reclaimed, so again there will not be any aliases once
		 * the mount is rw.
		 */
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   (buf_offset & ~HAMMER_OFF_ZONE_MASK) |
				   HAMMER_ZONE_RAW_BUFFER);
		if (buffer) {
			kprintf("HAMMER: recovered aliased %016jx\n",
				(intmax_t)buf_offset);
			goto found_aliased;
		}
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  Inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref_interlock_true(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;			/* safety */
		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
			hammer_rel_interlock_done(&buffer->io.lock, 1);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * The buffer is referenced and interlocked.  Load the buffer
	 * if necessary.  hammer_load_buffer() deals with the interlock
	 * and, if an error is returned, also deals with the ref.
	 */
	if (buffer->ondisk == NULL) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp)
			buffer = NULL;
	} else {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
		*errorp = 0;
	}
	return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
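 *
 * The loop below walks the range one HAMMER_BUFSIZE buffer at a time,
 * waiting out any write in progress and flushing any buffer that is
 * still modified.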
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (hammer_debug_general & 0x20000) {
				kprintf("hammer: delbufr %016jx "
					"rerr=%d 1ref=%d\n",
					(intmax_t)buffer->zoneX_offset,
					error,
					hammer_oneref(&buffer->io.lock));
			}
			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p rep=%d\n",
					(long long)base_offset,
					buffer, report_conflicts);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}

/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;

	if (hammer_debug_io & 0x0004) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			(long long)buffer->zoneX_offset,
			(long long)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		/*
		 * Issue the read or generate a new buffer.  When reading
		 * the limit argument controls any read-ahead clustering
		 * hammer_io_read() is allowed to do.
		 *
		 * We cannot read-ahead in the large-data zone and we cannot
		 * cross a largeblock boundary as the next largeblock might
		 * use a different buffer size.
		 */
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
			   HAMMER_ZONE_LARGE_DATA) {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       buffer->io.bytes);
		} else {
			hammer_off_t limit;

			limit = (buffer->zone2_offset +
				 HAMMER_LARGEBLOCK_MASK64) &
				~HAMMER_LARGEBLOCK_MASK64;
			limit -= buffer->zone2_offset;
			error = hammer_io_read(volume->devvp, &buffer->io,
					       limit);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0) {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
	} else {
		hammer_rel_buffer(buffer, 1);
	}
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	struct hammer_volume *volume = (struct hammer_volume *) data;

	/*
	 * If volume != NULL we are only interested in unloading buffers
	 * associated with a particular volume.
	 */
	if (volume != NULL && volume != buffer->io.volume)
		return 0;

	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref.  Expect a 0->1 transition.
	 */
	if (buffer->io.ioerror) {
		hammer_io_clear_error_noassert(&buffer->io);
		--hammer_count_refedbufs;
	}
	hammer_ref_interlock_true(&buffer->io.lock);
	++hammer_count_refedbufs;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 1);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	hammer_mount_t hmp;
	int error;
	int locked;

	/*
	 * Acquire a ref, plus the buffer will be interlocked on the
	 * 0->1 transition.
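	 *
	 * hammer_load_buffer() below is only called when this was the
	 * 0->1 transition (locked != 0).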
	 */
	locked = hammer_ref_interlock(&buffer->io.lock);
	hmp = buffer->io.hmp;

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.  lose_list requires the io_token.
	 */
	if (buffer->io.mod_root == &hmp->lose_root) {
		lwkt_gettoken(&hmp->io_token);
		if (buffer->io.mod_root == &hmp->lose_root) {
			RB_REMOVE(hammer_mod_rb_tree,
				  buffer->io.mod_root, &buffer->io);
			buffer->io.mod_root = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}

	if (locked) {
		++hammer_count_refedbufs;
		error = hammer_load_buffer(buffer, 0);
		/* NOTE: on error the buffer pointer is stale */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
		return;

	/*
	 * hammer_count_refedbufs accounting.  Decrement if we are in
	 * the error path or if CHECK is clear.
	 *
	 * If we are not in the error path and CHECK is set the caller
	 * probably just did a hammer_ref() and didn't account for it,
	 * so we don't account for the loss here.
	 */
	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
		--hammer_count_refedbufs;

	/*
	 * If the caller locked us, or the normal release transitioned
	 * from 1->0 (and acquired the lock), attempt to release the io.
	 * If the caller locked us we tell hammer_io_release() to flush
	 * (which would be the unload or failure path).
	 */
	bp = hammer_io_release(&buffer->io, locked);

	/*
	 * If the buffer has no bp association and no refs we can destroy
	 * it.
	 *
	 * NOTE: It is impossible for any associated B-Tree nodes to have
	 * refs if the buffer has no additional refs.
	 */
	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
		RB_REMOVE(hammer_buf_rb_tree,
			  &buffer->io.hmp->rb_bufs_root,
			  buffer);
		volume = buffer->io.volume;
		buffer->io.volume = NULL;	/* sanity */
		hammer_rel_volume(volume, 0);
		hammer_io_clear_modlist(&buffer->io);
		hammer_flush_buffer_nodes(buffer);
		KKASSERT(TAILQ_EMPTY(&buffer->clist));
		freeme = 1;
	}

	/*
	 * Cleanup
	 */
	hammer_rel_interlock_done(&buffer->io.lock, locked);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
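 *
 * xoff, computed below, is the byte offset within the underlying buffer;
 * the returned pointer is buffer->ondisk + xoff.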
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;
	int doload;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
		doload = hammer_ref_interlock_true(&node->lock);
	} else {
		doload = hammer_ref_interlock(&node->lock);
	}
	if (doload) {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
		if (*errorp)
			node = NULL;
	} else {
		KKASSERT(node->ondisk);
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	}
	return(node);
}

/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
	hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
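		 * If that happens the code below re-establishes the clist
		 * association once the buffer reference succeeds.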
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	if (error) {
		_hammer_rel_node(node, 1);
	} else {
		hammer_ref_interlock_done(&node->lock);
	}
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;
	int doload;

	node = cache->node;
	if (node != NULL) {
		doload = hammer_ref_interlock(&node->lock);
		if (doload) {
			*errorp = hammer_load_node(trans, node, 0);
			if (*errorp)
				node = NULL;
		} else {
			KKASSERT(node->ondisk);
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
				_hammer_rel_node(node, 0);
				node = NULL;
			} else {
				*errorp = 0;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
void
_hammer_rel_node(hammer_node_t node, int locked)
{
	hammer_buffer_t buffer;

	/*
	 * Deref the node.  If this isn't the 1->0 transition we're basically
	 * done.  If locked is non-zero this function will just deref the
	 * locked node and return TRUE, otherwise it will deref the locked
	 * node and either lock and return TRUE on the 1->0 transition or
	 * not lock and return FALSE.
	 */
	if (hammer_rel_interlock(&node->lock, locked) == 0)
		return;

	/*
	 * Either locked was non-zero and we are interlocked, or the
	 * hammer_rel_interlock() call returned non-zero and we are
	 * interlocked.
	 *
	 * The ref-count must still be decremented if locked != 0 so
	 * the cleanup required still varies a bit.
	 *
	 * hammer_flush_node() when called with 1 or 2 will dispose of
	 * the lock and possible ref-count.
	 */
	if (node->ondisk == NULL) {
		hammer_flush_node(node, locked + 1);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC) {
		hammer_rel_interlock_done(&node->lock, locked);
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		/*
		 * Normal release.
		 */
		hammer_rel_interlock_done(&node->lock, locked);
	} else {
		/*
		 * Destroy the node.
		 */
		hammer_flush_node(node, locked + 1);
		/* node is stale */

	}
	hammer_rel_buffer(buffer, 0);
}

void
hammer_rel_node(hammer_node_t node)
{
	_hammer_rel_node(node, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node, 0);
	}
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 *	locked == 0	Normal unlocked operation
 *	locked == 1	Call hammer_rel_interlock_done(..., 0);
 *	locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;
	int dofree;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}

	/*
	 * NOTE: refs is predisposed if another thread is blocking and
	 *	 will be larger than 0 in that case.  We aren't MPSAFE
	 *	 here.
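	 *
	 * A node with no backing store (ondisk == NULL) and no refs is
	 * removed from the RB tree and freed below.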
	 */
	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		dofree = 1;
	} else {
		dofree = 0;
	}

	/*
	 * Deal with the interlock if locked == 1 or locked == 2.
	 */
	if (locked)
		hammer_rel_interlock_done(&node->lock, locked - 1);

	/*
	 * Destroy if requested
	 */
	if (dofree) {
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (hammer_try_interlock_norefs(&node->lock)) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			_hammer_rel_node(node, 1);
		} else {
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}


/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
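 *
 * For DATA and DB records, allocations of HAMMER_BUFSIZE / 2 bytes or
 * less go to the small-data zone while larger allocations are rounded
 * up to a buffer boundary and placed in the large-data zone; other
 * record types are allocated out of the meta zone (see the switch below).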
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 *
 * If doing a lazy sync make just one pass on the vnode list, ignoring
 * any new vnodes added to the list while the sync is in progress.
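 *
 * A lazy sync sets VMSC_ONEPASS below; a full MNT_WAIT sync runs the
 * flusher synchronously twice.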
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;
	int flags;

	flags = VMSC_GETVP;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, flags | VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);

	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, flags,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}