/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
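/*
 * Illustrative sketch (not in the original source): RB_GENERATE2 emits
 * keyed lookup functions, so the trees above can be queried directly by
 * vol_no, zoneX_offset or node_offset.  The lookups used throughout
 * this file take the form:
 *
 *	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
 *			   buf_offset & ~HAMMER_BUFMASK64);
 */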
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
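/*
 * Usage sketch (assumption; the actual mount loop lives in
 * hammer_vfsops.c): the mount path installs each configured volume and
 * then verifies that the set is complete, roughly:
 *
 *	for (i = 0; error == 0 && i < nvolumes; ++i)
 *		error = hammer_install_volume(hmp, volnames[i], NULL);
 *	if (error == 0)
 *		error = hammer_mountcheck_volumes(hmp);
 */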
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned to abort the scan on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;

	/*
	 * Clean up the persistent ref ioerror might have on the volume
	 */
	if (volume->io.ioerror)
		hammer_io_clear_error_noassert(&volume->io);

	/*
	 * This should release the bp.  Releasing the volume with flush set
	 * implies the interlock is set.
	 */
	hammer_ref_interlock_true(&volume->io.lock);
	hammer_rel_volume(volume, 1);
	KKASSERT(volume->io.bp == NULL);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(hammer_norefs(&volume->io.lock));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
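/*
 * Usage sketch (assumption; the actual call is expected in
 * hammer_vfsops.c): unmount walks the volume tree with this callback:
 *
 *	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
 *		hammer_unload_volume, NULL);
 */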
static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}

	/*
	 * Reference the volume, load/check the data on the 0->1 transition.
	 * hammer_load_volume() will dispose of the interlock on return,
	 * and also clean up the ref count on error.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		error = hammer_load_volume(volume);
	} else {
		KKASSERT(volume->ondisk);
		error = 0;
	}
	return (error);
}
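/*
 * Illustrative caller pattern (sketch only): a volume reference is
 * normally paired with a release once the ondisk data has been used:
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... use volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */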
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       HAMMER_BUFSIZE);
		if (error == 0) {
			volume->ondisk = (void *)volume->io.bp->b_data;
			hammer_ref_interlock_done(&volume->io.lock);
		} else {
			hammer_rel_volume(volume, 1);
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
	struct buf *bp;

	if (hammer_rel_interlock(&volume->io.lock, locked)) {
		volume->ondisk = NULL;
		bp = hammer_io_release(&volume->io, locked);
		hammer_rel_interlock_done(&volume->io.lock, locked);
		if (bp)
			brelse(bp);
	}
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
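/*
 * Illustrative sketch (not from the original source): translating a
 * blockmap-backed zoneX offset to its zone-2 (raw buffer) equivalent
 * goes through the blockmap, after which the volume and raw offset can
 * be decoded:
 *
 *	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
 *	vol_no = HAMMER_VOL_DECODE(zone2_offset);
 *
 * hammer_get_buffer() below performs exactly this translation.
 */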
/*
 * Helper function returns whether a zone offset can be directly translated
 * to a raw buffer index or not.  Really only the volume and undo zones
 * can't be directly translated.  Volumes are special-cased and undo zones
 * shouldn't be alias-accessed in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
 */
static __inline int
hammer_direct_zone(hammer_off_t buf_offset)
{
	switch(HAMMER_ZONE_DECODE(buf_offset)) {
	case HAMMER_ZONE_RAW_BUFFER_INDEX:
	case HAMMER_ZONE_FREEMAP_INDEX:
	case HAMMER_ZONE_BTREE_INDEX:
	case HAMMER_ZONE_META_INDEX:
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		return(1);
	default:
		return(0);
	}
	/* NOT REACHED */
}

hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.  Shortcut the operation if the
		 * ondisk structure is valid.
		 */
found_aliased:
		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
			hammer_io_advance(&buffer->io);
			KKASSERT(buffer->ondisk);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * 0->1 transition or deferred 0->1 transition (CHECK),
		 * interlock now held.  Shortcut if ondisk is already
		 * assigned.
		 */
		++hammer_count_refedbufs;
		if (buffer->ondisk) {
			hammer_io_advance(&buffer->io);
			hammer_ref_interlock_done(&buffer->io.lock);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt
		 * so the io_token must be held.
		 */
		if (buffer->io.mod_root == &hmp->lose_root) {
			lwkt_gettoken(&hmp->io_token);
			if (buffer->io.mod_root == &hmp->lose_root) {
				RB_REMOVE(hammer_mod_rb_tree,
					  buffer->io.mod_root, &buffer->io);
				buffer->io.mod_root = NULL;
				KKASSERT(buffer->io.modified == 0);
			}
			lwkt_reltoken(&hmp->io_token);
		}
		goto found;
	} else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
		/*
		 * If this is a read-only mount there could be an alias
		 * in the raw-zone.  If there is we use that buffer instead.
		 *
		 * rw mounts will not have aliases.  Also note when going
		 * from ro -> rw the recovered raw buffers are flushed and
		 * reclaimed, so again there will not be any aliases once
		 * the mount is rw.
		 */
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   (buf_offset & ~HAMMER_OFF_ZONE_MASK) |
				   HAMMER_ZONE_RAW_BUFFER);
		if (buffer) {
			kprintf("HAMMER: recovered aliased %016jx\n",
				(intmax_t)buf_offset);
			goto found_aliased;
		}
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref_interlock_true(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;			/* safety */
		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
			hammer_rel_interlock_done(&buffer->io.lock, 1);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * The buffer is referenced and interlocked.  Load the buffer
	 * if necessary.  hammer_load_buffer() deals with the interlock
	 * and, if an error is returned, also deals with the ref.
	 */
	if (buffer->ondisk == NULL) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp)
			buffer = NULL;
	} else {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
		*errorp = 0;
	}
	return(buffer);
}
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}
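/*
 * Illustrative sketch (assumption; the actual caller lives in the
 * direct-read path in hammer_io.c): before bypassing the buffer
 * subsystem a caller would sync the buffer-aligned extent backing a
 * large-data record, roughly:
 *
 *	hammer_sync_buffers(hmp, rec_offset & ~HAMMER_BUFMASK64,
 *			    (rec_len + HAMMER_BUFMASK) & ~HAMMER_BUFMASK);
 */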
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (hammer_debug_general & 0x20000) {
				kprintf("hammer: delbufr %016jx "
					"rerr=%d 1ref=%d\n",
					(intmax_t)buffer->zoneX_offset,
					error,
					hammer_oneref(&buffer->io.lock));
			}
			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p rep=%d\n",
					(long long)base_offset,
					buffer, report_conflicts);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}
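/*
 * Illustrative sketch (assumption): the blockmap free path would
 * invalidate an entire big-block's worth of buffers in one call:
 *
 *	error = hammer_del_buffers(hmp, base_offset, zone2_offset,
 *				   HAMMER_LARGEBLOCK_SIZE, 0);
 */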
/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;

	if (hammer_debug_io & 0x0004) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			(long long)buffer->zoneX_offset,
			(long long)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		/*
		 * Issue the read or generate a new buffer.  When reading
		 * the limit argument controls any read-ahead clustering
		 * hammer_io_read() is allowed to do.
		 *
		 * We cannot read-ahead in the large-data zone and we cannot
		 * cross a largeblock boundary as the next largeblock might
		 * use a different buffer size.
		 */
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
			   HAMMER_ZONE_LARGE_DATA) {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       buffer->io.bytes);
		} else {
			hammer_off_t limit;

			limit = (buffer->zone2_offset +
				 HAMMER_LARGEBLOCK_MASK64) &
				~HAMMER_LARGEBLOCK_MASK64;
			limit -= buffer->zone2_offset;
			error = hammer_io_read(volume->devvp, &buffer->io,
					       limit);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0) {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
	} else {
		hammer_rel_buffer(buffer, 1);
	}
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	struct hammer_volume *volume = (struct hammer_volume *) data;

	/*
	 * If volume != NULL we are only interested in unloading buffers
	 * associated with a particular volume.
	 */
	if (volume != NULL && volume != buffer->io.volume)
		return 0;

	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref.  Expect a 0->1 transition.
	 */
	if (buffer->io.ioerror) {
		hammer_io_clear_error_noassert(&buffer->io);
		--hammer_count_refedbufs;
	}
	hammer_ref_interlock_true(&buffer->io.lock);
	++hammer_count_refedbufs;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 1);
	return(0);
}
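/*
 * Illustrative sketch (assumption): when a single volume is removed its
 * buffers can be unloaded selectively by passing the volume as the
 * RB_SCAN argument, e.g.
 *
 *	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
 *		hammer_unload_buffer, volume);
 *
 * Passing NULL instead unloads every cached buffer (the unmount case).
 */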
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	hammer_mount_t hmp;
	int error;
	int locked;

	/*
	 * Acquire a ref, plus the buffer will be interlocked on the
	 * 0->1 transition.
	 */
	locked = hammer_ref_interlock(&buffer->io.lock);
	hmp = buffer->io.hmp;

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.  lose_list requires the io_token.
	 */
	if (buffer->io.mod_root == &hmp->lose_root) {
		lwkt_gettoken(&hmp->io_token);
		if (buffer->io.mod_root == &hmp->lose_root) {
			RB_REMOVE(hammer_mod_rb_tree,
				  buffer->io.mod_root, &buffer->io);
			buffer->io.mod_root = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}

	if (locked) {
		++hammer_count_refedbufs;
		error = hammer_load_buffer(buffer, 0);
		/* NOTE: on error the buffer pointer is stale */
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
		return;

	/*
	 * hammer_count_refedbufs accounting.  Decrement if we are in
	 * the error path or if CHECK is clear.
	 *
	 * If we are not in the error path and CHECK is set the caller
	 * probably just did a hammer_ref() and didn't account for it,
	 * so we don't account for the loss here.
	 */
	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
		--hammer_count_refedbufs;

	/*
	 * If the caller locked us, or the normal release transitioned
	 * from 1->0 (and acquired the interlock), attempt to release
	 * the io.  If the caller locked us we tell hammer_io_release()
	 * to flush (which would be the unload or failure path).
	 */
	bp = hammer_io_release(&buffer->io, locked);

	/*
	 * If the buffer has no bp association and no refs we can destroy
	 * it.
	 *
	 * NOTE: It is impossible for any associated B-Tree nodes to have
	 * refs if the buffer has no additional refs.
	 */
	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
		RB_REMOVE(hammer_buf_rb_tree,
			  &buffer->io.hmp->rb_bufs_root,
			  buffer);
		volume = buffer->io.volume;
		buffer->io.volume = NULL;	/* sanity */
		hammer_rel_volume(volume, 0);
		hammer_io_clear_modlist(&buffer->io);
		hammer_flush_buffer_nodes(buffer);
		KKASSERT(TAILQ_EMPTY(&buffer->clist));
		freeme = 1;
	}

	/*
	 * Cleanup
	 */
	hammer_rel_interlock_done(&buffer->io.lock, locked);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
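/*
 * Usage sketch (illustrative only): callers typically hold one cached
 * buffer across a series of reads and release it when done, letting
 * *bufferp act as a one-entry cache:
 *
 *	struct hammer_buffer *buffer = NULL;
 *	void *data;
 *
 *	data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	... more hammer_bread() calls reusing &buffer ...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */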
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;
	int doload;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
		doload = hammer_ref_interlock_true(&node->lock);
	} else {
		doload = hammer_ref_interlock(&node->lock);
	}
	if (doload) {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
		if (*errorp)
			node = NULL;
	} else {
		KKASSERT(node->ondisk);
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	}
	return(node);
}
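/*
 * Usage sketch (illustrative only): node references follow the same
 * get/release discipline as buffers and volumes:
 *
 *	node = hammer_get_node(trans, node_offset, 0, &error);
 *	if (node) {
 *		... examine node->ondisk ...
 *		hammer_rel_node(node);
 *	}
 */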
/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
	hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	if (error) {
		_hammer_rel_node(node, 1);
	} else {
		hammer_ref_interlock_done(&node->lock);
	}
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;
	int doload;

	node = cache->node;
	if (node != NULL) {
		doload = hammer_ref_interlock(&node->lock);
		if (doload) {
			*errorp = hammer_load_node(trans, node, 0);
			if (*errorp)
				node = NULL;
		} else {
			KKASSERT(node->ondisk);
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
				_hammer_rel_node(node, 0);
				node = NULL;
			} else {
				*errorp = 0;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
void
_hammer_rel_node(hammer_node_t node, int locked)
{
	hammer_buffer_t buffer;

	/*
	 * Deref the node.  If this isn't the 1->0 transition we're basically
	 * done.  If locked is non-zero this function will just deref the
	 * locked node and return TRUE, otherwise it will deref the node
	 * and either lock and return TRUE on the 1->0 transition or
	 * not lock and return FALSE.
	 */
	if (hammer_rel_interlock(&node->lock, locked) == 0)
		return;

	/*
	 * Either locked was non-zero and we are interlocked, or the
	 * hammer_rel_interlock() call returned non-zero and we are
	 * interlocked.
	 *
	 * The ref-count must still be decremented if locked != 0 so
	 * the cleanup required still varies a bit.
	 *
	 * hammer_flush_node() when called with 1 or 2 will dispose of
	 * the lock and possible ref-count.
	 */
	if (node->ondisk == NULL) {
		hammer_flush_node(node, locked + 1);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC) {
		hammer_rel_interlock_done(&node->lock, locked);
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		/*
		 * Normal release.
		 */
		hammer_rel_interlock_done(&node->lock, locked);
	} else {
		/*
		 * Destroy the node.
		 */
		hammer_flush_node(node, locked + 1);
		/* node is stale */

	}
	hammer_rel_buffer(buffer, 0);
}

void
hammer_rel_node(hammer_node_t node)
{
	_hammer_rel_node(node, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node, 0);
	}
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 *	locked == 0	Normal unlocked operation
 *	locked == 1	Call hammer_rel_interlock_done(..., 0);
 *	locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;
	int dofree;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}

	/*
	 * NOTE: refs is predisposed if another thread is blocking and
	 *	 will be larger than 0 in that case.  We aren't MPSAFE
	 *	 here.
	 */
	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		dofree = 1;
	} else {
		dofree = 0;
	}

	/*
	 * Deal with the interlock if locked == 1 or locked == 2.
	 */
	if (locked)
		hammer_rel_interlock_done(&node->lock, locked - 1);

	/*
	 * Destroy if requested
	 */
	if (dofree) {
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (hammer_try_interlock_norefs(&node->lock)) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			_hammer_rel_node(node, 1);
		} else {
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}


/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}
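/*
 * Illustrative call sketch (assumption; src, data_len and hint are
 * placeholders): a record-creation path would allocate and fill its
 * data area roughly as follows:
 *
 *	struct hammer_buffer *data_buffer = NULL;
 *	hammer_off_t data_offset;
 *	void *data;
 *
 *	data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
 *				 &data_offset, &data_buffer, hint, &error);
 *	if (data) {
 *		hammer_modify_buffer(trans, data_buffer, NULL, 0);
 *		bcopy(src, data, data_len);
 *		hammer_modify_buffer_done(data_buffer);
 *		hammer_rel_buffer(data_buffer, 0);
 *	}
 */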
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}
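/*
 * Illustrative sketch (assumption): the VFS sync entry point in
 * hammer_vfsops.c is expected to funnel into hammer_sync_hmp() below,
 * roughly:
 *
 *	error = hammer_sync_hmp(hmp, MNT_WAIT);
 */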
/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 *
 * If doing a lazy sync make just one pass on the vnode list, ignoring
 * any new vnodes added to the list while the sync is in progress.
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;
	int flags;

	flags = VMSC_GETVP;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, flags | VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);

	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, flags,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}