/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
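 * Indexing by zoneX_offset rather than by the translated zone-2 offset
 * keeps cached lookups cheap: callers normally hold a zone-encoded
 * offset, and the blockmap translation is only performed when a new
 * buffer structure has to be instantiated.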
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name,
				     UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred,
					   &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
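	 * The checks below require that the header carry a valid HAMMER
	 * volume signature, that the fsid match the volumes already
	 * installed, and that the volume number not collide with an
	 * existing volume.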
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	struct buf *bp;

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
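	 * Clearing the modified state below throws such data away, and
	 * setting waitdep makes the release wait for any I/O still in
	 * progress before giving up the underlying buffer.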
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;
	bp = hammer_io_release(&volume->io, 1);

	/*
	 * Clean up the persistent ref ioerror might have on the volume.
	 */
	if (volume->io.ioerror) {
		volume->io.ioerror = 0;
		hammer_unref(&volume->io.lock);
	}

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	if (bp)
		brelse(bp);

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.
 * We temporarily acquire an exclusive lock to interlock against
 * releases or multiple gets.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	struct buf *bp = NULL;

	crit_enter();
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			bp = hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
	if (bp)
		brelse(bp);
	crit_exit();
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);

		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.
		 */
		if (buffer->ondisk && buffer->io.loading == 0) {
			*errorp = 0;
			hammer_io_advance(&buffer->io);
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt.
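		 * The critical section below need only cover the list
		 * manipulation itself; once the io is off lose_list a
		 * biodone() can no longer race against us here.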
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			crit_enter();	/* biodone race against list */
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			crit_exit();
			buffer->io.mod_list = NULL;
			KKASSERT(buffer->io.modified == 0);
		}
		goto found;
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_unref(&buffer->io.lock);	/* safety */
		--hammer_count_buffers;
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;	/* safety */
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * Deal with on-disk info and loading races.
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	hammer_io_advance(&buffer->io);
	return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
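 * The loop below walks the range one HAMMER_BUFSIZE at a time and, for
 * each aliased buffer found modified or with write I/O running, waits
 * for the I/O and flushes out any remaining dirty data.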
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && buffer->io.lock.refs != 1) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p rep=%d\n",
					(long long)base_offset,
					buffer, report_conflicts);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			(long long)buffer->zoneX_offset,
			(long long)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref (steal ioerror's if we can).
	 */
	if (buffer->io.ioerror) {
		buffer->io.ioerror = 0;
	} else {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);
	}

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 2);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	if (buffer->io.lock.refs == 0)
		++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		crit_enter();
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
		crit_exit();
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
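 * io.loading is bumped below to interlock against the ondisk tests in
 * hammer_load_buffer() and hammer_get_buffer() while we hold the
 * exclusive lock and potentially tear the structure down.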
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	crit_enter();
	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			bp = hammer_io_release(&buffer->io, flush);

			if (buffer->io.lock.refs == 1)
				--hammer_count_refedbufs;

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 *
				 * NOTE: It is impossible for any associated
				 * B-Tree nodes to have refs if the buffer
				 * has no additional refs.
				 */
				RB_REMOVE(hammer_buf_rb_tree,
					  &buffer->io.hmp->rb_bufs_root,
					  buffer);
				volume = buffer->io.volume;
				buffer->io.volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				hammer_io_clear_modlist(&buffer->io);
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	crit_exit();
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
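 * Unlike _hammer_bread(), the underlying buffer is acquired with isnew
 * set, so the backing block is instantiated without being read from
 * the media.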
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
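	 * If the RB_INSERT below collides we raced another thread
	 * instantiating the same node; the local copy is thrown away
	 * and the lookup retried.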
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc,
			       M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk) {
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	} else {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
	}
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_debug & 0x0002)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
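 * A cached node discovered with a bad CRC fails the reference: EDOM is
 * returned when the transaction opted in via HAMMER_TRANSF_CRCDOM,
 * otherwise EIO.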
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;

	node = cache->node;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk) {
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
			} else {
				*errorp = 0;
			}
		} else {
			*errorp = hammer_load_node(trans, node, 0);
		}
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC)
		return;

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node);
	}
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}


/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

/*
 * Allocate data.
 * If the address of a data buffer is supplied then any prior non-NULL
 * *data_bufferp will be released and *data_bufferp will be set to the
 * related buffer.  The caller must release it when finally done.  The
 * initial *data_bufferp should be set to NULL by the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp,
						data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}

/*
 * Sync dirty buffers to the media and clean up any loose ends.
 *
 * These functions do not start the flusher going; they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
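 * The initial vnode pass always runs MNT_NOWAIT; the second, waiting
 * pass is only made when the caller asked for MNT_WAIT.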
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}
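
/*
 * A minimal sketch (not compiled) of the cached *bufferp idiom used by
 * callers of hammer_bread()/hammer_rel_buffer() above.  The function
 * name and offsets below are hypothetical; only the HAMMER calls are
 * real.
 */
#if 0
static int
example_bread_idiom(hammer_mount_t hmp, hammer_off_t off1, hammer_off_t off2)
{
	struct hammer_buffer *buffer = NULL;	/* must start out NULL */
	void *data;
	int error;

	/*
	 * The first call loads and caches the underlying buffer in
	 * *bufferp.  The second call reuses it when off2 falls within
	 * the same buffer, otherwise it releases the old buffer and
	 * loads the correct one.
	 */
	data = hammer_bread(hmp, off1, &error, &buffer);
	if (data)
		data = hammer_bread(hmp, off2, &error, &buffer);

	/*
	 * The caller owns the final cached reference and must release it.
	 */
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
#endif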