/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the scan,
 * so this routine always returns 0, even on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	struct buf *bp;

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;
	bp = hammer_io_release(&volume->io, 1);

	/*
	 * Clean up the persistent ref ioerror might have on the volume
	 */
	if (volume->io.ioerror) {
		volume->io.ioerror = 0;
		hammer_unref(&volume->io.lock);
	}

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	if (bp)
		brelse(bp);

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}
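
/*
 * Typical caller pattern for the volume routines above (an illustrative
 * sketch only, using functions and fields defined in this file): a volume
 * is looked up by number, its in-memory header is consulted, and the
 * reference is dropped when finished.
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... use volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */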

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	struct buf *bp = NULL;

	crit_enter();
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			bp = hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
	if (bp)
		brelse(bp);
	crit_exit();
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);

		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.
		 */
		if (buffer->ondisk && buffer->io.loading == 0) {
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			crit_enter();	/* biodone race against list */
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			crit_exit();
			buffer->io.mod_list = NULL;
			KKASSERT(buffer->io.modified == 0);
		}
		goto found;
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_unref(&buffer->io.lock);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * Deal with on-disk info and loading races.
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && buffer->io.lock.refs != 1)
				error = EAGAIN;
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts || (hammer_debug_general & 0x8000))
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx rep=%d\n",
					base_offset, report_conflicts);
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			buffer->zoneX_offset, buffer->zone2_offset, isnew,
			buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref (steal ioerror's if we can).
	 */
	if (buffer->io.ioerror) {
		buffer->io.ioerror = 0;
	} else {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);
	}

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	if (buffer->io.lock.refs == 0)
		++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		crit_enter();
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
		crit_exit();
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	crit_enter();
	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			bp = hammer_io_release(&buffer->io, flush);

			if (buffer->io.lock.refs == 1)
				--hammer_count_refedbufs;

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 *
				 * NOTE: It is impossible for any associated
				 * B-Tree nodes to have refs if the buffer
				 * has no additional refs.
				 */
				RB_REMOVE(hammer_buf_rb_tree,
					  &buffer->io.hmp->rb_bufs_root,
					  buffer);
				volume = buffer->io.volume;
				buffer->io.volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				hammer_io_clear_modlist(&buffer->io);
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	crit_exit();
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
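
/*
 * Typical caller pattern for the bread interface (an illustrative sketch
 * only): callers keep a cached buffer pointer across consecutive reads,
 * starting with a NULL *bufferp, and release the final buffer themselves.
 *
 *	struct hammer_buffer *buffer = NULL;
 *	void *data;
 *	int error;
 *
 *	data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	... consume data ...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */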

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc,
			       M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk) {
		*errorp = 0;
	} else {
		*errorp = hammer_load_node(node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
	}
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
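
/*
 * Typical caller pattern (an illustrative sketch only, using functions
 * defined in this file): a B-Tree node is acquired by offset, examined
 * via node->ondisk, and released when finished.
 *
 *	node = hammer_get_node(trans, node_offset, 0, &error);
 *	if (node) {
 *		... examine node->ondisk ...
 *		hammer_rel_node(node);
 *	}
 */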

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0)
				Debugger("CRC FAILED: B-TREE NODE");
			node->flags |= HAMMER_NODE_CRCGOOD;
		}
	}
failed:
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;

	node = cache->node;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk)
			*errorp = 0;
		else
			*errorp = hammer_load_node(node, 0);
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC)
		return;

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node);
	}
}
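
/*
 * Illustrative use of the passive cache (a sketch only; the stand-alone,
 * zero-initialized cache variable shown here is hypothetical, such caches
 * are normally embedded in longer-lived structures such as inodes).  The
 * node may be released immediately after being cached and reacquired later
 * via hammer_ref_node_safe():
 *
 *	struct hammer_node_cache mycache;	(zeroed before first use)
 *	...
 *	hammer_cache_node(&mycache, node);
 *	hammer_rel_node(node);
 *	...
 *	node = hammer_ref_node_safe(hmp, &mycache, &error);
 */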

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}


/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone,
						      data_len, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}
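
/*
 * Typical caller pattern for hammer_alloc_data() (an illustrative sketch
 * only), following the rules in the comment above: the buffer pointer
 * starts out NULL and the caller releases whatever buffer comes back.
 *
 *	struct hammer_buffer *data_buffer = NULL;
 *	hammer_off_t data_offset;
 *	void *data;
 *
 *	data = hammer_alloc_data(trans, data_len, rec_type,
 *				 &data_offset, &data_buffer, &error);
 *	... fill in the returned data ...
 *	if (data_buffer)
 *		hammer_rel_buffer(data_buffer, 0);
 */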

/*
 * Sync dirty buffers to the media and clean up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT);
	if (error)
		info->error = error;
	return(0);
}