/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan regardless of errors.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	struct buf *bp;

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;
	bp = hammer_io_release(&volume->io, 1);

	/*
	 * Clean up the persistent ref ioerror might have on the volume
	 */
	if (volume->io.ioerror) {
		volume->io.ioerror = 0;
		hammer_unref(&volume->io.lock);
	}

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	if (bp)
		brelse(bp);

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}

int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	struct buf *bp = NULL;

	crit_enter();
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			bp = hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
	if (bp)
		brelse(bp);
	crit_exit();
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}
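
/*
 * Illustrative sketch (editor's example, not part of HAMMER itself):
 * the reference pattern expected by the volume API above.  A caller
 * obtains a referenced, loaded volume with hammer_get_volume() and must
 * balance it with hammer_rel_volume().  The names "hmp" and "vol_no"
 * are assumed to be supplied by the caller.
 *
 *	int error;
 *	hammer_volume_t volume;
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... use volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */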

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);

		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.
		 */
		if (buffer->ondisk && buffer->io.loading == 0) {
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			crit_enter();	/* biodone race against list */
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			crit_exit();
			buffer->io.mod_list = NULL;
			KKASSERT(buffer->io.modified == 0);
		}
		goto found;
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_unref(&buffer->io.lock);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * Deal with on-disk info and loading races.
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(buffer);
}

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}
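
/*
 * Illustrative sketch (editor's example, not part of HAMMER): the basic
 * reference pattern for the buffer API above.  hammer_get_buffer() rounds
 * the zone-X offset down to a HAMMER_BUFSIZE boundary and returns a
 * referenced buffer; the caller must drop the reference with
 * hammer_rel_buffer().  "hmp" and "buf_offset" are assumed to be supplied
 * by the caller.
 *
 *	int error;
 *	hammer_buffer_t buffer;
 *
 *	buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE,
 *				   0, &error);
 *	if (buffer) {
 *		... access buffer->ondisk ...
 *		hammer_rel_buffer(buffer, 0);
 *	}
 */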

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 */
void
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	KKASSERT(error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			hammer_io_inval(volume, zone2_offset);
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			buffer->zoneX_offset, buffer->zone2_offset, isnew,
			buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref (steal ioerror's if we can).
	 */
	if (buffer->io.ioerror) {
		buffer->io.ioerror = 0;
	} else {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);
	}

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	if (buffer->io.lock.refs == 0)
		++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		crit_enter();
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
		crit_exit();
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	crit_enter();
	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			bp = hammer_io_release(&buffer->io, flush);

			if (buffer->io.lock.refs == 1)
				--hammer_count_refedbufs;

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 *
				 * NOTE: It is impossible for any associated
				 * B-Tree nodes to have refs if the buffer
				 * has no additional refs.
				 */
				RB_REMOVE(hammer_buf_rb_tree,
					  &buffer->io.hmp->rb_bufs_root,
					  buffer);
				volume = buffer->io.volume;
				buffer->io.volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				hammer_io_clear_modlist(&buffer->io);
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	crit_exit();
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
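
/*
 * Illustrative sketch (editor's example, not part of HAMMER): the cached
 * *bufferp pattern used by callers of hammer_bread()/hammer_bnew().  The
 * caller starts with a NULL buffer pointer, lets successive calls replace
 * it as needed, and releases whatever buffer is left at the end.  "hmp"
 * and "data_offset" are assumed to be supplied by the caller.
 *
 *	hammer_buffer_t buffer = NULL;
 *	int error;
 *	void *data;
 *
 *	data = hammer_bread(hmp, data_offset, &error, &buffer);
 *	if (data) {
 *		... read from data ...
 *	}
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */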

/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk)
		*errorp = 0;
	else
		*errorp = hammer_load_node(node, isnew);
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0)
				Debugger("CRC FAILED: B-TREE NODE");
			node->flags |= HAMMER_NODE_CRCGOOD;
		}
	}
failed:
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}
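
/*
 * Illustrative sketch (editor's example, not part of HAMMER): the basic
 * reference pattern for B-Tree nodes.  hammer_get_node() returns a
 * referenced node with node->ondisk valid and hammer_rel_node() drops
 * the reference.  "hmp" and "node_offset" are assumed to be supplied by
 * the caller; node_offset must be a HAMMER_ZONE_BTREE offset.
 *
 *	int error;
 *	hammer_node_t node;
 *
 *	node = hammer_get_node(hmp, node_offset, 0, &error);
 *	if (node) {
 *		... examine node->ondisk ...
 *		hammer_rel_node(node);
 *	}
 */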

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;

	node = cache->node;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk)
			*errorp = 0;
		else
			*errorp = hammer_load_node(node, 0);
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC)
		return;

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node);
	}
}
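
/*
 * Illustrative sketch (editor's example, not part of HAMMER): the passive
 * caching pattern for the two routines above.  A long-lived structure
 * embeds a hammer_node_cache and points it at a node it expects to
 * revisit; the cache holds no reference, so the node may be flushed and
 * the cache cleared behind the structure's back, which is why the cached
 * node is reacquired with hammer_ref_node_safe().  "cache" is assumed to
 * be an initialized hammer_node_cache embedded in some caller structure.
 *
 *	node = hammer_get_node(hmp, node_offset, 0, &error);
 *	if (node) {
 *		hammer_cache_node(cache, node);
 *		hammer_rel_node(node);
 *	}
 *	...
 *	node = hammer_ref_node_safe(hmp, cache, &error);
 *	if (node)
 *		hammer_rel_node(node);
 */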

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}


/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
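
/*
 * Illustrative sketch (editor's example, not part of HAMMER): allocating
 * a fresh B-Tree node within a transaction.  The node comes back
 * referenced with node->ondisk already zeroed; the caller initializes it
 * under the appropriate hammer_modify_node*() call and then drops the
 * reference.  "trans" is assumed to be an open hammer_transaction.
 *
 *	int error;
 *	hammer_node_t node;
 *
 *	node = hammer_alloc_btree(trans, &error);
 *	if (node) {
 *		... fill in node->ondisk under a hammer_modify_node*()
 *		    call, then hammer_modify_node_done(node) ...
 *		hammer_rel_node(node);
 *	}
 */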

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone,
						      data_len, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT);
	if (error)
		info->error = error;
	return(0);
}