1 /* 2 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> and 6 * Michael Neumann <mneumann@ntecs.de> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 */ 36 37 #include "hammer.h" 38 #include <sys/fcntl.h> 39 #include <sys/nlookup.h> 40 #include <sys/buf.h> 41 42 #include <sys/buf2.h> 43 44 static int 45 hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly); 46 47 static void 48 hammer_close_device(struct vnode **devvpp, int ronly); 49 50 static int 51 hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp, 52 const char *vol_name, int vol_no, int vol_count, 53 int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size); 54 55 static int 56 hammer_clear_volume_header(struct vnode *devvp); 57 58 struct bigblock_stat { 59 uint64_t total_bigblocks; 60 uint64_t total_free_bigblocks; 61 uint64_t counter; 62 }; 63 64 static int 65 hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume, 66 struct bigblock_stat *stat); 67 68 static int 69 hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume, 70 struct bigblock_stat *stat); 71 72 int 73 hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip, 74 struct hammer_ioc_volume *ioc) 75 { 76 struct hammer_mount *hmp = trans->hmp; 77 struct mount *mp = hmp->mp; 78 hammer_volume_t volume; 79 int error; 80 81 if (mp->mnt_flag & MNT_RDONLY) { 82 kprintf("Cannot add volume to read-only HAMMER filesystem\n"); 83 return (EINVAL); 84 } 85 86 if (hmp->nvolumes + 1 >= HAMMER_MAX_VOLUMES) { 87 kprintf("Max number of HAMMER volumes exceeded\n"); 88 return (EINVAL); 89 } 90 91 if (hammer_lock_ex_try(&hmp->volume_lock) != 0) { 92 kprintf("Another volume operation is in progress!\n"); 93 return (EAGAIN); 94 } 95 96 /* 97 * Find an unused volume number. 
98 */ 99 int free_vol_no = 0; 100 while (free_vol_no < HAMMER_MAX_VOLUMES && 101 RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, free_vol_no)) { 102 ++free_vol_no; 103 } 104 if (free_vol_no >= HAMMER_MAX_VOLUMES) { 105 kprintf("Max number of HAMMER volumes exceeded\n"); 106 hammer_unlock(&hmp->volume_lock); 107 return (EINVAL); 108 } 109 110 struct vnode *devvp = NULL; 111 error = hammer_setup_device(&devvp, ioc->device_name, 0); 112 if (error) 113 goto end; 114 KKASSERT(devvp); 115 error = hammer_format_volume_header( 116 hmp, 117 devvp, 118 hmp->rootvol->ondisk->vol_name, 119 free_vol_no, 120 hmp->nvolumes+1, 121 ioc->vol_size, 122 ioc->boot_area_size, 123 ioc->mem_area_size); 124 hammer_close_device(&devvp, 0); 125 if (error) 126 goto end; 127 128 error = hammer_install_volume(hmp, ioc->device_name, NULL); 129 if (error) 130 goto end; 131 132 hammer_sync_lock_sh(trans); 133 hammer_lock_ex(&hmp->blkmap_lock); 134 135 ++hmp->nvolumes; 136 137 /* 138 * Set each volumes new value of the vol_count field. 139 */ 140 for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) { 141 volume = hammer_get_volume(hmp, vol_no, &error); 142 if (volume == NULL && error == ENOENT) { 143 /* 144 * Skip unused volume numbers 145 */ 146 error = 0; 147 continue; 148 } 149 KKASSERT(volume != NULL && error == 0); 150 hammer_modify_volume_field(trans, volume, vol_count); 151 volume->ondisk->vol_count = hmp->nvolumes; 152 hammer_modify_volume_done(volume); 153 154 /* 155 * Only changes to the header of the root volume 156 * are automatically flushed to disk. For all 157 * other volumes that we modify we do it here. 158 * 159 * No interlock is needed, volume buffers are not 160 * messed with by bioops. 
161 */ 162 if (volume != trans->rootvol && volume->io.modified) { 163 hammer_crc_set_volume(volume->ondisk); 164 hammer_io_flush(&volume->io, 0); 165 } 166 167 hammer_rel_volume(volume, 0); 168 } 169 170 volume = hammer_get_volume(hmp, free_vol_no, &error); 171 KKASSERT(volume != NULL && error == 0); 172 173 struct bigblock_stat stat; 174 error = hammer_format_freemap(trans, volume, &stat); 175 KKASSERT(error == 0); 176 177 /* 178 * Increase the total number of bigblocks and update stat/vstat totals. 179 */ 180 hammer_modify_volume_field(trans, trans->rootvol, 181 vol0_stat_bigblocks); 182 trans->rootvol->ondisk->vol0_stat_bigblocks += stat.total_bigblocks; 183 hammer_modify_volume_done(trans->rootvol); 184 mp->mnt_stat.f_blocks += trans->rootvol->ondisk->vol0_stat_bigblocks * 185 (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE); 186 mp->mnt_vstat.f_blocks += trans->rootvol->ondisk->vol0_stat_bigblocks * 187 (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE); 188 189 /* 190 * Increase the number of free bigblocks 191 * (including the copy in hmp) 192 */ 193 hammer_modify_volume_field(trans, trans->rootvol, 194 vol0_stat_freebigblocks); 195 trans->rootvol->ondisk->vol0_stat_freebigblocks += stat.total_free_bigblocks; 196 hmp->copy_stat_freebigblocks = 197 trans->rootvol->ondisk->vol0_stat_freebigblocks; 198 hammer_modify_volume_done(trans->rootvol); 199 200 hammer_rel_volume(volume, 0); 201 202 hammer_unlock(&hmp->blkmap_lock); 203 hammer_sync_unlock(trans); 204 205 KKASSERT(error == 0); 206 end: 207 hammer_unlock(&hmp->volume_lock); 208 if (error) 209 kprintf("An error occurred: %d\n", error); 210 return (error); 211 } 212 213 214 /* 215 * Remove a volume. 
216 */ 217 int 218 hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip, 219 struct hammer_ioc_volume *ioc) 220 { 221 struct hammer_mount *hmp = trans->hmp; 222 struct mount *mp = hmp->mp; 223 hammer_volume_t volume; 224 int error = 0; 225 226 if (mp->mnt_flag & MNT_RDONLY) { 227 kprintf("Cannot del volume from read-only HAMMER filesystem\n"); 228 return (EINVAL); 229 } 230 231 if (hammer_lock_ex_try(&hmp->volume_lock) != 0) { 232 kprintf("Another volume operation is in progress!\n"); 233 return (EAGAIN); 234 } 235 236 volume = NULL; 237 238 /* 239 * find volume by volname 240 */ 241 for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) { 242 volume = hammer_get_volume(hmp, vol_no, &error); 243 if (volume == NULL && error == ENOENT) { 244 /* 245 * Skip unused volume numbers 246 */ 247 error = 0; 248 continue; 249 } 250 KKASSERT(volume != NULL && error == 0); 251 if (strcmp(volume->vol_name, ioc->device_name) == 0) { 252 break; 253 } 254 hammer_rel_volume(volume, 0); 255 volume = NULL; 256 } 257 258 if (volume == NULL) { 259 kprintf("Couldn't find volume\n"); 260 error = EINVAL; 261 goto end; 262 } 263 264 if (volume == trans->rootvol) { 265 kprintf("Cannot remove root-volume\n"); 266 hammer_rel_volume(volume, 0); 267 error = EINVAL; 268 goto end; 269 } 270 271 /* 272 * 273 */ 274 275 hmp->volume_to_remove = volume->vol_no; 276 277 struct hammer_ioc_reblock reblock; 278 bzero(&reblock, sizeof(reblock)); 279 280 reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION; 281 reblock.key_beg.obj_id = HAMMER_MIN_OBJID; 282 reblock.key_end.localization = HAMMER_MAX_LOCALIZATION; 283 reblock.key_end.obj_id = HAMMER_MAX_OBJID; 284 reblock.head.flags = HAMMER_IOC_DO_FLAGS; 285 reblock.free_level = 0; 286 287 error = hammer_ioc_reblock(trans, ip, &reblock); 288 289 if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) { 290 error = EINTR; 291 } 292 293 if (error) { 294 if (error == EINTR) { 295 kprintf("reblock was interrupted\n"); 296 } else { 297 
kprintf("reblock failed: %d\n", error); 298 } 299 hmp->volume_to_remove = -1; 300 hammer_rel_volume(volume, 0); 301 goto end; 302 } 303 304 /* 305 * Sync filesystem 306 */ 307 int count = 0; 308 while (hammer_flusher_haswork(hmp)) { 309 hammer_flusher_sync(hmp); 310 ++count; 311 if (count >= 5) { 312 if (count == 5) 313 kprintf("HAMMER: flushing."); 314 else 315 kprintf("."); 316 tsleep(&count, 0, "hmrufl", hz); 317 } 318 if (count == 30) { 319 kprintf("giving up"); 320 break; 321 } 322 } 323 kprintf("\n"); 324 325 hammer_sync_lock_sh(trans); 326 hammer_lock_ex(&hmp->blkmap_lock); 327 328 /* 329 * We use stat later to update rootvol's bigblock stats 330 */ 331 struct bigblock_stat stat; 332 error = hammer_free_freemap(trans, volume, &stat); 333 if (error) { 334 kprintf("Failed to free volume. Volume not empty!\n"); 335 hmp->volume_to_remove = -1; 336 hammer_rel_volume(volume, 0); 337 hammer_unlock(&hmp->blkmap_lock); 338 hammer_sync_unlock(trans); 339 goto end; 340 } 341 342 hmp->volume_to_remove = -1; 343 344 hammer_rel_volume(volume, 0); 345 346 /* 347 * Unload buffers 348 */ 349 RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL, 350 hammer_unload_buffer, volume); 351 352 error = hammer_unload_volume(volume, NULL); 353 if (error == -1) { 354 kprintf("Failed to unload volume\n"); 355 hammer_unlock(&hmp->blkmap_lock); 356 hammer_sync_unlock(trans); 357 goto end; 358 } 359 360 volume = NULL; 361 --hmp->nvolumes; 362 363 /* 364 * Set each volume's new value of the vol_count field. 
365 */ 366 for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) { 367 volume = hammer_get_volume(hmp, vol_no, &error); 368 if (volume == NULL && error == ENOENT) { 369 /* 370 * Skip unused volume numbers 371 */ 372 error = 0; 373 continue; 374 } 375 376 KKASSERT(volume != NULL && error == 0); 377 hammer_modify_volume_field(trans, volume, vol_count); 378 volume->ondisk->vol_count = hmp->nvolumes; 379 hammer_modify_volume_done(volume); 380 381 /* 382 * Only changes to the header of the root volume 383 * are automatically flushed to disk. For all 384 * other volumes that we modify we do it here. 385 * 386 * No interlock is needed, volume buffers are not 387 * messed with by bioops. 388 */ 389 if (volume != trans->rootvol && volume->io.modified) { 390 hammer_crc_set_volume(volume->ondisk); 391 hammer_io_flush(&volume->io, 0); 392 } 393 394 hammer_rel_volume(volume, 0); 395 } 396 397 /* 398 * Update the total number of bigblocks 399 */ 400 hammer_modify_volume_field(trans, trans->rootvol, 401 vol0_stat_bigblocks); 402 trans->rootvol->ondisk->vol0_stat_bigblocks -= stat.total_bigblocks; 403 hammer_modify_volume_done(trans->rootvol); 404 405 /* 406 * Update the number of free bigblocks 407 * (including the copy in hmp) 408 */ 409 hammer_modify_volume_field(trans, trans->rootvol, 410 vol0_stat_freebigblocks); 411 trans->rootvol->ondisk->vol0_stat_freebigblocks -= stat.total_free_bigblocks; 412 hmp->copy_stat_freebigblocks = 413 trans->rootvol->ondisk->vol0_stat_freebigblocks; 414 hammer_modify_volume_done(trans->rootvol); 415 416 417 hammer_unlock(&hmp->blkmap_lock); 418 hammer_sync_unlock(trans); 419 420 /* 421 * Erase the volume header of the removed device. 422 * 423 * This is to not accidentally mount the volume again. 
424 */ 425 struct vnode *devvp = NULL; 426 error = hammer_setup_device(&devvp, ioc->device_name, 0); 427 if (error) { 428 kprintf("Failed to open device: %s\n", ioc->device_name); 429 goto end; 430 } 431 KKASSERT(devvp); 432 error = hammer_clear_volume_header(devvp); 433 if (error) { 434 kprintf("Failed to clear volume header of device: %s\n", 435 ioc->device_name); 436 goto end; 437 } 438 hammer_close_device(&devvp, 0); 439 440 KKASSERT(error == 0); 441 end: 442 hammer_unlock(&hmp->volume_lock); 443 return (error); 444 } 445 446 447 int 448 hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip, 449 struct hammer_ioc_volume_list *ioc) 450 { 451 struct hammer_mount *hmp = trans->hmp; 452 hammer_volume_t volume; 453 int error = 0; 454 int i, cnt, len; 455 456 for (i = 0, cnt = 0; i < HAMMER_MAX_VOLUMES && cnt < ioc->nvols; i++) { 457 volume = hammer_get_volume(hmp, i, &error); 458 if (volume == NULL && error == ENOENT) { 459 error = 0; 460 continue; 461 } 462 KKASSERT(volume != NULL && error == 0); 463 464 len = strlen(volume->vol_name) + 1; 465 KKASSERT(len <= MAXPATHLEN); 466 467 error = copyout(volume->vol_name, ioc->vols[cnt].device_name, 468 len); 469 if (error) { 470 hammer_rel_volume(volume, 0); 471 return (error); 472 } 473 cnt++; 474 hammer_rel_volume(volume, 0); 475 } 476 ioc->nvols = cnt; 477 478 return (error); 479 } 480 481 /* 482 * Iterate over all usable L1 entries of the volume and 483 * the corresponding L2 entries. 
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
	int (*callback)(hammer_transaction_t, hammer_volume_t, hammer_buffer_t*,
		struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2*,
		hammer_off_t, hammer_off_t, void*),
	void *data)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	hammer_buffer_t buffer = NULL;
	int error = 0;

	hammer_off_t phys_off;
	hammer_off_t block_off;
	hammer_off_t layer1_off;
	hammer_off_t layer2_off;
	hammer_off_t aligned_buf_end_off;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;

	/*
	 * Calculate the usable size of the volume, which
	 * must be aligned at a bigblock (8 MB) boundary.
	 */
	aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_LARGEBLOCK_MASK64));

	/*
	 * Iterate the volume's address space in chunks of 4 TB, where each
	 * chunk consists of at least one physically available 8 MB bigblock.
	 *
	 * For each chunk we need one L1 entry and one L2 bigblock.
	 * We use the first bigblock of each chunk as L2 block.
	 *
	 * Callback contract: every L2 entry of a chunk is presented first
	 * (layer1 == NULL), then the chunk's single L1 entry is presented
	 * (layer2 == NULL).  A non-zero return aborts the iteration and is
	 * passed through to the caller.  The shared *buffer is recycled by
	 * hammer_bread() and released once at the end.
	 */
	for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_off < aligned_buf_end_off;
	     phys_off += HAMMER_BLOCKMAP_LAYER2) {
		for (block_off = 0;
		     block_off < HAMMER_BLOCKMAP_LAYER2;
		     block_off += HAMMER_LARGEBLOCK_SIZE) {
			layer2_off = phys_off +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
			layer2 = hammer_bread(hmp, layer2_off, &error, &buffer);
			if (error)
				goto end;

			error = callback(trans, volume, &buffer, NULL,
					 layer2, phys_off, block_off, data);
			if (error)
				goto end;
		}

		layer1_off = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
		layer1 = hammer_bread(hmp, layer1_off, &error, &buffer);
		if (error)
			goto end;

		error = callback(trans, volume, &buffer, layer1, NULL,
				 phys_off, 0, data);
		if (error)
			goto end;
	}

end:
	if (buffer) {
		hammer_rel_buffer(buffer, 0);
		buffer = NULL;
	}

	return error;
}


/*
 * Iteration callback used when formatting a new volume's freemap.
 *
 * Initializes each L2 entry (reserving the chunk's first big-block for
 * the L2 block itself and marking blocks past the usable area UNAVAIL),
 * then initializes the chunk's L1 entry with the number of free blocks
 * accumulated in stat->counter.  CRCs are recomputed after each write.
 * Totals are accumulated into *data (a struct bigblock_stat).
 */
static int
format_callback(hammer_transaction_t trans, hammer_volume_t volume,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	/*
	 * Calculate the usable size of the volume, which must be aligned
	 * at a bigblock (8 MB) boundary.
	 */
	hammer_off_t aligned_buf_end_off;
	aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_LARGEBLOCK_MASK64));

	if (layer1) {
		/* A fresh volume's L1 entries must still be unallocated. */
		KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);

		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = phys_off;
		layer1->blocks_free = stat->counter;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);

		/* counter held this chunk's free blocks; fold and reset. */
		stat->total_free_bigblocks += stat->counter;
		stat->counter = 0; /* reset */
	} else if (layer2) {
		hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
		bzero(layer2, sizeof(*layer2));

		if (block_off == 0) {
			/*
			 * The first entry represents the L2 bigblock itself.
			 */
			layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
			++stat->total_bigblocks;
		} else if (phys_off + block_off < aligned_buf_end_off) {
			/*
			 * Available bigblock
			 */
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			++stat->total_bigblocks;
			++stat->counter;
		} else {
			/*
			 * Bigblock outside of physically available
			 * space
			 */
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		}

		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);
	} else {
		/* The iterator always supplies exactly one of layer1/layer2. */
		KKASSERT(0);
	}

	return 0;
}

/*
 * Format a new volume's freemap, accumulating big-block totals into
 * *stat for the caller's statistics update.  Returns 0 or an errno
 * from the underlying iteration.
 */
static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;
	return hammer_iterate_l1l2_entries(trans, volume, format_callback, stat);
}

/*
 * Iteration callback used when freeing a volume's freemap (volume-del).
 *
 * With data == NULL this runs in test-only mode: it verifies every
 * in-use L2 entry is empty without touching disk.  With a non-NULL
 * stat it additionally frees the L1 entries and counts big-blocks.
 */
static int
free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off __unused,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	/*
	 * No modifications to ondisk structures
	 */
	int testonly = (stat == NULL);

	if (layer1) {
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			/*
			 * This layer1 entry is already free.
			 */
			return 0;
		}

		/* Only the volume being removed may own this L1 entry. */
		KKASSERT((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
			trans->hmp->volume_to_remove);

		if (testonly)
			return 0;

		/*
		 * Free the L1 entry
		 */
		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);

		return 0;
	} else if (layer2) {
		if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
			return 0;
		}

		if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) {
			if (stat) {
				++stat->total_bigblocks;
			}
			return 0;
		}

		if (layer2->append_off == 0 &&
		    layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
			if (stat) {
				++stat->total_bigblocks;
				++stat->total_free_bigblocks;
			}
			return 0;
		}

		/*
		 * We found a layer2 entry that is not empty!
		 */
		return EBUSY;
	} else {
		/* The iterator always supplies exactly one of layer1/layer2. */
		KKASSERT(0);
	}

	return EINVAL;
}

/*
 * Free a volume's freemap during volume-del.
 *
 * Runs the iteration twice: first in test-only mode (data == NULL) so
 * nothing is modified if any big-block is still in use (EBUSY), then
 * a second time to actually free the L1 entries and fill in *stat.
 */
static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	int error;

	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;

	error = hammer_iterate_l1l2_entries(trans, volume, free_callback, NULL);
	if (error)
		return error;

	error = hammer_iterate_l1l2_entries(trans, volume, free_callback, stat);
	return error;
}

/************************************************************************
 *				MISC					*
 ************************************************************************
 */

/*
 * Resolve dev_path to a disk vnode and open it (read-only if ronly).
 *
 * On success *devvpp holds a referenced, opened vnode; the reference
 * is released here on failure.  Rejects devices that are already
 * mounted or otherwise in use (EBUSY).
 */
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
	int error;
	struct nlookupdata nd;

	/*
	 * Get the device vnode
	 */
	if (*devvpp == NULL) {
		error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
		nlookup_done(&nd);
	} else {
		error = 0;
	}

	if (error == 0) {
		/* Must be a disk device and must not be mounted. */
		if (vn_isdisk(*devvpp, &error)) {
			error = vfs_mountedon(*devvpp);
		}
	}
	if (error == 0 && vcount(*devvpp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(*devvpp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(*devvpp);
	}
	if (error && *devvpp) {
		/* Drop the reference taken by cache_vref() on failure. */
		vrele(*devvpp);
		*devvpp = NULL;
	}
	return (error);
}

/*
 * Close and release a device vnode opened by hammer_setup_device().
 * Flushes (or discards, if ronly) dirty buffers first and NULLs the
 * caller's pointer.  Safe to call with *devvpp == NULL.
 */
static void
hammer_close_device(struct vnode **devvpp, int ronly)
{
	if (*devvpp) {
		vinvalbuf(*devvpp, ronly ? 0 : V_SAVE, 0, 0);
		VOP_CLOSE(*devvpp, (ronly ? FREAD : FREAD|FWRITE));
		vrele(*devvpp);
		*devvpp = NULL;
	}
}

/*
 * Write a fresh HAMMER volume header to devvp for a volume that is
 * being added.  Refuses devices that already carry a valid HAMMER
 * signature (EFTYPE) -- they must be erased first.  Lays out the boot
 * area, memory log area and the zone-2 buffer area within vol_size.
 */
static int
hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
{
	struct buf *bp = NULL;
	struct hammer_volume_ondisk *ondisk;
	int error;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
	error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
		goto late_failure;

	ondisk = (struct hammer_volume_ondisk*) bp->b_data;

	/*
	 * Note that we do NOT allow to use a device that contains
	 * a valid HAMMER signature. It has to be cleaned up with dd
	 * before.
	 */
	if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_volume_add: Formatting of valid HAMMER volume "
			"%s denied. Erase with dd!\n", vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/* Identify the new volume as a member of the mounted filesystem. */
	bzero(ondisk, sizeof(struct hammer_volume_ondisk));
	ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
	ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
	ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
	ondisk->vol_fsid = hmp->fsid;
	ondisk->vol_rootvol = hmp->rootvol->vol_no;
	ondisk->vol_no = vol_no;
	ondisk->vol_count = vol_count;
	ondisk->vol_version = hmp->version;

	/*
	 * Reserve space for (future) header junk, setup our poor-man's
	 * bigblock allocator.
	 */
	int64_t vol_alloc = HAMMER_BUFSIZE * 16;

	ondisk->vol_bot_beg = vol_alloc;
	vol_alloc += boot_area_size;
	ondisk->vol_mem_beg = vol_alloc;
	vol_alloc += mem_area_size;

	/*
	 * The remaining area is the zone 2 buffer allocation area.
	 */
	ondisk->vol_buf_beg = vol_alloc;
	ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;

	if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
		/* NOTE(review): message lacks a trailing \n */
		kprintf("volume %d %s is too small to hold the volume header",
		     ondisk->vol_no, ondisk->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
			      HAMMER_BUFSIZE;
	ondisk->vol_blocksize = HAMMER_BUFSIZE;

	/*
	 * Write volume header to disk
	 */
	error = bwrite(bp);
	bp = NULL;

late_failure:
	if (bp)
		brelse(bp);
	return (error);
}

/*
 * Invalidates the volume header. Used by volume-del.
 *
 * Zeroes the on-disk header (including the HAMMER signature) so the
 * removed device cannot accidentally be mounted again.
 */
static int
hammer_clear_volume_header(struct vnode *devvp)
{
	struct buf *bp = NULL;
	struct hammer_volume_ondisk *ondisk;
	int error;

	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
	error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
		goto late_failure;

	ondisk = (struct hammer_volume_ondisk*) bp->b_data;
	bzero(ondisk, sizeof(struct hammer_volume_ondisk));

	error = bwrite(bp);
	bp = NULL;

late_failure:
	if (bp)
		brelse(bp);
	return (error);
}