1 /* 2 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> and 6 * Michael Neumann <mneumann@ntecs.de> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 *
 */

#include "hammer.h"

/*
 * Forward declarations for the static helpers below.
 */
static int
hammer_format_volume_header(hammer_mount_t hmp,
	struct hammer_ioc_volume *ioc,
	hammer_volume_ondisk_t ondisk,
	int vol_no);

static int
hammer_update_volumes_header(hammer_transaction_t trans,
	int64_t total_bigblocks, int64_t empty_bigblocks);

static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip);

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);

static int
hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume,
	int64_t *total_bigblocks, int64_t *empty_bigblocks);

/*
 * Add a volume to the filesystem (HAMMERIOC_ADD_VOLUME).
 *
 * Formats a volume header for the new device, installs the volume,
 * initializes its freemap, and updates every volume header with the new
 * vol_count plus the root volume's big-block statistics.
 *
 * Serialized against other volume operations via hmp->volume_lock
 * (trylock: returns EAGAIN rather than blocking).  On-disk modifications
 * are done while holding the sync lock (shared) and blkmap_lock
 * (exclusive), in that order.
 *
 * Returns 0 on success or a kernel errno (EINVAL, EAGAIN, or whatever
 * the helpers return).
 */
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	hammer_mount_t hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;	/* staging copy of new header */
	hammer_volume_t volume;
	int64_t total_bigblocks, empty_bigblocks;
	int free_vol_no = 0;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	/* Only one volume operation at a time; do not block waiting. */
	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		hammer_unlock(&hmp->volume_lock);
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	/*
	 * Find an unused volume number.
	 */
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
		hammer_volume_number_test(hmp, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		error = EINVAL;
		goto end;
	}

	/* Build the new volume header from the root volume's template. */
	error = hammer_format_volume_header(hmp, ioc, &ondisk, free_vol_no);
	if (error)
		goto end;

	/* Attach the device to the in-memory volume set. */
	error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
	if (error)
		goto end;

	/* Lock order: sync lock (shared) before blkmap_lock (exclusive). */
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	error = hammer_format_freemap(trans, volume);
	KKASSERT(error == 0);

	/* A freshly formatted volume must be completely empty. */
	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);
	KKASSERT(total_bigblocks == empty_bigblocks);

	hammer_rel_volume(volume, 0);

	/* Bump nvolumes before the headers are rewritten with it. */
	++hmp->nvolumes;
	error = hammer_update_volumes_header(trans,
			total_bigblocks, empty_bigblocks);
	KKASSERT(error == 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}


/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	hammer_mount_t hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;	/* scratch for unload */
	hammer_volume_t volume;
	int64_t total_bigblocks, empty_bigblocks;
	int vol_no;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot del volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	/* Only one volume operation at a time; do not block waiting. */
	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/* The last remaining volume (the root) can never be removed. */
	if (hmp->nvolumes <= 1) {
		hammer_unlock(&hmp->volume_lock);
		hmkprintf(hmp, "No HAMMER volume to delete\n");
		return (EINVAL);
	}

	/*
	 * find volume by volname
	 */
	volume = NULL;
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;	/* keep the reference on a match */
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		hmkprintf(hmp, "Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		hmkprintf(hmp, "Cannot remove root-volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Reblock filesystem if the volume is not empty
	 */
	hmp->volume_to_remove = volume->vol_no;

	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);

	if (total_bigblocks == empty_bigblocks) {
		hmkprintf(hmp, "%s is already empty\n", volume->vol_name);
	} else if (ioc->flag & HAMMER_IOC_VOLUME_REBLOCK) {
		/* Migrate all data off this volume before freeing it. */
		error = hammer_do_reblock(trans, ip);
		if (error) {
			hmp->volume_to_remove = -1;
			hammer_rel_volume(volume, 0);
			goto end;
		}
	} else {
		hmkprintf(hmp, "%s is not empty\n", volume->vol_name);
		hammer_rel_volume(volume, 0);
		error = ENOTEMPTY;
		goto end;
	}

	/* Lock order: sync lock (shared) before blkmap_lock (exclusive). */
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/* Recount now that reblocking may have emptied the volume. */
	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);

	error = hammer_free_freemap(trans, volume);
	if (error) {
		hmkprintf(hmp, "Failed to free volume: ");
		if (error == EBUSY)
			kprintf("Volume %d not empty\n", volume->vol_no);
		else
			kprintf("%d\n", error);
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end1;
	}
	/*
	 * Drop our reference; the volume structure itself remains valid
	 * until hammer_unload_volume() below tears it down.
	 */
	hammer_rel_volume(volume, 0);

	/*
	 * XXX: Temporary solution for
	 * http://lists.dragonflybsd.org/pipermail/kernel/2015-August/175027.html
	 */
	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);
	hammer_flusher_sync(hmp); /* 1 */
	hammer_flusher_sync(hmp); /* 2 */
	hammer_flusher_sync(hmp); /* 3 */
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * Unload buffers
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	bzero(&ondisk, sizeof(ondisk));
	error = hammer_unload_volume(volume, &ondisk);
	if (error == -1) {
		hmkprintf(hmp, "Failed to unload volume\n");
		goto end1;
	}

	/* Drop nvolumes before the headers are rewritten with it. */
	--hmp->nvolumes;
	error = hammer_update_volumes_header(trans,
			-total_bigblocks, -empty_bigblocks);
	KKASSERT(error == 0);
	hmp->volume_to_remove = -1;

end1:
	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}


/*
 * List the installed volumes (HAMMERIOC_LIST_VOLUMES): copy each
 * volume's number and device name out to the caller-supplied array.
 * ioc->nvols caps the count on input and holds the actual count on
 * output.  Returns 0, EAGAIN (operation in progress) or a copyout error.
 */
int
hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume_list *ioc)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_volume_t volume;
	int error = 0;
	int i, len, cnt = 0;

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
		if (cnt >= ioc->nvols)
			break;	/* caller's array is full */
		volume = hammer_get_volume(hmp, i, &error);
		KKASSERT(volume != NULL && error == 0);

		len = strlen(volume->vol_name) + 1;	/* include NUL */
		KKASSERT(len <= MAXPATHLEN);

		ioc->vols[cnt].vol_no = volume->vol_no;
		error = copyout(volume->vol_name, ioc->vols[cnt].device_name,
				len);
		hammer_rel_volume(volume, 0);
		if (error)
			goto end;
		cnt++;
	}
	ioc->nvols = cnt;

end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}

/*
 * Reblock the entire filesystem (all PFSs, all big-blocks) away from
 * hmp->volume_to_remove so the volume can be deleted.  Returns 0 on
 * success, EINTR if interrupted, or the reblocker's error.
 */
static
int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip)
{
	hammer_mount_t hmp = trans->hmp;
	int error;
	int vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	vol_no = trans->hmp->volume_to_remove;
	KKASSERT(vol_no != -1);

	/* Sweep the full B-Tree key range. */
	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;	/* reblock all big-blocks */
	reblock.allpfs = 1;	/* reblock all PFS */
	reblock.vol_no = vol_no;

	hmkprintf(hmp, "reblock started\n");
	error = hammer_ioc_reblock(trans, ip, &reblock);

	/* A signal may terminate the reblock without an error return. */
	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			hmkprintf(hmp, "reblock was interrupted\n");
		} else {
			hmkprintf(hmp, "reblock failed: %d\n", error);
		}
		return(error);
	}

	return(0);
}

/*
 * XXX This somehow needs to stop doing hammer_modify_buffer() for
 * layer2 entries.
In theory adding a large block device could 373 * blow away UNDO fifo. The best way is to format layer2 entries 374 * in userspace without UNDO getting involved before the device is 375 * safely added to the filesystem. HAMMER has no interest in what 376 * has happened to the device before it safely joins the filesystem. 377 */ 378 static int 379 hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume) 380 { 381 hammer_mount_t hmp = trans->hmp; 382 hammer_volume_ondisk_t ondisk; 383 hammer_blockmap_t freemap; 384 hammer_off_t alloc_offset; 385 hammer_off_t phys_offset; 386 hammer_off_t block_offset; 387 hammer_off_t layer1_offset; 388 hammer_off_t layer2_offset; 389 hammer_off_t vol_free_end; 390 hammer_off_t aligned_vol_free_end; 391 hammer_blockmap_layer1_t layer1; 392 hammer_blockmap_layer2_t layer2; 393 hammer_buffer_t buffer1 = NULL; 394 hammer_buffer_t buffer2 = NULL; 395 int64_t vol_buf_size; 396 int64_t layer1_count = 0; 397 int error = 0; 398 399 KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO); 400 401 ondisk = volume->ondisk; 402 vol_buf_size = HAMMER_VOL_BUF_SIZE(ondisk); 403 KKASSERT((vol_buf_size & ~HAMMER_OFF_SHORT_MASK) == 0); 404 vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no, 405 vol_buf_size & ~HAMMER_BIGBLOCK_MASK64); 406 aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(vol_free_end); 407 408 freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX]; 409 alloc_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0); 410 411 hmkprintf(hmp, "Initialize freemap volume %d\n", volume->vol_no); 412 413 for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0); 414 phys_offset < aligned_vol_free_end; 415 phys_offset += HAMMER_BLOCKMAP_LAYER2) { 416 layer1_offset = freemap->phys_offset + 417 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset); 418 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1); 419 if (error) 420 goto end; 421 if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) { 422 hammer_modify_buffer(trans, buffer1, 
layer1, sizeof(*layer1)); 423 bzero(layer1, sizeof(*layer1)); 424 layer1->phys_offset = alloc_offset; 425 layer1->blocks_free = 0; 426 hammer_crc_set_layer1(hmp->version, layer1); 427 hammer_modify_buffer_done(buffer1); 428 alloc_offset += HAMMER_BIGBLOCK_SIZE; 429 } 430 } 431 432 for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0); 433 phys_offset < aligned_vol_free_end; 434 phys_offset += HAMMER_BLOCKMAP_LAYER2) { 435 layer1_count = 0; 436 layer1_offset = freemap->phys_offset + 437 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset); 438 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1); 439 if (error) 440 goto end; 441 KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL); 442 443 for (block_offset = 0; 444 block_offset < HAMMER_BLOCKMAP_LAYER2; 445 block_offset += HAMMER_BIGBLOCK_SIZE) { 446 layer2_offset = layer1->phys_offset + 447 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset); 448 layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2); 449 if (error) 450 goto end; 451 452 hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2)); 453 bzero(layer2, sizeof(*layer2)); 454 455 if (phys_offset + block_offset < alloc_offset) { 456 layer2->zone = HAMMER_ZONE_FREEMAP_INDEX; 457 layer2->append_off = HAMMER_BIGBLOCK_SIZE; 458 layer2->bytes_free = 0; 459 } else if (phys_offset + block_offset < vol_free_end) { 460 layer2->zone = 0; 461 layer2->append_off = 0; 462 layer2->bytes_free = HAMMER_BIGBLOCK_SIZE; 463 ++layer1_count; 464 } else { 465 layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX; 466 layer2->append_off = HAMMER_BIGBLOCK_SIZE; 467 layer2->bytes_free = 0; 468 } 469 470 hammer_crc_set_layer2(hmp->version, layer2); 471 hammer_modify_buffer_done(buffer2); 472 } 473 474 hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1)); 475 layer1->blocks_free += layer1_count; 476 hammer_crc_set_layer1(hmp->version, layer1); 477 hammer_modify_buffer_done(buffer1); 478 } 479 480 end: 481 if (buffer1) 482 hammer_rel_buffer(buffer1, 0); 483 if (buffer2) 484 
hammer_rel_buffer(buffer2, 0); 485 486 return error; 487 } 488 489 /* 490 * XXX This somehow needs to stop doing hammer_modify_buffer() for 491 * layer2 entries. In theory removing a large block device could 492 * blow away UNDO fifo. The best way is to erase layer2 entries 493 * in userspace without UNDO getting involved after the device has 494 * been safely removed from the filesystem. HAMMER has no interest 495 * in what happens to the device once it's safely removed. 496 */ 497 static int 498 hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume) 499 { 500 hammer_mount_t hmp = trans->hmp; 501 hammer_volume_ondisk_t ondisk; 502 hammer_blockmap_t freemap; 503 hammer_off_t phys_offset; 504 hammer_off_t block_offset; 505 hammer_off_t layer1_offset; 506 hammer_off_t layer2_offset; 507 hammer_off_t vol_free_end; 508 hammer_off_t aligned_vol_free_end; 509 hammer_blockmap_layer1_t layer1; 510 hammer_blockmap_layer2_t layer2; 511 hammer_buffer_t buffer1 = NULL; 512 hammer_buffer_t buffer2 = NULL; 513 int64_t vol_buf_size; 514 int error = 0; 515 516 KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO); 517 518 ondisk = volume->ondisk; 519 vol_buf_size = HAMMER_VOL_BUF_SIZE(ondisk); 520 KKASSERT((vol_buf_size & ~HAMMER_OFF_SHORT_MASK) == 0); 521 vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no, 522 vol_buf_size & ~HAMMER_BIGBLOCK_MASK64); 523 aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(vol_free_end); 524 525 freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX]; 526 527 hmkprintf(hmp, "Free freemap volume %d\n", volume->vol_no); 528 529 for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0); 530 phys_offset < aligned_vol_free_end; 531 phys_offset += HAMMER_BLOCKMAP_LAYER2) { 532 layer1_offset = freemap->phys_offset + 533 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset); 534 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1); 535 if (error) 536 goto end; 537 KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL); 538 539 for 
(block_offset = 0; 540 block_offset < HAMMER_BLOCKMAP_LAYER2; 541 block_offset += HAMMER_BIGBLOCK_SIZE) { 542 layer2_offset = layer1->phys_offset + 543 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset); 544 layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2); 545 if (error) 546 goto end; 547 548 switch (layer2->zone) { 549 case HAMMER_ZONE_UNDO_INDEX: 550 KKASSERT(0); 551 case HAMMER_ZONE_FREEMAP_INDEX: 552 case HAMMER_ZONE_UNAVAIL_INDEX: 553 continue; 554 default: 555 KKASSERT(phys_offset + block_offset < aligned_vol_free_end); 556 if (layer2->append_off == 0 && 557 layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) 558 continue; 559 break; 560 } 561 return EBUSY; /* Not empty */ 562 } 563 } 564 565 for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0); 566 phys_offset < aligned_vol_free_end; 567 phys_offset += HAMMER_BLOCKMAP_LAYER2) { 568 layer1_offset = freemap->phys_offset + 569 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset); 570 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1); 571 if (error) 572 goto end; 573 KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL); 574 575 for (block_offset = 0; 576 block_offset < HAMMER_BLOCKMAP_LAYER2; 577 block_offset += HAMMER_BIGBLOCK_SIZE) { 578 layer2_offset = layer1->phys_offset + 579 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset); 580 layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2); 581 if (error) 582 goto end; 583 584 switch (layer2->zone) { 585 case HAMMER_ZONE_UNDO_INDEX: 586 KKASSERT(0); 587 default: 588 KKASSERT(phys_offset + block_offset < aligned_vol_free_end); 589 hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2)); 590 bzero(layer2, sizeof(*layer2)); 591 hammer_modify_buffer_done(buffer2); 592 break; 593 } 594 } 595 596 hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1)); 597 bzero(layer1, sizeof(*layer1)); 598 layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL; 599 hammer_crc_set_layer1(hmp->version, layer1); 600 hammer_modify_buffer_done(buffer1); 601 } 602 603 end: 604 
if (buffer1) 605 hammer_rel_buffer(buffer1, 0); 606 if (buffer2) 607 hammer_rel_buffer(buffer2, 0); 608 609 return error; 610 } 611 612 static int 613 hammer_format_volume_header(hammer_mount_t hmp, 614 struct hammer_ioc_volume *ioc, 615 hammer_volume_ondisk_t ondisk, 616 int vol_no) 617 { 618 hammer_volume_ondisk_t root_ondisk; 619 int64_t vol_alloc; 620 621 KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk)); 622 623 /* 624 * Just copy from the root volume header. 625 */ 626 root_ondisk = hmp->rootvol->ondisk; 627 bzero(ondisk, sizeof(struct hammer_volume_ondisk)); 628 ondisk->vol_fsid = root_ondisk->vol_fsid; 629 ondisk->vol_fstype = root_ondisk->vol_fstype; 630 ksnprintf(ondisk->vol_label, sizeof(ondisk->vol_label), "%s", 631 root_ondisk->vol_label); 632 ondisk->vol_version = root_ondisk->vol_version; 633 ondisk->vol_rootvol = root_ondisk->vol_no; 634 ondisk->vol_signature = root_ondisk->vol_signature; 635 636 KKASSERT(ondisk->vol_rootvol == HAMMER_ROOT_VOLNO); 637 KKASSERT(ondisk->vol_signature == HAMMER_FSBUF_VOLUME); 638 639 /* 640 * Assign the new vol_no and vol_count. 641 */ 642 ondisk->vol_no = vol_no; 643 ondisk->vol_count = root_ondisk->vol_count + 1; 644 645 /* 646 * Reserve space for (future) header junk. 647 */ 648 vol_alloc = root_ondisk->vol_bot_beg; 649 ondisk->vol_bot_beg = vol_alloc; 650 vol_alloc += ioc->boot_area_size; 651 ondisk->vol_mem_beg = vol_alloc; 652 vol_alloc += ioc->memory_log_size; 653 654 /* 655 * The remaining area is the zone 2 buffer allocation area. 
656 */ 657 ondisk->vol_buf_beg = vol_alloc; 658 ondisk->vol_buf_end = ioc->vol_size & ~(int64_t)HAMMER_BUFMASK; 659 660 if (HAMMER_VOL_BUF_SIZE(ondisk) < 0) { /* int64_t */ 661 hmkprintf(hmp, "volume %d is too small to hold the volume header\n", 662 ondisk->vol_no); 663 return(EFTYPE); 664 } 665 666 return(0); 667 } 668 669 static int 670 hammer_update_volumes_header(hammer_transaction_t trans, 671 int64_t total_bigblocks, int64_t empty_bigblocks) 672 { 673 hammer_mount_t hmp = trans->hmp; 674 struct mount *mp = hmp->mp; 675 hammer_volume_t volume; 676 int vol_no; 677 int error = 0; 678 679 /* 680 * Set each volume's new value of the vol_count field. 681 */ 682 HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) { 683 volume = hammer_get_volume(hmp, vol_no, &error); 684 KKASSERT(volume != NULL && error == 0); 685 hammer_modify_volume_field(trans, volume, vol_count); 686 volume->ondisk->vol_count = hmp->nvolumes; 687 hammer_modify_volume_done(volume); 688 689 /* 690 * Only changes to the header of the root volume 691 * are automatically flushed to disk. For all 692 * other volumes that we modify we do it here. 693 * 694 * No interlock is needed, volume buffers are not 695 * messed with by bioops. 696 */ 697 if (volume != trans->rootvol && volume->io.modified) { 698 hammer_crc_set_volume(hmp->version, volume->ondisk); 699 hammer_io_flush(&volume->io, 0); 700 } 701 702 hammer_rel_volume(volume, 0); 703 } 704 705 /* 706 * Update the total number of big-blocks. 707 */ 708 hammer_modify_volume_field(trans, trans->rootvol, vol0_stat_bigblocks); 709 trans->rootvol->ondisk->vol0_stat_bigblocks += total_bigblocks; 710 hammer_modify_volume_done(trans->rootvol); 711 712 /* 713 * Big-block count changed so recompute the total number of blocks. 
714 */ 715 mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks * 716 HAMMER_BUFFERS_PER_BIGBLOCK; 717 mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks * 718 HAMMER_BUFFERS_PER_BIGBLOCK; 719 720 /* 721 * Update the total number of free big-blocks. 722 */ 723 hammer_modify_volume_field(trans, trans->rootvol, 724 vol0_stat_freebigblocks); 725 trans->rootvol->ondisk->vol0_stat_freebigblocks += empty_bigblocks; 726 hammer_modify_volume_done(trans->rootvol); 727 728 /* 729 * Update the copy in hmp. 730 */ 731 hmp->copy_stat_freebigblocks = 732 trans->rootvol->ondisk->vol0_stat_freebigblocks; 733 734 return(error); 735 } 736 737 /* 738 * Count total big-blocks and empty big-blocks within the volume. 739 * The volume must be a non-root volume. 740 * 741 * Note that total big-blocks doesn't include big-blocks for layer2 742 * (and obviously layer1 and undomap). This is requirement of the 743 * volume header and this function is to retrieve that information. 744 */ 745 static int 746 hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume, 747 int64_t *total_bigblocks, int64_t *empty_bigblocks) 748 { 749 hammer_volume_ondisk_t ondisk; 750 hammer_blockmap_t freemap; 751 hammer_off_t phys_offset; 752 hammer_off_t block_offset; 753 hammer_off_t layer1_offset; 754 hammer_off_t layer2_offset; 755 hammer_off_t vol_free_end; 756 hammer_off_t aligned_vol_free_end; 757 hammer_blockmap_layer1_t layer1; 758 hammer_blockmap_layer2_t layer2; 759 hammer_buffer_t buffer1 = NULL; 760 hammer_buffer_t buffer2 = NULL; 761 int64_t vol_buf_size; 762 int64_t total = 0; 763 int64_t empty = 0; 764 int error = 0; 765 766 KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO); 767 768 *total_bigblocks = 0; /* avoid gcc warnings */ 769 *empty_bigblocks = 0; /* avoid gcc warnings */ 770 771 ondisk = volume->ondisk; 772 vol_buf_size = HAMMER_VOL_BUF_SIZE(ondisk); 773 KKASSERT((vol_buf_size & ~HAMMER_OFF_SHORT_MASK) == 0); 774 vol_free_end = 
HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no, 775 vol_buf_size & ~HAMMER_BIGBLOCK_MASK64); 776 aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(vol_free_end); 777 778 freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX]; 779 780 for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0); 781 phys_offset < aligned_vol_free_end; 782 phys_offset += HAMMER_BLOCKMAP_LAYER2) { 783 layer1_offset = freemap->phys_offset + 784 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset); 785 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1); 786 if (error) 787 goto end; 788 789 for (block_offset = 0; 790 block_offset < HAMMER_BLOCKMAP_LAYER2; 791 block_offset += HAMMER_BIGBLOCK_SIZE) { 792 layer2_offset = layer1->phys_offset + 793 HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset); 794 layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2); 795 if (error) 796 goto end; 797 798 switch (layer2->zone) { 799 case HAMMER_ZONE_UNDO_INDEX: 800 KKASSERT(0); 801 case HAMMER_ZONE_FREEMAP_INDEX: 802 case HAMMER_ZONE_UNAVAIL_INDEX: 803 continue; 804 default: 805 KKASSERT(phys_offset + block_offset < aligned_vol_free_end); 806 total++; 807 if (layer2->append_off == 0 && 808 layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) 809 empty++; 810 break; 811 } 812 } 813 } 814 815 hmkprintf(hmp, "big-blocks total=%jd empty=%jd\n", total, empty); 816 *total_bigblocks = total; 817 *empty_bigblocks = empty; 818 end: 819 if (buffer1) 820 hammer_rel_buffer(buffer1, 0); 821 if (buffer2) 822 hammer_rel_buffer(buffer2, 0); 823 824 return error; 825 } 826