1 /* 2 * Copyright (c) 2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.32 2008/11/13 02:23:29 dillon Exp $
 */

#include "hammer.h"

static int hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_history *hist);
static int hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_synctid *std);
static int hammer_ioc_get_version(hammer_transaction_t trans,
				hammer_inode_t ip,
				struct hammer_ioc_version *ver);
static int hammer_ioc_set_version(hammer_transaction_t trans,
				hammer_inode_t ip,
				struct hammer_ioc_version *ver);
static int hammer_ioc_get_info(hammer_transaction_t trans,
				struct hammer_ioc_info *info);
static int hammer_ioc_pfs_iterate(hammer_transaction_t trans,
				struct hammer_ioc_pfs_iterate *pi);
static int hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_config *snap);
static int hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_config *snap);
static int hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_data *data);

/*
 * Main ioctl entry point for the HAMMER filesystem.  Every operation runs
 * inside a single filesystem transaction which is started before and
 * finished after the dispatch switch.
 *
 * The PRIV_HAMMER_IOCTL check result is captured up front but only
 * consulted by the mutating operations (each guarded by "if (error == 0)").
 * The purely informational calls (GETHISTORY, SYNCTID, GET_PSEUDOFS,
 * GET_VERSION, GET_INFO, LIST_VOLUMES, GET_SNAPSHOT, GET_CONFIG)
 * deliberately dispatch regardless of the privilege-check result.
 * Volume add/del additionally require PRIV_HAMMER_VOLUME.
 */
int
hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
	     struct ucred *cred)
{
	struct hammer_transaction trans;
	int error;

	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

	hammer_start_transaction(&trans, ip->hmp);

	switch(com) {
	case HAMMERIOC_PRUNE:
		if (error == 0) {
			error = hammer_ioc_prune(&trans, ip,
					(struct hammer_ioc_prune *)data);
		}
		break;
	case HAMMERIOC_GETHISTORY:
		error = hammer_ioc_gethistory(&trans, ip,
					(struct hammer_ioc_history *)data);
		break;
	case HAMMERIOC_REBLOCK:
		if (error == 0) {
			error = hammer_ioc_reblock(&trans, ip,
					(struct hammer_ioc_reblock *)data);
		}
		break;
	case HAMMERIOC_REBALANCE:
		/*
		 * Rebalancing needs to lock a lot of B-Tree nodes.  The
		 * children and children's children.  Systems with very
		 * little memory will not be able to do it.
		 */
		if (error == 0 && nbuf < HAMMER_REBALANCE_MIN_BUFS) {
			kprintf("hammer: System has insufficient buffers "
				"to rebalance the tree.  nbuf < %d\n",
				HAMMER_REBALANCE_MIN_BUFS);
			error = ENOSPC;
		}
		if (error == 0) {
			error = hammer_ioc_rebalance(&trans, ip,
					(struct hammer_ioc_rebalance *)data);
		}
		break;
	case HAMMERIOC_SYNCTID:
		error = hammer_ioc_synctid(&trans, ip,
					(struct hammer_ioc_synctid *)data);
		break;
	case HAMMERIOC_GET_PSEUDOFS:
		error = hammer_ioc_get_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		break;
	case HAMMERIOC_SET_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_set_pseudofs(&trans, ip, cred,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_UPG_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_upgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_DGD_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_downgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_RMR_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_destroy_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_WAI_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_wait_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_READ:
		if (error == 0) {
			error = hammer_ioc_mirror_read(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_WRITE:
		if (error == 0) {
			error = hammer_ioc_mirror_write(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_GET_VERSION:
		error = hammer_ioc_get_version(&trans, ip,
				    (struct hammer_ioc_version *)data);
		break;
	case HAMMERIOC_GET_INFO:
		error = hammer_ioc_get_info(&trans,
				    (struct hammer_ioc_info *)data);
		break;
	case HAMMERIOC_SET_VERSION:
		if (error == 0) {
			error = hammer_ioc_set_version(&trans, ip,
					    (struct hammer_ioc_version *)data);
		}
		break;
	case HAMMERIOC_ADD_VOLUME:
		/* Volume manipulation requires an additional privilege */
		if (error == 0) {
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_add(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_DEL_VOLUME:
		if (error == 0) {
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_del(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_LIST_VOLUMES:
		error = hammer_ioc_volume_list(&trans, ip,
		    (struct hammer_ioc_volume_list *)data);
		break;
	case HAMMERIOC_ADD_SNAPSHOT:
		if (error == 0) {
			error = hammer_ioc_add_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_DEL_SNAPSHOT:
		if (error == 0) {
			error = hammer_ioc_del_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_GET_SNAPSHOT:
		error = hammer_ioc_get_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		break;
	case HAMMERIOC_GET_CONFIG:
		error = hammer_ioc_get_config(
					&trans, ip, (struct hammer_ioc_config *)data);
		break;
	case HAMMERIOC_SET_CONFIG:
		if (error == 0) {
			error = hammer_ioc_set_config(
					&trans, ip, (struct hammer_ioc_config *)data);
		}
		break;
	case HAMMERIOC_DEDUP:
		if (error == 0) {
			error = hammer_ioc_dedup(
					&trans, ip, (struct hammer_ioc_dedup *)data);
		}
		break;
	case HAMMERIOC_GET_DATA:
		if (error == 0) {
			error = hammer_ioc_get_data(
					&trans, ip, (struct hammer_ioc_data *)data);
		}
		break;
	case HAMMERIOC_PFS_ITERATE:
		if (error == 0) {
			error = hammer_ioc_pfs_iterate(
					&trans, (struct hammer_ioc_pfs_iterate *)data);
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Iterate through an object's inode or an object's records and record
 * modification TIDs.
 */
static void add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
			hammer_btree_elm_t elm);

/*
 * Collect the set of transaction ids (create/delete TIDs) that modified
 * either the inode itself or, with HAMMER_IOC_HISTORY_ATKEY, the record
 * range covering hist->key.  Results accumulate in hist->hist_ary via
 * add_history(); the NEXT_TID / NEXT_KEY / EOF flags tell the caller how
 * to continue the scan on the next ioctl.
 */
static
int
hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_history *hist)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int error;

	/*
	 * Validate the structure and initialize for return.
	 */
	if (hist->beg_tid > hist->end_tid)
		return(EINVAL);
	if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		if (hist->key > hist->nxt_key)
			return(EINVAL);
	}

	hist->obj_id = ip->obj_id;
	hist->count = 0;
	hist->nxt_tid = hist->end_tid;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_TID;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_EOF;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_UNSYNCED;
	/*
	 * Flag unsynced dirty state (other than atime/mtime updates) so
	 * userland knows the on-media history may be incomplete.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) &
	    ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		hist->head.flags |= HAMMER_IOC_HISTORY_UNSYNCED;
	}

	/*
	 * Setup the cursor.  We can't handle undeletable records
	 * (create_tid of 0) at the moment.  A create_tid of 0 has
	 * a special meaning and cannot be specified in the cursor.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.key_beg.obj_id = hist->obj_id;
	cursor.key_beg.create_tid = hist->beg_tid;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (cursor.key_beg.create_tid == HAMMER_MIN_TID)
		cursor.key_beg.create_tid = 1;

	cursor.key_end.obj_id = hist->obj_id;
	cursor.key_end.create_tid = hist->end_tid;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE;

	if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * key-range within the file.  For a regular file the
		 * on-disk key represents BASE+LEN, not BASE, so the
		 * first possible record containing the offset 'key'
		 * has an on-disk key of (key + 1).
		 */
		cursor.key_beg.key = hist->key;
		cursor.key_end.key = HAMMER_MAX_KEY;
		cursor.key_beg.localization = ip->obj_localization +
					      HAMMER_LOCALIZE_MISC;
		cursor.key_end.localization = ip->obj_localization +
					      HAMMER_LOCALIZE_MISC;

		switch(ip->ino_data.obj_type) {
		case HAMMER_OBJTYPE_REGFILE:
			++cursor.key_beg.key;
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
			break;
		case HAMMER_OBJTYPE_DIRECTORY:
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
			/* directory entries live in a per-entry localization */
			cursor.key_beg.localization = ip->obj_localization +
						hammer_dir_localization(ip);
			cursor.key_end.localization = ip->obj_localization +
						hammer_dir_localization(ip);
			break;
		case HAMMER_OBJTYPE_DBFILE:
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
			break;
		default:
			error = EINVAL;
			break;
		}
		cursor.key_end.rec_type = cursor.key_beg.rec_type;
	} else {
		/*
		 * The inode itself.
		 */
		cursor.key_beg.key = 0;
		cursor.key_end.key = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_end.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.localization = ip->obj_localization +
					      HAMMER_LOCALIZE_INODE;
		cursor.key_end.localization = ip->obj_localization +
					      HAMMER_LOCALIZE_INODE;
	}

	/*
	 * Scan records in the range, stopping when add_history() signals
	 * that the return array is full or the scan is logically complete.
	 */
	error = hammer_btree_first(&cursor);
	while (error == 0) {
		elm = &cursor.node->ondisk->elms[cursor.index];

		add_history(ip, hist, elm);
		if (hist->head.flags & (HAMMER_IOC_HISTORY_NEXT_TID |
				        HAMMER_IOC_HISTORY_NEXT_KEY |
				        HAMMER_IOC_HISTORY_EOF)) {
			break;
		}
		error = hammer_btree_iterate(&cursor);
	}
	/* Running off the end of the range is a normal EOF, not an error */
	if (error == ENOENT) {
		hist->head.flags |= HAMMER_IOC_HISTORY_EOF;
		error = 0;
	}
	hammer_done_cursor(&cursor);
	return(error);
}

/*
 * Add the scanned element to the ioctl return structure.  Some special
 * casing is required for regular files to accommodate how data ranges are
 * stored on-disk.
 */
static void
add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
	    hammer_btree_elm_t elm)
{
	int i;

	/* Only B-Tree leaf records carry history */
	if (elm->base.btype != HAMMER_BTREE_TYPE_RECORD)
		return;
	if ((hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) &&
	    ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE) {
		/*
		 * Adjust nxt_key.  For regular files the on-disk key is
		 * the end of the data range (BASE+LEN), so the range's
		 * base is (key - data_len).
		 */
		if (hist->nxt_key > elm->leaf.base.key - elm->leaf.data_len &&
		    hist->key < elm->leaf.base.key - elm->leaf.data_len) {
			hist->nxt_key = elm->leaf.base.key - elm->leaf.data_len;
		}
		if (hist->nxt_key > elm->leaf.base.key)
			hist->nxt_key = elm->leaf.base.key;

		/*
		 * Record is beyond MAXPHYS, there won't be any more records
		 * in the iteration covering the requested offset (key).
		 */
		if (elm->leaf.base.key >= MAXPHYS &&
		    elm->leaf.base.key - MAXPHYS > hist->key) {
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
		}

		/*
		 * Data-range of record does not cover the key.
		 */
		if (elm->leaf.base.key - elm->leaf.data_len > hist->key)
			return;

	} else if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * Adjust nxt_key
		 */
		if (hist->nxt_key > elm->leaf.base.key &&
		    hist->key < elm->leaf.base.key) {
			hist->nxt_key = elm->leaf.base.key;
		}

		/*
		 * Record is beyond the requested key.
		 */
		if (elm->leaf.base.key > hist->key)
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
	}

	/*
	 * Add create_tid if it is in-bounds (and not a duplicate of the
	 * previously recorded entry).
	 */
	i = hist->count;
	if ((i == 0 ||
	     elm->leaf.base.create_tid != hist->hist_ary[i - 1].tid) &&
	    elm->leaf.base.create_tid >= hist->beg_tid &&
	    elm->leaf.base.create_tid < hist->end_tid) {
		if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
			/* Array full: tell the caller where to resume */
			hist->nxt_tid = elm->leaf.base.create_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.create_tid;
		hist->hist_ary[i].time32 = elm->leaf.create_ts;
		++hist->count;
	}

	/*
	 * Add delete_tid if it is in-bounds.  Note that different portions
	 * of the history may have overlapping data ranges with different
	 * delete_tid's.  If this case occurs the delete_tid may match the
	 * create_tid of a following record.  XXX
	 *
	 *	[		]
	 *	    [	     ]
	 */
	i = hist->count;
	if (elm->leaf.base.delete_tid &&
	    elm->leaf.base.delete_tid >= hist->beg_tid &&
	    elm->leaf.base.delete_tid < hist->end_tid) {
		if (i == HAMMER_MAX_HISTORY_ELMS) {
			hist->nxt_tid = elm->leaf.base.delete_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.delete_tid;
		hist->hist_ary[i].time32 = elm->leaf.delete_ts;
		++hist->count;
	}
}

/*
 * Acquire synchronization TID
 */
static
int
hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_synctid *std)
{
	hammer_mount_t hmp = ip->hmp;
	int error = 0;

	switch(std->op) {
	case HAMMER_SYNCTID_NONE:
		/* no flush is initiated; report the flusher's current tid */
		std->tid = hmp->flusher.tid;	/* inaccurate */
		break;
	case HAMMER_SYNCTID_ASYNC:
		/* kick off a flush but do not wait for it to complete */
		hammer_queue_inodes_flusher(hmp, MNT_NOWAIT);
		hammer_flusher_async(hmp, NULL);
		std->tid = hmp->flusher.tid;	/* inaccurate */
		break;
	case HAMMER_SYNCTID_SYNC1:
		/* full synchronous flush */
		hammer_queue_inodes_flusher(hmp, MNT_WAIT);
		hammer_flusher_sync(hmp);
		std->tid = hmp->flusher.tid;
		break;
	case HAMMER_SYNCTID_SYNC2:
		/* as SYNC1 plus a second sync to finalize the first */
		hammer_queue_inodes_flusher(hmp, MNT_WAIT);
		hammer_flusher_sync(hmp);
		std->tid = hmp->flusher.tid;
		hammer_flusher_sync(hmp);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}

/*
 * Retrieve version info.
 *
 * Load min_version, wip_version, and max_version.  If cur_version is passed
 * as 0 then load the current version into cur_version.  Load the description
 * for cur_version into the description array.
 *
 * Returns 0 on success, EINVAL if cur_version is non-zero and set to an
 * unsupported value.
523 */ 524 static 525 int 526 hammer_ioc_get_version(hammer_transaction_t trans, hammer_inode_t ip, 527 struct hammer_ioc_version *ver) 528 { 529 int error = 0; 530 531 ver->min_version = HAMMER_VOL_VERSION_MIN; 532 ver->wip_version = HAMMER_VOL_VERSION_WIP; 533 ver->max_version = HAMMER_VOL_VERSION_MAX; 534 if (ver->cur_version == 0) 535 ver->cur_version = trans->hmp->version; 536 switch(ver->cur_version) { 537 case 1: 538 ksnprintf(ver->description, sizeof(ver->description), 539 "First HAMMER release (DragonFly 2.0+)"); 540 break; 541 case 2: 542 ksnprintf(ver->description, sizeof(ver->description), 543 "New directory entry layout (DragonFly 2.3+)"); 544 break; 545 case 3: 546 ksnprintf(ver->description, sizeof(ver->description), 547 "New snapshot management (DragonFly 2.5+)"); 548 break; 549 case 4: 550 ksnprintf(ver->description, sizeof(ver->description), 551 "New undo/flush, faster flush/sync (DragonFly 2.5+)"); 552 break; 553 case 5: 554 ksnprintf(ver->description, sizeof(ver->description), 555 "Adjustments for dedup support (DragonFly 2.9+)"); 556 break; 557 case 6: 558 ksnprintf(ver->description, sizeof(ver->description), 559 "Directory Hash ALG1 (tmp/rename resistance)"); 560 break; 561 default: 562 ksnprintf(ver->description, sizeof(ver->description), 563 "Unknown"); 564 error = EINVAL; 565 break; 566 } 567 return(error); 568 }; 569 570 /* 571 * Set version info 572 */ 573 static 574 int 575 hammer_ioc_set_version(hammer_transaction_t trans, hammer_inode_t ip, 576 struct hammer_ioc_version *ver) 577 { 578 hammer_mount_t hmp = trans->hmp; 579 struct hammer_cursor cursor; 580 hammer_volume_t volume; 581 int error; 582 int over = hmp->version; 583 584 /* 585 * Generally do not allow downgrades. However, version 4 can 586 * be downgraded to version 3. 
587 */ 588 if (ver->cur_version < hmp->version) { 589 if (!(ver->cur_version == 3 && hmp->version == 4)) 590 return(EINVAL); 591 } 592 if (ver->cur_version == hmp->version) 593 return(0); 594 if (ver->cur_version > HAMMER_VOL_VERSION_MAX) 595 return(EINVAL); 596 if (hmp->ronly) 597 return(EROFS); 598 599 /* 600 * Update the root volume header and the version cached in 601 * the hammer_mount structure. 602 */ 603 error = hammer_init_cursor(trans, &cursor, NULL, NULL); 604 if (error) 605 goto failed; 606 hammer_lock_ex(&hmp->flusher.finalize_lock); 607 hammer_sync_lock_ex(trans); 608 hmp->version = ver->cur_version; 609 610 /* 611 * If upgrading from version < 4 to version >= 4 the UNDO FIFO 612 * must be reinitialized. 613 */ 614 if (over < HAMMER_VOL_VERSION_FOUR && 615 ver->cur_version >= HAMMER_VOL_VERSION_FOUR) { 616 kprintf("upgrade undo to version 4\n"); 617 error = hammer_upgrade_undo_4(trans); 618 if (error) 619 goto failed; 620 } 621 622 /* 623 * Adjust the version in the volume header 624 */ 625 volume = hammer_get_root_volume(hmp, &error); 626 KKASSERT(error == 0); 627 hammer_modify_volume_field(cursor.trans, volume, vol_version); 628 volume->ondisk->vol_version = ver->cur_version; 629 hammer_modify_volume_done(volume); 630 hammer_rel_volume(volume, 0); 631 632 hammer_sync_unlock(trans); 633 hammer_unlock(&hmp->flusher.finalize_lock); 634 failed: 635 ver->head.error = error; 636 hammer_done_cursor(&cursor); 637 return(0); 638 } 639 640 /* 641 * Get information 642 */ 643 static 644 int 645 hammer_ioc_get_info(hammer_transaction_t trans, struct hammer_ioc_info *info) { 646 647 struct hammer_volume_ondisk *od = trans->hmp->rootvol->ondisk; 648 struct hammer_mount *hm = trans->hmp; 649 650 /* Fill the structure with the necessary information */ 651 _hammer_checkspace(hm, HAMMER_CHKSPC_WRITE, &info->rsvbigblocks); 652 info->rsvbigblocks = info->rsvbigblocks >> HAMMER_LARGEBLOCK_BITS; 653 strlcpy(info->vol_name, od->vol_name, sizeof(od->vol_name)); 654 655 
info->vol_fsid = hm->fsid; 656 info->vol_fstype = od->vol_fstype; 657 info->version = hm->version; 658 659 info->inodes = od->vol0_stat_inodes; 660 info->bigblocks = od->vol0_stat_bigblocks; 661 info->freebigblocks = od->vol0_stat_freebigblocks; 662 info->nvolumes = hm->nvolumes; 663 664 return 0; 665 } 666 667 /* 668 * Add a snapshot transction id(s) to the list of snapshots. 669 * 670 * NOTE: Records are created with an allocated TID. If a flush cycle 671 * is in progress the record may be synced in the current flush 672 * cycle and the volume header will reflect the allocation of the 673 * TID, but the synchronization point may not catch up to the 674 * TID until the next flush cycle. 675 */ 676 static 677 int 678 hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 679 struct hammer_ioc_snapshot *snap) 680 { 681 hammer_mount_t hmp = ip->hmp; 682 struct hammer_btree_leaf_elm leaf; 683 struct hammer_cursor cursor; 684 int error; 685 686 /* 687 * Validate structure 688 */ 689 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 690 return (EINVAL); 691 if (snap->index > snap->count) 692 return (EINVAL); 693 694 hammer_lock_ex(&hmp->snapshot_lock); 695 again: 696 /* 697 * Look for keys starting after the previous iteration, or at 698 * the beginning if snap->count is 0. 
699 */ 700 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 701 if (error) { 702 hammer_done_cursor(&cursor); 703 return(error); 704 } 705 706 cursor.asof = HAMMER_MAX_TID; 707 cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF; 708 709 bzero(&leaf, sizeof(leaf)); 710 leaf.base.obj_id = HAMMER_OBJID_ROOT; 711 leaf.base.rec_type = HAMMER_RECTYPE_SNAPSHOT; 712 leaf.base.create_tid = hammer_alloc_tid(hmp, 1); 713 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; 714 leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 715 leaf.data_len = sizeof(struct hammer_snapshot_data); 716 717 while (snap->index < snap->count) { 718 leaf.base.key = (int64_t)snap->snaps[snap->index].tid; 719 cursor.key_beg = leaf.base; 720 error = hammer_btree_lookup(&cursor); 721 if (error == 0) { 722 error = EEXIST; 723 break; 724 } 725 726 /* 727 * NOTE: Must reload key_beg after an ASOF search because 728 * the create_tid may have been modified during the 729 * search. 730 */ 731 cursor.flags &= ~HAMMER_CURSOR_ASOF; 732 cursor.key_beg = leaf.base; 733 error = hammer_create_at_cursor(&cursor, &leaf, 734 &snap->snaps[snap->index], 735 HAMMER_CREATE_MODE_SYS); 736 if (error == EDEADLK) { 737 hammer_done_cursor(&cursor); 738 goto again; 739 } 740 cursor.flags |= HAMMER_CURSOR_ASOF; 741 if (error) 742 break; 743 ++snap->index; 744 } 745 snap->head.error = error; 746 hammer_done_cursor(&cursor); 747 hammer_unlock(&hmp->snapshot_lock); 748 return(0); 749 } 750 751 /* 752 * Delete snapshot transaction id(s) from the list of snapshots. 
753 */ 754 static 755 int 756 hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 757 struct hammer_ioc_snapshot *snap) 758 { 759 hammer_mount_t hmp = ip->hmp; 760 struct hammer_cursor cursor; 761 int error; 762 763 /* 764 * Validate structure 765 */ 766 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 767 return (EINVAL); 768 if (snap->index > snap->count) 769 return (EINVAL); 770 771 hammer_lock_ex(&hmp->snapshot_lock); 772 again: 773 /* 774 * Look for keys starting after the previous iteration, or at 775 * the beginning if snap->count is 0. 776 */ 777 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 778 if (error) { 779 hammer_done_cursor(&cursor); 780 return(error); 781 } 782 783 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 784 cursor.key_beg.create_tid = 0; 785 cursor.key_beg.delete_tid = 0; 786 cursor.key_beg.obj_type = 0; 787 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT; 788 cursor.key_beg.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 789 cursor.asof = HAMMER_MAX_TID; 790 cursor.flags |= HAMMER_CURSOR_ASOF; 791 792 while (snap->index < snap->count) { 793 cursor.key_beg.key = (int64_t)snap->snaps[snap->index].tid; 794 error = hammer_btree_lookup(&cursor); 795 if (error) 796 break; 797 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF); 798 if (error) 799 break; 800 error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY, 801 0, 0, 0, NULL); 802 if (error == EDEADLK) { 803 hammer_done_cursor(&cursor); 804 goto again; 805 } 806 if (error) 807 break; 808 ++snap->index; 809 } 810 snap->head.error = error; 811 hammer_done_cursor(&cursor); 812 hammer_unlock(&hmp->snapshot_lock); 813 return(0); 814 } 815 816 /* 817 * Retrieve as many snapshot ids as possible or until the array is 818 * full, starting after the last transction id passed in. If count 819 * is 0 we retrieve starting at the beginning. 
820 * 821 * NOTE: Because the b-tree key field is signed but transaction ids 822 * are unsigned the returned list will be signed-sorted instead 823 * of unsigned sorted. The Caller must still sort the aggregate 824 * results. 825 */ 826 static 827 int 828 hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 829 struct hammer_ioc_snapshot *snap) 830 { 831 struct hammer_cursor cursor; 832 int error; 833 834 /* 835 * Validate structure 836 */ 837 if (snap->index != 0) 838 return (EINVAL); 839 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 840 return (EINVAL); 841 842 /* 843 * Look for keys starting after the previous iteration, or at 844 * the beginning if snap->count is 0. 845 */ 846 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 847 if (error) { 848 hammer_done_cursor(&cursor); 849 return(error); 850 } 851 852 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 853 cursor.key_beg.create_tid = 0; 854 cursor.key_beg.delete_tid = 0; 855 cursor.key_beg.obj_type = 0; 856 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT; 857 cursor.key_beg.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 858 if (snap->count == 0) 859 cursor.key_beg.key = HAMMER_MIN_KEY; 860 else 861 cursor.key_beg.key = (int64_t)snap->snaps[snap->count - 1].tid + 1; 862 863 cursor.key_end = cursor.key_beg; 864 cursor.key_end.key = HAMMER_MAX_KEY; 865 cursor.asof = HAMMER_MAX_TID; 866 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE | HAMMER_CURSOR_ASOF; 867 868 snap->count = 0; 869 870 error = hammer_btree_first(&cursor); 871 while (error == 0 && snap->count < HAMMER_SNAPS_PER_IOCTL) { 872 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF); 873 if (error) 874 break; 875 if (cursor.leaf->base.rec_type == HAMMER_RECTYPE_SNAPSHOT) { 876 error = hammer_btree_extract( 877 &cursor, HAMMER_CURSOR_GET_LEAF | 878 HAMMER_CURSOR_GET_DATA); 879 snap->snaps[snap->count] = cursor.data->snap; 880 881 /* 882 * The snap data tid should match the key but might 883 * not due to a bug 
in the HAMMER v3 conversion code. 884 * 885 * This error will work itself out over time but we 886 * have to force a match or the snapshot will not 887 * be deletable. 888 */ 889 if (cursor.data->snap.tid != 890 (hammer_tid_t)cursor.leaf->base.key) { 891 kprintf("HAMMER: lo=%08x snapshot key " 892 "0x%016jx data mismatch 0x%016jx\n", 893 cursor.key_beg.localization, 894 (uintmax_t)cursor.data->snap.tid, 895 cursor.leaf->base.key); 896 kprintf("HAMMER: Probably left over from the " 897 "original v3 conversion, hammer " 898 "cleanup should get it eventually\n"); 899 snap->snaps[snap->count].tid = 900 cursor.leaf->base.key; 901 } 902 ++snap->count; 903 } 904 error = hammer_btree_iterate(&cursor); 905 } 906 907 if (error == ENOENT) { 908 snap->head.flags |= HAMMER_IOC_SNAPSHOT_EOF; 909 error = 0; 910 } 911 snap->head.error = error; 912 hammer_done_cursor(&cursor); 913 return(0); 914 } 915 916 /* 917 * Retrieve the PFS hammer cleanup utility config record. This is 918 * different (newer than) the PFS config. 
919 */ 920 static 921 int 922 hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip, 923 struct hammer_ioc_config *config) 924 { 925 struct hammer_cursor cursor; 926 int error; 927 928 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 929 if (error) { 930 hammer_done_cursor(&cursor); 931 return(error); 932 } 933 934 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 935 cursor.key_beg.create_tid = 0; 936 cursor.key_beg.delete_tid = 0; 937 cursor.key_beg.obj_type = 0; 938 cursor.key_beg.rec_type = HAMMER_RECTYPE_CONFIG; 939 cursor.key_beg.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 940 cursor.key_beg.key = 0; /* config space page 0 */ 941 942 cursor.asof = HAMMER_MAX_TID; 943 cursor.flags |= HAMMER_CURSOR_ASOF; 944 945 error = hammer_btree_lookup(&cursor); 946 if (error == 0) { 947 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF | 948 HAMMER_CURSOR_GET_DATA); 949 if (error == 0) 950 config->config = cursor.data->config; 951 } 952 /* error can be ENOENT */ 953 config->head.error = error; 954 hammer_done_cursor(&cursor); 955 return(0); 956 } 957 958 /* 959 * Retrieve the PFS hammer cleanup utility config record. This is 960 * different (newer than) the PFS config. 961 * 962 * This is kinda a hack. 
963 */ 964 static 965 int 966 hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip, 967 struct hammer_ioc_config *config) 968 { 969 struct hammer_btree_leaf_elm leaf; 970 struct hammer_cursor cursor; 971 hammer_mount_t hmp = ip->hmp; 972 int error; 973 974 again: 975 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 976 if (error) { 977 hammer_done_cursor(&cursor); 978 return(error); 979 } 980 981 bzero(&leaf, sizeof(leaf)); 982 leaf.base.obj_id = HAMMER_OBJID_ROOT; 983 leaf.base.rec_type = HAMMER_RECTYPE_CONFIG; 984 leaf.base.create_tid = hammer_alloc_tid(hmp, 1); 985 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; 986 leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 987 leaf.base.key = 0; /* page 0 */ 988 leaf.data_len = sizeof(struct hammer_config_data); 989 990 cursor.key_beg = leaf.base; 991 992 cursor.asof = HAMMER_MAX_TID; 993 cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF; 994 995 error = hammer_btree_lookup(&cursor); 996 if (error == 0) { 997 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF | 998 HAMMER_CURSOR_GET_DATA); 999 error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY, 1000 0, 0, 0, NULL); 1001 if (error == EDEADLK) { 1002 hammer_done_cursor(&cursor); 1003 goto again; 1004 } 1005 } 1006 if (error == ENOENT) 1007 error = 0; 1008 if (error == 0) { 1009 /* 1010 * NOTE: Must reload key_beg after an ASOF search because 1011 * the create_tid may have been modified during the 1012 * search. 
1013 */ 1014 cursor.flags &= ~HAMMER_CURSOR_ASOF; 1015 cursor.key_beg = leaf.base; 1016 error = hammer_create_at_cursor(&cursor, &leaf, 1017 &config->config, 1018 HAMMER_CREATE_MODE_SYS); 1019 if (error == EDEADLK) { 1020 hammer_done_cursor(&cursor); 1021 goto again; 1022 } 1023 } 1024 config->head.error = error; 1025 hammer_done_cursor(&cursor); 1026 return(0); 1027 } 1028 1029 static 1030 int 1031 hammer_ioc_pfs_iterate(hammer_transaction_t trans, 1032 struct hammer_ioc_pfs_iterate *pi) 1033 { 1034 struct hammer_cursor cursor; 1035 hammer_inode_t ip; 1036 int error; 1037 1038 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID, 1039 HAMMER_DEF_LOCALIZATION, 0, &error); 1040 1041 error = hammer_init_cursor(trans, &cursor, 1042 (ip ? &ip->cache[1] : NULL), ip); 1043 if (error) 1044 goto out; 1045 1046 pi->head.flags &= ~HAMMER_PFSD_DELETED; 1047 1048 cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION + 1049 HAMMER_LOCALIZE_MISC; 1050 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 1051 cursor.key_beg.create_tid = 0; 1052 cursor.key_beg.delete_tid = 0; 1053 cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS; 1054 cursor.key_beg.obj_type = 0; 1055 cursor.key_end = cursor.key_beg; 1056 cursor.key_end.key = HAMMER_MAX_KEY; 1057 cursor.asof = HAMMER_MAX_TID; 1058 cursor.flags |= HAMMER_CURSOR_ASOF; 1059 1060 if (pi->pos < 0) /* Sanity check */ 1061 pi->pos = 0; 1062 1063 pi->pos <<= 16; 1064 cursor.key_beg.key = pi->pos; 1065 error = hammer_ip_lookup(&cursor); 1066 1067 if (error == 0) { 1068 error = hammer_ip_resolve_data(&cursor); 1069 if (error) 1070 goto out; 1071 if (cursor.data->pfsd.mirror_flags & HAMMER_PFSD_DELETED) 1072 pi->head.flags |= HAMMER_PFSD_DELETED; 1073 else 1074 copyout(cursor.data, pi->ondisk, cursor.leaf->data_len); 1075 pi->pos = (u_int32_t)(cursor.leaf->base.key >> 16); 1076 } 1077 1078 out: 1079 hammer_done_cursor(&cursor); 1080 if (ip) 1081 hammer_rel_inode(ip, 0); 1082 1083 return (error); 1084 } 1085 1086 static 1087 int 1088 
hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip, 1089 struct hammer_ioc_data *data) 1090 { 1091 struct hammer_cursor cursor; 1092 int bytes; 1093 int error; 1094 1095 /* XXX cached inode ? */ 1096 error = hammer_init_cursor(trans, &cursor, NULL, NULL); 1097 if (error) 1098 goto failed; 1099 1100 cursor.key_beg = data->elm; 1101 cursor.flags |= HAMMER_CURSOR_BACKEND; 1102 1103 error = hammer_btree_lookup(&cursor); 1104 if (error == 0) { 1105 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF | 1106 HAMMER_CURSOR_GET_DATA); 1107 if (error == 0) { 1108 data->leaf = *cursor.leaf; 1109 bytes = cursor.leaf->data_len; 1110 if (bytes > data->size) 1111 bytes = data->size; 1112 error = copyout(cursor.data, data->ubuf, bytes); 1113 } 1114 } 1115 1116 failed: 1117 hammer_done_cursor(&cursor); 1118 return (error); 1119 } 1120