1 /* 2 * Copyright (c) 2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.32 2008/11/13 02:23:29 dillon Exp $
 */

#include "hammer.h"

static int hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_history *hist);
static int hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_synctid *std);
static int hammer_ioc_get_version(hammer_transaction_t trans,
				hammer_inode_t ip,
				struct hammer_ioc_version *ver);
static int hammer_ioc_set_version(hammer_transaction_t trans,
				hammer_inode_t ip,
				struct hammer_ioc_version *ver);
static int hammer_ioc_get_info(hammer_transaction_t trans,
				struct hammer_ioc_info *info);
static int hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_config *snap);
static int hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_config *snap);
static int hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_data *data);

/*
 * Ioctl entry point for HAMMER inodes.  One transaction wraps the whole
 * dispatch.
 *
 * The PRIV_HAMMER_IOCTL check result is latched into 'error' up front:
 * cases that modify the filesystem only execute when the check passed
 * (they test 'error == 0' first), while query-style cases (GETHISTORY,
 * SYNCTID, GET_PSEUDOFS, GET_VERSION, GET_INFO, LIST_VOLUMES,
 * GET_SNAPSHOT, GET_CONFIG, PFS_ITERATE) run regardless of it.
 * Volume add/del additionally require PRIV_HAMMER_VOLUME.
 */
int
hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
	     struct ucred *cred)
{
	struct hammer_transaction trans;
	int error;

	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

	hammer_start_transaction(&trans, ip->hmp);

	switch(com) {
	case HAMMERIOC_PRUNE:
		if (error == 0) {
			error = hammer_ioc_prune(&trans, ip,
					(struct hammer_ioc_prune *)data);
		}
		break;
	case HAMMERIOC_GETHISTORY:
		error = hammer_ioc_gethistory(&trans, ip,
					(struct hammer_ioc_history *)data);
		break;
	case HAMMERIOC_REBLOCK:
		if (error == 0) {
			error = hammer_ioc_reblock(&trans, ip,
					(struct hammer_ioc_reblock *)data);
		}
		break;
	case HAMMERIOC_REBALANCE:
		/*
		 * Rebalancing needs to lock a lot of B-Tree nodes.  The
		 * children and children's children.  Systems with very
		 * little memory will not be able to do it.
		 */
		if (error == 0 && nbuf < HAMMER_REBALANCE_MIN_BUFS) {
			hkprintf("System has insufficient buffers "
				"to rebalance the tree.  nbuf < %d\n",
				HAMMER_REBALANCE_MIN_BUFS);
			error = ENOSPC;
		}
		if (error == 0) {
			error = hammer_ioc_rebalance(&trans, ip,
					(struct hammer_ioc_rebalance *)data);
		}
		break;
	case HAMMERIOC_SYNCTID:
		error = hammer_ioc_synctid(&trans, ip,
					(struct hammer_ioc_synctid *)data);
		break;
	case HAMMERIOC_GET_PSEUDOFS:
		error = hammer_ioc_get_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		break;
	case HAMMERIOC_SET_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_set_pseudofs(&trans, ip, cred,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_UPG_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_upgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_DGD_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_downgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_RMR_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_destroy_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_WAI_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_wait_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_READ:
		if (error == 0) {
			error = hammer_ioc_mirror_read(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_WRITE:
		if (error == 0) {
			error = hammer_ioc_mirror_write(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_GET_VERSION:
		error = hammer_ioc_get_version(&trans, ip,
				    (struct hammer_ioc_version *)data);
		break;
	case HAMMERIOC_GET_INFO:
		error = hammer_ioc_get_info(&trans,
				    (struct hammer_ioc_info *)data);
		break;
	case HAMMERIOC_SET_VERSION:
		if (error == 0) {
			error = hammer_ioc_set_version(&trans, ip,
					    (struct hammer_ioc_version *)data);
		}
		break;
	case HAMMERIOC_ADD_VOLUME:
		/* requires the extra PRIV_HAMMER_VOLUME privilege */
		if (error == 0) {
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_add(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_DEL_VOLUME:
		/* requires the extra PRIV_HAMMER_VOLUME privilege */
		if (error == 0) {
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_del(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_LIST_VOLUMES:
		error = hammer_ioc_volume_list(&trans, ip,
		    (struct hammer_ioc_volume_list *)data);
		break;
	case HAMMERIOC_ADD_SNAPSHOT:
		if (error == 0) {
			error = hammer_ioc_add_snapshot(
					&trans, ip,
					(struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_DEL_SNAPSHOT:
		if (error == 0) {
			error = hammer_ioc_del_snapshot(
					&trans, ip,
					(struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_GET_SNAPSHOT:
		error = hammer_ioc_get_snapshot(
				&trans, ip,
				(struct hammer_ioc_snapshot *)data);
		break;
	case HAMMERIOC_GET_CONFIG:
		error = hammer_ioc_get_config(
				&trans, ip,
				(struct hammer_ioc_config *)data);
		break;
	case HAMMERIOC_SET_CONFIG:
		if (error == 0) {
			error = hammer_ioc_set_config(
					&trans, ip,
					(struct hammer_ioc_config *)data);
		}
		break;
	case HAMMERIOC_DEDUP:
		if (error == 0) {
			error = hammer_ioc_dedup(
					&trans, ip,
					(struct hammer_ioc_dedup *)data);
		}
		break;
	case HAMMERIOC_GET_DATA:
		if (error == 0) {
			error = hammer_ioc_get_data(
					&trans, ip,
					(struct hammer_ioc_data *)data);
		}
		break;
	case HAMMERIOC_PFS_ITERATE:
		error = hammer_ioc_iterate_pseudofs(
				&trans, ip,
				(struct hammer_ioc_pfs_iterate *)data);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Iterate through an object's inode or an object's records and record
 * modification TIDs.
 */
static void add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
			hammer_btree_elm_t elm);

/*
 * HAMMERIOC_GETHISTORY: scan the B-Tree for elements belonging to the
 * inode (or, with HAMMER_IOC_HISTORY_ATKEY, a key range within it) and
 * collect their create/delete TIDs into hist->hist_ary via add_history().
 *
 * On return the HISTORY_NEXT_TID / NEXT_KEY / EOF / UNSYNCED flags tell
 * the caller how to continue the iteration.  Returns 0 on success or an
 * errno; per-scan status is carried in hist->head.flags.
 */
static
int
hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_history *hist)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int error;

	/*
	 * Validate the structure and initialize for return.
	 */
	if (hist->beg_tid > hist->end_tid)
		return(EINVAL);
	if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		if (hist->key > hist->nxt_key)
			return(EINVAL);
	}

	hist->obj_id = ip->obj_id;
	hist->count = 0;
	hist->nxt_tid = hist->end_tid;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_TID;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_EOF;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_UNSYNCED;
	/* any dirty state other than atime/mtime means history is incomplete */
	if ((ip->flags & HAMMER_INODE_MODMASK) &
	    ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		hist->head.flags |= HAMMER_IOC_HISTORY_UNSYNCED;
	}

	/*
	 * Setup the cursor.  We can't handle undeletable records
	 * (create_tid of 0) at the moment.  A create_tid of 0 has
	 * a special meaning and cannot be specified in the cursor.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.key_beg.obj_id = hist->obj_id;
	cursor.key_beg.create_tid = hist->beg_tid;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (cursor.key_beg.create_tid == HAMMER_MIN_TID)
		cursor.key_beg.create_tid = 1;

	cursor.key_end.obj_id = hist->obj_id;
	cursor.key_end.create_tid = hist->end_tid;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE;

	if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * key-range within the file.  For a regular file the
		 * on-disk key represents BASE+LEN, not BASE, so the
		 * first possible record containing the offset 'key'
		 * has an on-disk key of (key + 1).
		 */
		cursor.key_beg.key = hist->key;
		cursor.key_end.key = HAMMER_MAX_KEY;
		cursor.key_beg.localization = ip->obj_localization |
					      HAMMER_LOCALIZE_MISC;
		cursor.key_end.localization = ip->obj_localization |
					      HAMMER_LOCALIZE_MISC;

		switch(ip->ino_data.obj_type) {
		case HAMMER_OBJTYPE_REGFILE:
			++cursor.key_beg.key;
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
			break;
		case HAMMER_OBJTYPE_DIRECTORY:
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
			cursor.key_beg.localization = ip->obj_localization |
						hammer_dir_localization(ip);
			cursor.key_end.localization = ip->obj_localization |
						hammer_dir_localization(ip);
			break;
		case HAMMER_OBJTYPE_DBFILE:
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
			break;
		default:
			/*
			 * NOTE(review): this EINVAL appears to be clobbered
			 * by the hammer_btree_first() assignment below and
			 * never returned — verify intent.
			 */
			error = EINVAL;
			break;
		}
		cursor.key_end.rec_type = cursor.key_beg.rec_type;
	} else {
		/*
		 * The inode itself.
		 */
		cursor.key_beg.key = 0;
		cursor.key_end.key = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_end.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.localization = ip->obj_localization |
					      HAMMER_LOCALIZE_INODE;
		cursor.key_end.localization = ip->obj_localization |
					      HAMMER_LOCALIZE_INODE;
	}

	/*
	 * Scan until add_history() reports the array is full (NEXT_TID),
	 * the key range is exhausted (NEXT_KEY), or EOF.
	 */
	error = hammer_btree_first(&cursor);
	while (error == 0) {
		elm = &cursor.node->ondisk->elms[cursor.index];

		add_history(ip, hist, elm);
		if (hist->head.flags & (HAMMER_IOC_HISTORY_NEXT_TID |
				        HAMMER_IOC_HISTORY_NEXT_KEY |
				        HAMMER_IOC_HISTORY_EOF)) {
			break;
		}
		error = hammer_btree_iterate(&cursor);
	}
	/* running off the end of the range is a normal EOF, not an error */
	if (error == ENOENT) {
		hist->head.flags |= HAMMER_IOC_HISTORY_EOF;
		error = 0;
	}
	hammer_done_cursor(&cursor);
	return(error);
}

/*
 * Add the scanned element to the ioctl return structure.  Some special
 * casing is required for regular files to accomodate how data ranges are
 * stored on-disk.
 *
 * Updates hist->hist_ary/count/nxt_key/nxt_tid and sets NEXT_TID or
 * NEXT_KEY in hist->head.flags when the caller should stop iterating.
 */
static void
add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
	    hammer_btree_elm_t elm)
{
	int i;

	/* only leaf records carry history */
	if (elm->base.btype != HAMMER_BTREE_TYPE_RECORD)
		return;
	if ((hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) &&
	    ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE) {
		/*
		 * Adjust nxt_key.  For a REGFILE the on-disk key is
		 * BASE+LEN, so the record covers [key - data_len, key).
		 */
		if (hist->nxt_key > elm->leaf.base.key - elm->leaf.data_len &&
		    hist->key < elm->leaf.base.key - elm->leaf.data_len) {
			hist->nxt_key = elm->leaf.base.key - elm->leaf.data_len;
		}
		if (hist->nxt_key > elm->leaf.base.key)
			hist->nxt_key = elm->leaf.base.key;

		/*
		 * Record is beyond MAXPHYS, there won't be any more records
		 * in the iteration covering the requested offset (key).
		 */
		if (elm->leaf.base.key >= MAXPHYS &&
		    elm->leaf.base.key - MAXPHYS > hist->key) {
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
		}

		/*
		 * Data-range of record does not cover the key.
		 */
		if (elm->leaf.base.key - elm->leaf.data_len > hist->key)
			return;

	} else if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * Adjust nxt_key
		 */
		if (hist->nxt_key > elm->leaf.base.key &&
		    hist->key < elm->leaf.base.key) {
			hist->nxt_key = elm->leaf.base.key;
		}

		/*
		 * Record is beyond the requested key.
		 */
		if (elm->leaf.base.key > hist->key)
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
	}

	/*
	 * Add create_tid if it is in-bounds.  The dedup against the
	 * previous entry (hist_ary[i - 1].tid) suppresses immediately
	 * repeated TIDs.
	 */
	i = hist->count;
	if ((i == 0 ||
	     elm->leaf.base.create_tid != hist->hist_ary[i - 1].tid) &&
	    elm->leaf.base.create_tid >= hist->beg_tid &&
	    elm->leaf.base.create_tid < hist->end_tid) {
		if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
			/* array full; tell caller where to resume */
			hist->nxt_tid = elm->leaf.base.create_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.create_tid;
		hist->hist_ary[i].time32 = elm->leaf.create_ts;
		++hist->count;
	}

	/*
	 * Add delete_tid if it is in-bounds.  Note that different portions
	 * of the history may have overlapping data ranges with different
	 * delete_tid's.  If this case occurs the delete_tid may match the
	 * create_tid of a following record.  XXX
	 *
	 *	[        ]
	 *            [     ]
	 */
	i = hist->count;
	if (elm->leaf.base.delete_tid &&
	    elm->leaf.base.delete_tid >= hist->beg_tid &&
	    elm->leaf.base.delete_tid < hist->end_tid) {
		if (i == HAMMER_MAX_HISTORY_ELMS) {
			/* array full; tell caller where to resume */
			hist->nxt_tid = elm->leaf.base.delete_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.delete_tid;
		hist->hist_ary[i].time32 = elm->leaf.delete_ts;
		++hist->count;
	}
}

/*
 * Acquire synchronization TID.
 *
 * std->op selects how hard to flush before sampling the flusher TID;
 * the NONE and ASYNC samples are inherently inaccurate since the
 * flusher may still be running.
 */
static
int
hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_synctid *std)
{
	hammer_mount_t hmp = ip->hmp;
	int error = 0;

	switch(std->op) {
	case HAMMER_SYNCTID_NONE:
		std->tid = hmp->flusher.tid;	/* inaccurate */
		break;
	case HAMMER_SYNCTID_ASYNC:
		hammer_queue_inodes_flusher(hmp, MNT_NOWAIT);
		hammer_flusher_async(hmp, NULL);
		std->tid = hmp->flusher.tid;	/* inaccurate */
		break;
	case HAMMER_SYNCTID_SYNC1:
		hammer_queue_inodes_flusher(hmp, MNT_WAIT);
		hammer_flusher_sync(hmp);
		std->tid = hmp->flusher.tid;
		break;
	case HAMMER_SYNCTID_SYNC2:
		hammer_queue_inodes_flusher(hmp, MNT_WAIT);
		hammer_flusher_sync(hmp);
		std->tid = hmp->flusher.tid;
		/* second sync pushes any meta-data generated by the first */
		hammer_flusher_sync(hmp);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}

/*
 * Retrieve version info.
 *
 * Load min_version, wip_version, and max_version.  If cur_version is passed
 * as 0 then load the current version into cur_version.  Load the description
 * for cur_version into the description array.
 *
 * Returns 0 on success, EINVAL if cur_version is non-zero and set to an
 * unsupported value.
519 */ 520 static 521 int 522 hammer_ioc_get_version(hammer_transaction_t trans, hammer_inode_t ip, 523 struct hammer_ioc_version *ver) 524 { 525 int error = 0; 526 527 ver->min_version = HAMMER_VOL_VERSION_MIN; 528 ver->wip_version = HAMMER_VOL_VERSION_WIP; 529 ver->max_version = HAMMER_VOL_VERSION_MAX; 530 if (ver->cur_version == 0) 531 ver->cur_version = trans->hmp->version; 532 switch(ver->cur_version) { 533 case 1: 534 ksnprintf(ver->description, sizeof(ver->description), 535 "First HAMMER release (DragonFly 2.0+)"); 536 break; 537 case 2: 538 ksnprintf(ver->description, sizeof(ver->description), 539 "New directory entry layout (DragonFly 2.3+)"); 540 break; 541 case 3: 542 ksnprintf(ver->description, sizeof(ver->description), 543 "New snapshot management (DragonFly 2.5+)"); 544 break; 545 case 4: 546 ksnprintf(ver->description, sizeof(ver->description), 547 "New undo/flush, faster flush/sync (DragonFly 2.5+)"); 548 break; 549 case 5: 550 ksnprintf(ver->description, sizeof(ver->description), 551 "Adjustments for dedup support (DragonFly 2.9+)"); 552 break; 553 case 6: 554 ksnprintf(ver->description, sizeof(ver->description), 555 "Directory Hash ALG1 (tmp/rename resistance)"); 556 break; 557 default: 558 ksnprintf(ver->description, sizeof(ver->description), 559 "Unknown"); 560 error = EINVAL; 561 break; 562 } 563 return(error); 564 }; 565 566 /* 567 * Set version info 568 */ 569 static 570 int 571 hammer_ioc_set_version(hammer_transaction_t trans, hammer_inode_t ip, 572 struct hammer_ioc_version *ver) 573 { 574 hammer_mount_t hmp = trans->hmp; 575 struct hammer_cursor cursor; 576 hammer_volume_t volume; 577 int error; 578 int over = hmp->version; 579 580 /* 581 * Generally do not allow downgrades. However, version 4 can 582 * be downgraded to version 3. 
583 */ 584 if (ver->cur_version < hmp->version) { 585 if (!(ver->cur_version == 3 && hmp->version == 4)) 586 return(EINVAL); 587 } 588 if (ver->cur_version == hmp->version) 589 return(0); 590 if (ver->cur_version > HAMMER_VOL_VERSION_MAX) 591 return(EINVAL); 592 if (hmp->ronly) 593 return(EROFS); 594 595 /* 596 * Update the root volume header and the version cached in 597 * the hammer_mount structure. 598 */ 599 error = hammer_init_cursor(trans, &cursor, NULL, NULL); 600 if (error) 601 goto failed; 602 hammer_lock_ex(&hmp->flusher.finalize_lock); 603 hammer_sync_lock_ex(trans); 604 hmp->version = ver->cur_version; 605 606 /* 607 * If upgrading from version < 4 to version >= 4 the UNDO FIFO 608 * must be reinitialized. 609 */ 610 if (over < HAMMER_VOL_VERSION_FOUR && 611 ver->cur_version >= HAMMER_VOL_VERSION_FOUR) { 612 hkprintf("upgrade undo to version 4\n"); 613 error = hammer_upgrade_undo_4(trans); 614 if (error) 615 goto failed; 616 } 617 618 /* 619 * Adjust the version in the volume header 620 */ 621 volume = hammer_get_root_volume(hmp, &error); 622 KKASSERT(error == 0); 623 hammer_modify_volume_field(cursor.trans, volume, vol_version); 624 volume->ondisk->vol_version = ver->cur_version; 625 hammer_modify_volume_done(volume); 626 hammer_rel_volume(volume, 0); 627 628 hammer_sync_unlock(trans); 629 hammer_unlock(&hmp->flusher.finalize_lock); 630 failed: 631 ver->head.error = error; 632 hammer_done_cursor(&cursor); 633 return(0); 634 } 635 636 /* 637 * Get information 638 */ 639 static 640 int 641 hammer_ioc_get_info(hammer_transaction_t trans, struct hammer_ioc_info *info) 642 { 643 struct hammer_volume_ondisk *od = trans->hmp->rootvol->ondisk; 644 struct hammer_mount *hm = trans->hmp; 645 646 /* Fill the structure with the necessary information */ 647 _hammer_checkspace(hm, HAMMER_CHKSPC_WRITE, &info->rsvbigblocks); 648 info->rsvbigblocks = info->rsvbigblocks >> HAMMER_BIGBLOCK_BITS; 649 strlcpy(info->vol_label, od->vol_label, sizeof(od->vol_label)); 650 651 
info->vol_fsid = hm->fsid; 652 info->vol_fstype = od->vol_fstype; 653 info->version = hm->version; 654 655 info->inodes = od->vol0_stat_inodes; 656 info->bigblocks = od->vol0_stat_bigblocks; 657 info->freebigblocks = od->vol0_stat_freebigblocks; 658 info->nvolumes = hm->nvolumes; 659 info->rootvol = od->vol_rootvol; /* must be 0 */ 660 661 return 0; 662 } 663 664 /* 665 * Add a snapshot transaction id(s) to the list of snapshots. 666 * 667 * NOTE: Records are created with an allocated TID. If a flush cycle 668 * is in progress the record may be synced in the current flush 669 * cycle and the volume header will reflect the allocation of the 670 * TID, but the synchronization point may not catch up to the 671 * TID until the next flush cycle. 672 */ 673 static 674 int 675 hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 676 struct hammer_ioc_snapshot *snap) 677 { 678 hammer_mount_t hmp = ip->hmp; 679 struct hammer_btree_leaf_elm leaf; 680 struct hammer_cursor cursor; 681 int error; 682 683 /* 684 * Validate structure 685 */ 686 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 687 return (EINVAL); 688 if (snap->index >= snap->count) 689 return (EINVAL); 690 691 hammer_lock_ex(&hmp->snapshot_lock); 692 again: 693 /* 694 * Look for keys starting after the previous iteration, or at 695 * the beginning if snap->count is 0. 
696 */ 697 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 698 if (error) { 699 hammer_done_cursor(&cursor); 700 return(error); 701 } 702 703 cursor.asof = HAMMER_MAX_TID; 704 cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF; 705 706 bzero(&leaf, sizeof(leaf)); 707 leaf.base.obj_id = HAMMER_OBJID_ROOT; 708 leaf.base.rec_type = HAMMER_RECTYPE_SNAPSHOT; 709 leaf.base.create_tid = hammer_alloc_tid(hmp, 1); 710 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; 711 leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE; 712 leaf.data_len = sizeof(struct hammer_snapshot_data); 713 714 while (snap->index < snap->count) { 715 leaf.base.key = (int64_t)snap->snaps[snap->index].tid; 716 cursor.key_beg = leaf.base; 717 error = hammer_btree_lookup(&cursor); 718 if (error == 0) { 719 error = EEXIST; 720 break; 721 } 722 723 /* 724 * NOTE: Must reload key_beg after an ASOF search because 725 * the create_tid may have been modified during the 726 * search. 727 */ 728 cursor.flags &= ~HAMMER_CURSOR_ASOF; 729 cursor.key_beg = leaf.base; 730 error = hammer_create_at_cursor(&cursor, &leaf, 731 &snap->snaps[snap->index], 732 HAMMER_CREATE_MODE_SYS); 733 if (error == EDEADLK) { 734 hammer_done_cursor(&cursor); 735 goto again; 736 } 737 cursor.flags |= HAMMER_CURSOR_ASOF; 738 if (error) 739 break; 740 ++snap->index; 741 } 742 snap->head.error = error; 743 hammer_done_cursor(&cursor); 744 hammer_unlock(&hmp->snapshot_lock); 745 return(0); 746 } 747 748 /* 749 * Delete snapshot transaction id(s) from the list of snapshots. 
 */
static
int
hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_snapshot *snap)
{
	hammer_mount_t hmp = ip->hmp;
	struct hammer_cursor cursor;
	int error;

	/*
	 * Validate structure
	 */
	if (snap->count > HAMMER_SNAPS_PER_IOCTL)
		return (EINVAL);
	if (snap->index >= snap->count)
		return (EINVAL);

	hammer_lock_ex(&hmp->snapshot_lock);
again:
	/*
	 * (Re)acquire a cursor for the snapshot meta-records; we restart
	 * from here after a deadlock.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
	cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Destroy the snapshot record for each requested TID.  snap->index
	 * tracks progress so a partially-completed ioctl can be resumed by
	 * the caller; the first lookup/extract/delete failure stops the
	 * loop and is reported in snap->head.error.
	 */
	while (snap->index < snap->count) {
		cursor.key_beg.key = (int64_t)snap->snaps[snap->index].tid;
		error = hammer_btree_lookup(&cursor);
		if (error)
			break;
		error = hammer_btree_extract_leaf(&cursor);
		if (error)
			break;
		error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
						0, 0, 0, NULL);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
		if (error)
			break;
		++snap->index;
	}
	snap->head.error = error;
	hammer_done_cursor(&cursor);
	hammer_unlock(&hmp->snapshot_lock);
	return(0);
}

/*
 * Retrieve as many snapshot ids as possible or until the array is
 * full, starting after the last transaction id passed in.  If count
 * is 0 we retrieve starting at the beginning.
 *
 * NOTE: Because the b-tree key field is signed but transaction ids
 *	 are unsigned the returned list will be signed-sorted instead
 *	 of unsigned sorted.  The Caller must still sort the aggregate
 *	 results.
 */
static
int
hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_snapshot *snap)
{
	struct hammer_cursor cursor;
	int error;

	/*
	 * Validate structure
	 */
	if (snap->index != 0)
		return (EINVAL);
	if (snap->count > HAMMER_SNAPS_PER_IOCTL)
		return (EINVAL);

	/*
	 * Look for keys starting after the previous iteration, or at
	 * the beginning if snap->count is 0.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
	cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	if (snap->count == 0)
		cursor.key_beg.key = HAMMER_MIN_KEY;
	else
		cursor.key_beg.key = (int64_t)snap->snaps[snap->count - 1].tid + 1;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE | HAMMER_CURSOR_ASOF;

	/* the input count was only a resume point; reset for output */
	snap->count = 0;

	error = hammer_btree_first(&cursor);
	while (error == 0 && snap->count < HAMMER_SNAPS_PER_IOCTL) {
		error = hammer_btree_extract_leaf(&cursor);
		if (error)
			break;
		if (cursor.leaf->base.rec_type == HAMMER_RECTYPE_SNAPSHOT) {
			error = hammer_btree_extract_data(&cursor);
			snap->snaps[snap->count] = cursor.data->snap;

			/*
			 * The snap data tid should match the key but might
			 * not due to a bug in the HAMMER v3 conversion code.
			 *
			 * This error will work itself out over time but we
			 * have to force a match or the snapshot will not
			 * be deletable.
			 */
			if (cursor.data->snap.tid !=
			    (hammer_tid_t)cursor.leaf->base.key) {
				hkprintf("lo=%08x snapshot key "
					"0x%016jx data mismatch 0x%016jx\n",
					cursor.key_beg.localization,
					(uintmax_t)cursor.data->snap.tid,
					cursor.leaf->base.key);
				hkprintf("Probably left over from the "
					"original v3 conversion, hammer "
					"cleanup should get it eventually\n");
				snap->snaps[snap->count].tid =
					cursor.leaf->base.key;
			}
			++snap->count;
		}
		error = hammer_btree_iterate(&cursor);
	}

	/* exhausting the key range is a normal EOF, not an error */
	if (error == ENOENT) {
		snap->head.flags |= HAMMER_IOC_SNAPSHOT_EOF;
		error = 0;
	}
	snap->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}

/*
 * Retrieve the PFS hammer cleanup utility config record.  This is
 * different (newer than) the PFS config.
 */
static
int
hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_config *config)
{
	struct hammer_cursor cursor;
	int error;

	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_CONFIG;
	cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	cursor.key_beg.key = 0;		/* config space page 0 */

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		error = hammer_btree_extract_data(&cursor);
		if (error == 0)
			config->config = cursor.data->config;
	}
	/* error can be ENOENT */
	config->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}

/*
 * Retrieve the PFS hammer cleanup utility config record.  This is
 * different (newer than) the PFS config.
 *
 * This is kinda a hack.
 */
static
int
hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_config *config)
{
	struct hammer_btree_leaf_elm leaf;
	struct hammer_cursor cursor;
	hammer_mount_t hmp = ip->hmp;
	int error;

again:
	/* restart point after a B-Tree deadlock */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	/* template for the replacement CONFIG record (page 0) */
	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = HAMMER_OBJID_ROOT;
	leaf.base.rec_type = HAMMER_RECTYPE_CONFIG;
	leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	leaf.base.key = 0;	/* page 0 */
	leaf.data_len = sizeof(struct hammer_config_data);

	cursor.key_beg = leaf.base;

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;

	/*
	 * Destroy any existing config record, then create the new one.
	 */
	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		/*
		 * NOTE(review): the extract_data() result is immediately
		 * overwritten by the delete_at_cursor() result — verify
		 * whether an extract failure should abort the delete.
		 */
		error = hammer_btree_extract_data(&cursor);
		error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
						0, 0, 0, NULL);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	/* no pre-existing record is fine */
	if (error == ENOENT)
		error = 0;
	if (error == 0) {
		/*
		 * NOTE: Must reload key_beg after an ASOF search because
		 *	 the create_tid may have been modified during the
		 *	 search.
		 */
		cursor.flags &= ~HAMMER_CURSOR_ASOF;
		cursor.key_beg = leaf.base;
		error = hammer_create_at_cursor(&cursor, &leaf,
						&config->config,
						HAMMER_CREATE_MODE_SYS);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	config->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}

/*
 * HAMMERIOC_GET_DATA: look up the B-Tree element specified by data->elm
 * and copy its leaf plus up to data->size bytes of record data out to
 * the user buffer data->ubuf.
 */
static
int
hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_data *data)
{
	struct hammer_cursor cursor;
	int bytes;
	int error;

	/* XXX cached inode ? */
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error)
		goto failed;

	cursor.key_beg = data->elm;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		error = hammer_btree_extract_data(&cursor);
		if (error == 0) {
			data->leaf = *cursor.leaf;
			/* clamp the copy to the caller's buffer size */
			bytes = cursor.leaf->data_len;
			if (bytes > data->size)
				bytes = data->size;
			error = copyout(cursor.data, data->ubuf, bytes);
		}
	}

failed:
	hammer_done_cursor(&cursor);
	return (error);
}