1 /* 2 * Copyright (c) 2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.32 2008/11/13 02:23:29 dillon Exp $
 */

#include "hammer.h"

/*
 * Forward declarations for the ioctl workers implemented in this file.
 * Workers not listed here (prune, reblock, rebalance, pseudofs, mirror,
 * volume, dedup) are implemented in other HAMMER source files.
 */
static int hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_history *hist);
static int hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_synctid *std);
static int hammer_ioc_get_version(hammer_transaction_t trans,
				hammer_inode_t ip,
				struct hammer_ioc_version *ver);
static int hammer_ioc_set_version(hammer_transaction_t trans,
				hammer_inode_t ip,
				struct hammer_ioc_version *ver);
static int hammer_ioc_get_info(hammer_transaction_t trans,
				struct hammer_ioc_info *info);
static int hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_config *snap);
static int hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_config *snap);
static int hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_data *data);

/*
 * Main HAMMER ioctl dispatcher.
 *
 * PRIV_HAMMER_IOCTL is checked once up front but its result is only
 * consulted (error == 0) for operations which modify the filesystem.
 * Read-only queries (GETHISTORY, SYNCTID, GET_PSEUDOFS, GET_VERSION,
 * GET_INFO, LIST_VOLUMES, GET_SNAPSHOT, GET_CONFIG) dispatch without
 * regard to the privilege check.  Volume add/del additionally require
 * PRIV_HAMMER_VOLUME.
 *
 * Every operation runs bracketed by a HAMMER transaction.  The error
 * code is returned directly; several workers also record a per-call
 * status in the ioctl structure's head.error field.
 */
int
hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
	     struct ucred *cred)
{
	struct hammer_transaction trans;
	int error;

	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

	hammer_start_transaction(&trans, ip->hmp);

	switch(com) {
	case HAMMERIOC_PRUNE:
		if (error == 0) {
			error = hammer_ioc_prune(&trans, ip,
					(struct hammer_ioc_prune *)data);
		}
		break;
	case HAMMERIOC_GETHISTORY:
		error = hammer_ioc_gethistory(&trans, ip,
					(struct hammer_ioc_history *)data);
		break;
	case HAMMERIOC_REBLOCK:
		if (error == 0) {
			error = hammer_ioc_reblock(&trans, ip,
					(struct hammer_ioc_reblock *)data);
		}
		break;
	case HAMMERIOC_REBALANCE:
		/*
		 * Rebalancing needs to lock a lot of B-Tree nodes.  The
		 * children and children's children.  Systems with very
		 * little memory will not be able to do it.
		 */
		if (error == 0 && nbuf < HAMMER_REBALANCE_MIN_BUFS) {
			kprintf("hammer: System has insufficient buffers "
				"to rebalance the tree. nbuf < %d\n",
				HAMMER_REBALANCE_MIN_BUFS);
			error = ENOSPC;
		}
		if (error == 0) {
			error = hammer_ioc_rebalance(&trans, ip,
					(struct hammer_ioc_rebalance *)data);
		}
		break;
	case HAMMERIOC_SYNCTID:
		error = hammer_ioc_synctid(&trans, ip,
					(struct hammer_ioc_synctid *)data);
		break;
	case HAMMERIOC_GET_PSEUDOFS:
		error = hammer_ioc_get_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		break;
	case HAMMERIOC_SET_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_set_pseudofs(&trans, ip, cred,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_UPG_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_upgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_DGD_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_downgrade_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_RMR_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_destroy_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_WAI_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_wait_pseudofs(&trans, ip,
				    (struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_READ:
		if (error == 0) {
			error = hammer_ioc_mirror_read(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_WRITE:
		if (error == 0) {
			error = hammer_ioc_mirror_write(&trans, ip,
				    (struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_GET_VERSION:
		error = hammer_ioc_get_version(&trans, ip,
				    (struct hammer_ioc_version *)data);
		break;
	case HAMMERIOC_GET_INFO:
		error = hammer_ioc_get_info(&trans,
				    (struct hammer_ioc_info *)data);
		break;
	case HAMMERIOC_SET_VERSION:
		if (error == 0) {
			error = hammer_ioc_set_version(&trans, ip,
					    (struct hammer_ioc_version *)data);
		}
		break;
	case HAMMERIOC_ADD_VOLUME:
		if (error == 0) {
			/* volume manipulation requires an extra privilege */
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_add(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_DEL_VOLUME:
		if (error == 0) {
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_del(&trans, ip,
					    (struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_LIST_VOLUMES:
		error = hammer_ioc_volume_list(&trans, ip,
		    (struct hammer_ioc_volume_list *)data);
		break;
	case HAMMERIOC_ADD_SNAPSHOT:
		if (error == 0) {
			error = hammer_ioc_add_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_DEL_SNAPSHOT:
		if (error == 0) {
			error = hammer_ioc_del_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_GET_SNAPSHOT:
		error = hammer_ioc_get_snapshot(
					&trans, ip, (struct hammer_ioc_snapshot *)data);
		break;
	case HAMMERIOC_GET_CONFIG:
		error = hammer_ioc_get_config(
					&trans, ip, (struct hammer_ioc_config *)data);
		break;
	case HAMMERIOC_SET_CONFIG:
		if (error == 0) {
			error = hammer_ioc_set_config(
					&trans, ip, (struct hammer_ioc_config *)data);
		}
		break;
	case HAMMERIOC_DEDUP:
		if (error == 0) {
			error = hammer_ioc_dedup(
					&trans, ip, (struct hammer_ioc_dedup *)data);
		}
		break;
	case HAMMERIOC_GET_DATA:
		if (error == 0) {
			error = hammer_ioc_get_data(
					&trans, ip, (struct hammer_ioc_data *)data);
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Iterate through an object's inode or an object's records and record
 * modification TIDs.
 */
static void add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
			hammer_btree_elm_t elm);

/*
 * HAMMERIOC_GETHISTORY worker.  Scans the B-Tree for records belonging
 * to the inode (or, with HAMMER_IOC_HISTORY_ATKEY, records covering a
 * particular file offset/key) and accumulates their create/delete TIDs
 * into hist->hist_ary via add_history().
 *
 * On return hist->head.flags carries NEXT_TID/NEXT_KEY continuation
 * hints, EOF when the iteration completed, and UNSYNCED when the inode
 * has unflushed modifications other than atime/mtime.
 */
static
int
hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_history *hist)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int error;

	/*
	 * Validate the structure and initialize for return.
	 */
	if (hist->beg_tid > hist->end_tid)
		return(EINVAL);
	if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		if (hist->key > hist->nxt_key)
			return(EINVAL);
	}

	hist->obj_id = ip->obj_id;
	hist->count = 0;
	hist->nxt_tid = hist->end_tid;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_TID;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_EOF;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_UNSYNCED;
	/* flag unflushed modifications other than atime/mtime updates */
	if ((ip->flags & HAMMER_INODE_MODMASK) &
	    ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		hist->head.flags |= HAMMER_IOC_HISTORY_UNSYNCED;
	}

	/*
	 * Setup the cursor.  We can't handle undeletable records
	 * (create_tid of 0) at the moment.  A create_tid of 0 has
	 * a special meaning and cannot be specified in the cursor.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.key_beg.obj_id = hist->obj_id;
	cursor.key_beg.create_tid = hist->beg_tid;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (cursor.key_beg.create_tid == HAMMER_MIN_TID)
		cursor.key_beg.create_tid = 1;

	cursor.key_end.obj_id = hist->obj_id;
	cursor.key_end.create_tid = hist->end_tid;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE;

	if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * key-range within the file.  For a regular file the
		 * on-disk key represents BASE+LEN, not BASE, so the
		 * first possible record containing the offset 'key'
		 * has an on-disk key of (key + 1).
		 */
		cursor.key_beg.key = hist->key;
		cursor.key_end.key = HAMMER_MAX_KEY;
		cursor.key_beg.localization = ip->obj_localization +
					      HAMMER_LOCALIZE_MISC;
		cursor.key_end.localization = ip->obj_localization +
					      HAMMER_LOCALIZE_MISC;

		switch(ip->ino_data.obj_type) {
		case HAMMER_OBJTYPE_REGFILE:
			++cursor.key_beg.key;
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
			break;
		case HAMMER_OBJTYPE_DIRECTORY:
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
			cursor.key_beg.localization = ip->obj_localization +
						hammer_dir_localization(ip);
			cursor.key_end.localization = ip->obj_localization +
						hammer_dir_localization(ip);
			break;
		case HAMMER_OBJTYPE_DBFILE:
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
			break;
		default:
			/*
			 * NOTE(review): this EINVAL is overwritten by the
			 * hammer_btree_first() call below, so unsupported
			 * object types fall through to an iteration with
			 * rec_type 0 rather than failing — confirm whether
			 * that is intentional.
			 */
			error = EINVAL;
			break;
		}
		cursor.key_end.rec_type = cursor.key_beg.rec_type;
	} else {
		/*
		 * The inode itself.
		 */
		cursor.key_beg.key = 0;
		cursor.key_end.key = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_end.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.localization = ip->obj_localization +
					      HAMMER_LOCALIZE_INODE;
		cursor.key_end.localization = ip->obj_localization +
					      HAMMER_LOCALIZE_INODE;
	}

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		elm = &cursor.node->ondisk->elms[cursor.index];

		add_history(ip, hist, elm);
		/* stop when add_history signalled a continuation or EOF */
		if (hist->head.flags & (HAMMER_IOC_HISTORY_NEXT_TID |
				        HAMMER_IOC_HISTORY_NEXT_KEY |
				        HAMMER_IOC_HISTORY_EOF)) {
			break;
		}
		error = hammer_btree_iterate(&cursor);
	}
	if (error == ENOENT) {
		hist->head.flags |= HAMMER_IOC_HISTORY_EOF;
		error = 0;
	}
	hammer_done_cursor(&cursor);
	return(error);
}

/*
 * Add the scanned element to the ioctl return structure.  Some special
 * casing is required for regular files to accommodate how data ranges are
 * stored on-disk.
 */
static void
add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
	    hammer_btree_elm_t elm)
{
	int i;

	/* only leaf RECORD elements carry history */
	if (elm->base.btype != HAMMER_BTREE_TYPE_RECORD)
		return;
	if ((hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) &&
	    ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE) {
		/*
		 * Adjust nxt_key.  Regular-file records are keyed by
		 * BASE+LEN, so the record's data range starts at
		 * (key - data_len).
		 */
		if (hist->nxt_key > elm->leaf.base.key - elm->leaf.data_len &&
		    hist->key < elm->leaf.base.key - elm->leaf.data_len) {
			hist->nxt_key = elm->leaf.base.key - elm->leaf.data_len;
		}
		if (hist->nxt_key > elm->leaf.base.key)
			hist->nxt_key = elm->leaf.base.key;

		/*
		 * Record is beyond MAXPHYS, there won't be any more records
		 * in the iteration covering the requested offset (key).
		 */
		if (elm->leaf.base.key >= MAXPHYS &&
		    elm->leaf.base.key - MAXPHYS > hist->key) {
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
		}

		/*
		 * Data-range of record does not cover the key.
		 */
		if (elm->leaf.base.key - elm->leaf.data_len > hist->key)
			return;

	} else if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * Adjust nxt_key
		 */
		if (hist->nxt_key > elm->leaf.base.key &&
		    hist->key < elm->leaf.base.key) {
			hist->nxt_key = elm->leaf.base.key;
		}

		/*
		 * Record is beyond the requested key.
		 */
		if (elm->leaf.base.key > hist->key)
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
	}

	/*
	 * Add create_tid if it is in-bounds (and not a duplicate of the
	 * previously recorded tid).
	 */
	i = hist->count;
	if ((i == 0 ||
	     elm->leaf.base.create_tid != hist->hist_ary[i - 1].tid) &&
	    elm->leaf.base.create_tid >= hist->beg_tid &&
	    elm->leaf.base.create_tid < hist->end_tid) {
		if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
			/* array full; tell caller where to resume */
			hist->nxt_tid = elm->leaf.base.create_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.create_tid;
		hist->hist_ary[i].time32 = elm->leaf.create_ts;
		++hist->count;
	}

	/*
	 * Add delete_tid if it is in-bounds.  Note that different portions
	 * of the history may have overlapping data ranges with different
	 * delete_tid's.  If this case occurs the delete_tid may match the
	 * create_tid of a following record.  XXX
	 *
	 *	[ ]
	 *	 [     ]
	 */
	i = hist->count;
	if (elm->leaf.base.delete_tid &&
	    elm->leaf.base.delete_tid >= hist->beg_tid &&
	    elm->leaf.base.delete_tid < hist->end_tid) {
		if (i == HAMMER_MAX_HISTORY_ELMS) {
			hist->nxt_tid = elm->leaf.base.delete_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.delete_tid;
		hist->hist_ary[i].time32 = elm->leaf.delete_ts;
		++hist->count;
	}
}

/*
 * Acquire synchronization TID.  Depending on std->op the returned TID
 * is a snapshot of the flusher's current TID (possibly inaccurate) or
 * the TID after one or two full flush/sync cycles.
 */
static
int
hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_synctid *std)
{
	hammer_mount_t hmp = ip->hmp;
	int error = 0;

	switch(std->op) {
	case HAMMER_SYNCTID_NONE:
		std->tid = hmp->flusher.tid;	/* inaccurate */
		break;
	case HAMMER_SYNCTID_ASYNC:
		hammer_queue_inodes_flusher(hmp, MNT_NOWAIT);
		hammer_flusher_async(hmp, NULL);
		std->tid = hmp->flusher.tid;	/* inaccurate */
		break;
	case HAMMER_SYNCTID_SYNC1:
		hammer_queue_inodes_flusher(hmp, MNT_WAIT);
		hammer_flusher_sync(hmp);
		std->tid = hmp->flusher.tid;
		break;
	case HAMMER_SYNCTID_SYNC2:
		hammer_queue_inodes_flusher(hmp, MNT_WAIT);
		hammer_flusher_sync(hmp);
		std->tid = hmp->flusher.tid;
		/* second sync ensures the returned tid is fully on-media */
		hammer_flusher_sync(hmp);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}

/*
 * Retrieve version info.
 *
 * Load min_version, wip_version, and max_version.  If cur_version is passed
 * as 0 then load the current version into cur_version.  Load the description
 * for cur_version into the description array.
 *
 * Returns 0 on success, EINVAL if cur_version is non-zero and set to an
 * unsupported value.
515 */ 516 static 517 int 518 hammer_ioc_get_version(hammer_transaction_t trans, hammer_inode_t ip, 519 struct hammer_ioc_version *ver) 520 { 521 int error = 0; 522 523 ver->min_version = HAMMER_VOL_VERSION_MIN; 524 ver->wip_version = HAMMER_VOL_VERSION_WIP; 525 ver->max_version = HAMMER_VOL_VERSION_MAX; 526 if (ver->cur_version == 0) 527 ver->cur_version = trans->hmp->version; 528 switch(ver->cur_version) { 529 case 1: 530 ksnprintf(ver->description, sizeof(ver->description), 531 "First HAMMER release (DragonFly 2.0+)"); 532 break; 533 case 2: 534 ksnprintf(ver->description, sizeof(ver->description), 535 "New directory entry layout (DragonFly 2.3+)"); 536 break; 537 case 3: 538 ksnprintf(ver->description, sizeof(ver->description), 539 "New snapshot management (DragonFly 2.5+)"); 540 break; 541 case 4: 542 ksnprintf(ver->description, sizeof(ver->description), 543 "New undo/flush, faster flush/sync (DragonFly 2.5+)"); 544 break; 545 case 5: 546 ksnprintf(ver->description, sizeof(ver->description), 547 "Adjustments for dedup support (DragonFly 2.9+)"); 548 break; 549 default: 550 ksnprintf(ver->description, sizeof(ver->description), 551 "Unknown"); 552 error = EINVAL; 553 break; 554 } 555 return(error); 556 }; 557 558 /* 559 * Set version info 560 */ 561 static 562 int 563 hammer_ioc_set_version(hammer_transaction_t trans, hammer_inode_t ip, 564 struct hammer_ioc_version *ver) 565 { 566 hammer_mount_t hmp = trans->hmp; 567 struct hammer_cursor cursor; 568 hammer_volume_t volume; 569 int error; 570 int over = hmp->version; 571 572 /* 573 * Generally do not allow downgrades. However, version 4 can 574 * be downgraded to version 3. 
575 */ 576 if (ver->cur_version < hmp->version) { 577 if (!(ver->cur_version == 3 && hmp->version == 4)) 578 return(EINVAL); 579 } 580 if (ver->cur_version == hmp->version) 581 return(0); 582 if (ver->cur_version > HAMMER_VOL_VERSION_MAX) 583 return(EINVAL); 584 if (hmp->ronly) 585 return(EROFS); 586 587 /* 588 * Update the root volume header and the version cached in 589 * the hammer_mount structure. 590 */ 591 error = hammer_init_cursor(trans, &cursor, NULL, NULL); 592 if (error) 593 goto failed; 594 hammer_lock_ex(&hmp->flusher.finalize_lock); 595 hammer_sync_lock_ex(trans); 596 hmp->version = ver->cur_version; 597 598 /* 599 * If upgrading from version < 4 to version >= 4 the UNDO FIFO 600 * must be reinitialized. 601 */ 602 if (over < HAMMER_VOL_VERSION_FOUR && 603 ver->cur_version >= HAMMER_VOL_VERSION_FOUR) { 604 kprintf("upgrade undo to version 4\n"); 605 error = hammer_upgrade_undo_4(trans); 606 if (error) 607 goto failed; 608 } 609 610 /* 611 * Adjust the version in the volume header 612 */ 613 volume = hammer_get_root_volume(hmp, &error); 614 KKASSERT(error == 0); 615 hammer_modify_volume_field(cursor.trans, volume, vol_version); 616 volume->ondisk->vol_version = ver->cur_version; 617 hammer_modify_volume_done(volume); 618 hammer_rel_volume(volume, 0); 619 620 hammer_sync_unlock(trans); 621 hammer_unlock(&hmp->flusher.finalize_lock); 622 failed: 623 ver->head.error = error; 624 hammer_done_cursor(&cursor); 625 return(0); 626 } 627 628 /* 629 * Get information 630 */ 631 static 632 int 633 hammer_ioc_get_info(hammer_transaction_t trans, struct hammer_ioc_info *info) { 634 635 struct hammer_volume_ondisk *od = trans->hmp->rootvol->ondisk; 636 struct hammer_mount *hm = trans->hmp; 637 638 /* Fill the structure with the necessary information */ 639 _hammer_checkspace(hm, HAMMER_CHKSPC_WRITE, &info->rsvbigblocks); 640 info->rsvbigblocks = info->rsvbigblocks >> HAMMER_LARGEBLOCK_BITS; 641 strlcpy(info->vol_name, od->vol_name, sizeof(od->vol_name)); 642 643 
info->vol_fsid = hm->fsid; 644 info->vol_fstype = od->vol_fstype; 645 info->version = hm->version; 646 647 info->inodes = od->vol0_stat_inodes; 648 info->bigblocks = od->vol0_stat_bigblocks; 649 info->freebigblocks = od->vol0_stat_freebigblocks; 650 info->nvolumes = hm->nvolumes; 651 652 return 0; 653 } 654 655 /* 656 * Add a snapshot transction id(s) to the list of snapshots. 657 * 658 * NOTE: Records are created with an allocated TID. If a flush cycle 659 * is in progress the record may be synced in the current flush 660 * cycle and the volume header will reflect the allocation of the 661 * TID, but the synchronization point may not catch up to the 662 * TID until the next flush cycle. 663 */ 664 static 665 int 666 hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 667 struct hammer_ioc_snapshot *snap) 668 { 669 hammer_mount_t hmp = ip->hmp; 670 struct hammer_btree_leaf_elm leaf; 671 struct hammer_cursor cursor; 672 int error; 673 674 /* 675 * Validate structure 676 */ 677 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 678 return (EINVAL); 679 if (snap->index > snap->count) 680 return (EINVAL); 681 682 hammer_lock_ex(&hmp->snapshot_lock); 683 again: 684 /* 685 * Look for keys starting after the previous iteration, or at 686 * the beginning if snap->count is 0. 
687 */ 688 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 689 if (error) { 690 hammer_done_cursor(&cursor); 691 return(error); 692 } 693 694 cursor.asof = HAMMER_MAX_TID; 695 cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF; 696 697 bzero(&leaf, sizeof(leaf)); 698 leaf.base.obj_id = HAMMER_OBJID_ROOT; 699 leaf.base.rec_type = HAMMER_RECTYPE_SNAPSHOT; 700 leaf.base.create_tid = hammer_alloc_tid(hmp, 1); 701 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; 702 leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 703 leaf.data_len = sizeof(struct hammer_snapshot_data); 704 705 while (snap->index < snap->count) { 706 leaf.base.key = (int64_t)snap->snaps[snap->index].tid; 707 cursor.key_beg = leaf.base; 708 error = hammer_btree_lookup(&cursor); 709 if (error == 0) { 710 error = EEXIST; 711 break; 712 } 713 714 /* 715 * NOTE: Must reload key_beg after an ASOF search because 716 * the create_tid may have been modified during the 717 * search. 718 */ 719 cursor.flags &= ~HAMMER_CURSOR_ASOF; 720 cursor.key_beg = leaf.base; 721 error = hammer_create_at_cursor(&cursor, &leaf, 722 &snap->snaps[snap->index], 723 HAMMER_CREATE_MODE_SYS); 724 if (error == EDEADLK) { 725 hammer_done_cursor(&cursor); 726 goto again; 727 } 728 cursor.flags |= HAMMER_CURSOR_ASOF; 729 if (error) 730 break; 731 ++snap->index; 732 } 733 snap->head.error = error; 734 hammer_done_cursor(&cursor); 735 hammer_unlock(&hmp->snapshot_lock); 736 return(0); 737 } 738 739 /* 740 * Delete snapshot transaction id(s) from the list of snapshots. 
741 */ 742 static 743 int 744 hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 745 struct hammer_ioc_snapshot *snap) 746 { 747 hammer_mount_t hmp = ip->hmp; 748 struct hammer_cursor cursor; 749 int error; 750 751 /* 752 * Validate structure 753 */ 754 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 755 return (EINVAL); 756 if (snap->index > snap->count) 757 return (EINVAL); 758 759 hammer_lock_ex(&hmp->snapshot_lock); 760 again: 761 /* 762 * Look for keys starting after the previous iteration, or at 763 * the beginning if snap->count is 0. 764 */ 765 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 766 if (error) { 767 hammer_done_cursor(&cursor); 768 return(error); 769 } 770 771 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 772 cursor.key_beg.create_tid = 0; 773 cursor.key_beg.delete_tid = 0; 774 cursor.key_beg.obj_type = 0; 775 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT; 776 cursor.key_beg.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 777 cursor.asof = HAMMER_MAX_TID; 778 cursor.flags |= HAMMER_CURSOR_ASOF; 779 780 while (snap->index < snap->count) { 781 cursor.key_beg.key = (int64_t)snap->snaps[snap->index].tid; 782 error = hammer_btree_lookup(&cursor); 783 if (error) 784 break; 785 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF); 786 if (error) 787 break; 788 error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY, 789 0, 0, 0, NULL); 790 if (error == EDEADLK) { 791 hammer_done_cursor(&cursor); 792 goto again; 793 } 794 if (error) 795 break; 796 ++snap->index; 797 } 798 snap->head.error = error; 799 hammer_done_cursor(&cursor); 800 hammer_unlock(&hmp->snapshot_lock); 801 return(0); 802 } 803 804 /* 805 * Retrieve as many snapshot ids as possible or until the array is 806 * full, starting after the last transction id passed in. If count 807 * is 0 we retrieve starting at the beginning. 
808 * 809 * NOTE: Because the b-tree key field is signed but transaction ids 810 * are unsigned the returned list will be signed-sorted instead 811 * of unsigned sorted. The Caller must still sort the aggregate 812 * results. 813 */ 814 static 815 int 816 hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 817 struct hammer_ioc_snapshot *snap) 818 { 819 struct hammer_cursor cursor; 820 int error; 821 822 /* 823 * Validate structure 824 */ 825 if (snap->index != 0) 826 return (EINVAL); 827 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 828 return (EINVAL); 829 830 /* 831 * Look for keys starting after the previous iteration, or at 832 * the beginning if snap->count is 0. 833 */ 834 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 835 if (error) { 836 hammer_done_cursor(&cursor); 837 return(error); 838 } 839 840 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 841 cursor.key_beg.create_tid = 0; 842 cursor.key_beg.delete_tid = 0; 843 cursor.key_beg.obj_type = 0; 844 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT; 845 cursor.key_beg.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 846 if (snap->count == 0) 847 cursor.key_beg.key = HAMMER_MIN_KEY; 848 else 849 cursor.key_beg.key = (int64_t)snap->snaps[snap->count - 1].tid + 1; 850 851 cursor.key_end = cursor.key_beg; 852 cursor.key_end.key = HAMMER_MAX_KEY; 853 cursor.asof = HAMMER_MAX_TID; 854 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE | HAMMER_CURSOR_ASOF; 855 856 snap->count = 0; 857 858 error = hammer_btree_first(&cursor); 859 while (error == 0 && snap->count < HAMMER_SNAPS_PER_IOCTL) { 860 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF); 861 if (error) 862 break; 863 if (cursor.leaf->base.rec_type == HAMMER_RECTYPE_SNAPSHOT) { 864 error = hammer_btree_extract( 865 &cursor, HAMMER_CURSOR_GET_LEAF | 866 HAMMER_CURSOR_GET_DATA); 867 snap->snaps[snap->count] = cursor.data->snap; 868 869 /* 870 * The snap data tid should match the key but might 871 * not due to a bug 
in the HAMMER v3 conversion code. 872 * 873 * This error will work itself out over time but we 874 * have to force a match or the snapshot will not 875 * be deletable. 876 */ 877 if (cursor.data->snap.tid != 878 (hammer_tid_t)cursor.leaf->base.key) { 879 kprintf("HAMMER: lo=%08x snapshot key " 880 "0x%016jx data mismatch 0x%016jx\n", 881 cursor.key_beg.localization, 882 (uintmax_t)cursor.data->snap.tid, 883 cursor.leaf->base.key); 884 kprintf("HAMMER: Probably left over from the " 885 "original v3 conversion, hammer " 886 "cleanup should get it eventually\n"); 887 snap->snaps[snap->count].tid = 888 cursor.leaf->base.key; 889 } 890 ++snap->count; 891 } 892 error = hammer_btree_iterate(&cursor); 893 } 894 895 if (error == ENOENT) { 896 snap->head.flags |= HAMMER_IOC_SNAPSHOT_EOF; 897 error = 0; 898 } 899 snap->head.error = error; 900 hammer_done_cursor(&cursor); 901 return(0); 902 } 903 904 /* 905 * Retrieve the PFS hammer cleanup utility config record. This is 906 * different (newer than) the PFS config. 
 */
static
int
hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_config *config)
{
	struct hammer_cursor cursor;
	int error;

	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	/* CONFIG records hang off the root object, as-of "now" */
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_CONFIG;
	cursor.key_beg.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.key = 0;		/* config space page 0 */

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF |
						      HAMMER_CURSOR_GET_DATA);
		if (error == 0)
			config->config = cursor.data->config;
	}
	/* error can be ENOENT */
	config->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}

/*
 * Set the PFS hammer cleanup utility config record.  This is
 * different (newer than) the PFS config.
 *
 * Implemented as delete-then-recreate of the single CONFIG record
 * (page 0); EDEADLK from either step restarts the whole sequence.
 *
 * This is kinda a hack.
 */
static
int
hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_config *config)
{
	struct hammer_btree_leaf_elm leaf;
	struct hammer_cursor cursor;
	hammer_mount_t hmp = ip->hmp;
	int error;

again:
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = HAMMER_OBJID_ROOT;
	leaf.base.rec_type = HAMMER_RECTYPE_CONFIG;
	leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE;
	leaf.base.key = 0;	/* page 0 */
	leaf.data_len = sizeof(struct hammer_config_data);

	cursor.key_beg = leaf.base;

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;

	/* delete any pre-existing config record first */
	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		/*
		 * NOTE(review): the extract error is immediately
		 * overwritten by hammer_delete_at_cursor()'s return
		 * value — confirm the extract is only needed to
		 * position/populate the cursor here.
		 */
		error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF |
						      HAMMER_CURSOR_GET_DATA);
		error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
						0, 0, 0, NULL);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	/* a missing record is fine; we are about to create one */
	if (error == ENOENT)
		error = 0;
	if (error == 0) {
		/*
		 * NOTE: Must reload key_beg after an ASOF search because
		 *	 the create_tid may have been modified during the
		 *	 search.
		 */
		cursor.flags &= ~HAMMER_CURSOR_ASOF;
		cursor.key_beg = leaf.base;
		error = hammer_create_at_cursor(&cursor, &leaf,
						&config->config,
						HAMMER_CREATE_MODE_SYS);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	config->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}

/*
 * Look up a single B-Tree record identified by data->elm and copy up to
 * data->size bytes of its data to the user buffer data->ubuf.  The full
 * leaf element is returned in data->leaf, so a caller can detect
 * truncation by comparing leaf.data_len against the requested size.
 */
static
int
hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_data *data)
{
	struct hammer_cursor cursor;
	int bytes;
	int error;

	/* XXX cached inode ? */
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error)
		goto failed;

	cursor.key_beg = data->elm;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF |
						      HAMMER_CURSOR_GET_DATA);
		if (error == 0) {
			data->leaf = *cursor.leaf;
			/* clamp the copy to the caller's buffer size */
			bytes = cursor.leaf->data_len;
			if (bytes > data->size)
				bytes = data->size;
			error = copyout(cursor.data, data->ubuf, bytes);
		}
	}

failed:
	hammer_done_cursor(&cursor);
	return (error);
}