1 /* 2 * Copyright (c) 2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.32 2008/11/13 02:23:29 dillon Exp $
 */

#include "hammer.h"

/*
 * Prototypes for the ioctl workers implemented in this file.  The other
 * HAMMERIOC_* operations (prune, reblock, rebalance, pseudofs, mirror,
 * volume add/del) are implemented in sibling source files.
 */
static int hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_history *hist);
static int hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_synctid *std);
static int hammer_ioc_get_version(hammer_transaction_t trans,
				hammer_inode_t ip,
				struct hammer_ioc_version *ver);
static int hammer_ioc_set_version(hammer_transaction_t trans,
				hammer_inode_t ip,
				struct hammer_ioc_version *ver);
static int hammer_ioc_get_info(hammer_transaction_t trans,
				struct hammer_ioc_info *info);
static int hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_snapshot *snap);
static int hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_config *snap);
static int hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
				struct hammer_ioc_config *snap);

/*
 * Main ioctl dispatcher for HAMMER.
 *
 * The PRIV_HAMMER_IOCTL privilege is checked once up front but its result
 * is only consulted by the cases which modify the filesystem.  Read-only
 * operations (GETHISTORY, SYNCTID, GET_PSEUDOFS, GET_VERSION, GET_INFO,
 * GET_SNAPSHOT, GET_CONFIG) are serviced regardless of the privilege
 * check result.  NOTE(review): this looks intentional (unprivileged
 * queries allowed) -- confirm against the intended security model.
 *
 * Every operation runs inside a single HAMMER transaction which is
 * always terminated before returning.
 */
int
hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
	     struct ucred *cred)
{
	struct hammer_transaction trans;
	int error;

	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

	hammer_start_transaction(&trans, ip->hmp);

	switch(com) {
	case HAMMERIOC_PRUNE:
		if (error == 0) {
			error = hammer_ioc_prune(&trans, ip,
				(struct hammer_ioc_prune *)data);
		}
		break;
	case HAMMERIOC_GETHISTORY:
		error = hammer_ioc_gethistory(&trans, ip,
				(struct hammer_ioc_history *)data);
		break;
	case HAMMERIOC_REBLOCK:
		if (error == 0) {
			error = hammer_ioc_reblock(&trans, ip,
				(struct hammer_ioc_reblock *)data);
		}
		break;
	case HAMMERIOC_REBALANCE:
		if (error == 0) {
			error = hammer_ioc_rebalance(&trans, ip,
				(struct hammer_ioc_rebalance *)data);
		}
		break;
	case HAMMERIOC_SYNCTID:
		error = hammer_ioc_synctid(&trans, ip,
				(struct hammer_ioc_synctid *)data);
		break;
	case HAMMERIOC_GET_PSEUDOFS:
		error = hammer_ioc_get_pseudofs(&trans, ip,
				(struct hammer_ioc_pseudofs_rw *)data);
		break;
	case HAMMERIOC_SET_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_set_pseudofs(&trans, ip, cred,
				(struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_UPG_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_upgrade_pseudofs(&trans, ip,
				(struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_DGD_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_downgrade_pseudofs(&trans, ip,
				(struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_RMR_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_destroy_pseudofs(&trans, ip,
				(struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_WAI_PSEUDOFS:
		if (error == 0) {
			error = hammer_ioc_wait_pseudofs(&trans, ip,
				(struct hammer_ioc_pseudofs_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_READ:
		if (error == 0) {
			error = hammer_ioc_mirror_read(&trans, ip,
				(struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_MIRROR_WRITE:
		if (error == 0) {
			error = hammer_ioc_mirror_write(&trans, ip,
				(struct hammer_ioc_mirror_rw *)data);
		}
		break;
	case HAMMERIOC_GET_VERSION:
		error = hammer_ioc_get_version(&trans, ip,
				(struct hammer_ioc_version *)data);
		break;
	case HAMMERIOC_GET_INFO:
		error = hammer_ioc_get_info(&trans,
				(struct hammer_ioc_info *)data);
		break;
	case HAMMERIOC_SET_VERSION:
		if (error == 0) {
			error = hammer_ioc_set_version(&trans, ip,
				(struct hammer_ioc_version *)data);
		}
		break;
	case HAMMERIOC_ADD_VOLUME:
		/*
		 * Volume add/del additionally require the separate
		 * PRIV_HAMMER_VOLUME privilege.
		 */
		if (error == 0) {
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_add(&trans, ip,
					(struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_DEL_VOLUME:
		if (error == 0) {
			error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
			if (error == 0)
				error = hammer_ioc_volume_del(&trans, ip,
					(struct hammer_ioc_volume *)data);
		}
		break;
	case HAMMERIOC_ADD_SNAPSHOT:
		if (error == 0) {
			error = hammer_ioc_add_snapshot(
				&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_DEL_SNAPSHOT:
		if (error == 0) {
			error = hammer_ioc_del_snapshot(
				&trans, ip, (struct hammer_ioc_snapshot *)data);
		}
		break;
	case HAMMERIOC_GET_SNAPSHOT:
		error = hammer_ioc_get_snapshot(
				&trans, ip, (struct hammer_ioc_snapshot *)data);
		break;
	case HAMMERIOC_GET_CONFIG:
		error = hammer_ioc_get_config(
				&trans, ip, (struct hammer_ioc_config *)data);
		break;
	case HAMMERIOC_SET_CONFIG:
		if (error == 0) {
			error = hammer_ioc_set_config(
				&trans, ip, (struct hammer_ioc_config *)data);
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Iterate through an object's inode or an object's records and record
 * modification TIDs.
 */
static void add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
			hammer_btree_elm_t elm);

static
int
hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_history *hist)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int error;

	/*
	 * Validate the structure and initialize for return.
228 */ 229 if (hist->beg_tid > hist->end_tid) 230 return(EINVAL); 231 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) { 232 if (hist->key > hist->nxt_key) 233 return(EINVAL); 234 } 235 236 hist->obj_id = ip->obj_id; 237 hist->count = 0; 238 hist->nxt_tid = hist->end_tid; 239 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_TID; 240 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY; 241 hist->head.flags &= ~HAMMER_IOC_HISTORY_EOF; 242 hist->head.flags &= ~HAMMER_IOC_HISTORY_UNSYNCED; 243 if ((ip->flags & HAMMER_INODE_MODMASK) & 244 ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) { 245 hist->head.flags |= HAMMER_IOC_HISTORY_UNSYNCED; 246 } 247 248 /* 249 * Setup the cursor. We can't handle undeletable records 250 * (create_tid of 0) at the moment. A create_tid of 0 has 251 * a special meaning and cannot be specified in the cursor. 252 */ 253 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 254 if (error) { 255 hammer_done_cursor(&cursor); 256 return(error); 257 } 258 259 cursor.key_beg.obj_id = hist->obj_id; 260 cursor.key_beg.create_tid = hist->beg_tid; 261 cursor.key_beg.delete_tid = 0; 262 cursor.key_beg.obj_type = 0; 263 if (cursor.key_beg.create_tid == HAMMER_MIN_TID) 264 cursor.key_beg.create_tid = 1; 265 266 cursor.key_end.obj_id = hist->obj_id; 267 cursor.key_end.create_tid = hist->end_tid; 268 cursor.key_end.delete_tid = 0; 269 cursor.key_end.obj_type = 0; 270 271 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE; 272 273 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) { 274 /* 275 * key-range within the file. For a regular file the 276 * on-disk key represents BASE+LEN, not BASE, so the 277 * first possible record containing the offset 'key' 278 * has an on-disk key of (key + 1). 
279 */ 280 cursor.key_beg.key = hist->key; 281 cursor.key_end.key = HAMMER_MAX_KEY; 282 cursor.key_beg.localization = ip->obj_localization + 283 HAMMER_LOCALIZE_MISC; 284 cursor.key_end.localization = ip->obj_localization + 285 HAMMER_LOCALIZE_MISC; 286 287 switch(ip->ino_data.obj_type) { 288 case HAMMER_OBJTYPE_REGFILE: 289 ++cursor.key_beg.key; 290 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA; 291 break; 292 case HAMMER_OBJTYPE_DIRECTORY: 293 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY; 294 cursor.key_beg.localization = ip->obj_localization + 295 hammer_dir_localization(ip); 296 cursor.key_end.localization = ip->obj_localization + 297 hammer_dir_localization(ip); 298 break; 299 case HAMMER_OBJTYPE_DBFILE: 300 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB; 301 break; 302 default: 303 error = EINVAL; 304 break; 305 } 306 cursor.key_end.rec_type = cursor.key_beg.rec_type; 307 } else { 308 /* 309 * The inode itself. 310 */ 311 cursor.key_beg.key = 0; 312 cursor.key_end.key = 0; 313 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE; 314 cursor.key_end.rec_type = HAMMER_RECTYPE_INODE; 315 cursor.key_beg.localization = ip->obj_localization + 316 HAMMER_LOCALIZE_INODE; 317 cursor.key_end.localization = ip->obj_localization + 318 HAMMER_LOCALIZE_INODE; 319 } 320 321 error = hammer_btree_first(&cursor); 322 while (error == 0) { 323 elm = &cursor.node->ondisk->elms[cursor.index]; 324 325 add_history(ip, hist, elm); 326 if (hist->head.flags & (HAMMER_IOC_HISTORY_NEXT_TID | 327 HAMMER_IOC_HISTORY_NEXT_KEY | 328 HAMMER_IOC_HISTORY_EOF)) { 329 break; 330 } 331 error = hammer_btree_iterate(&cursor); 332 } 333 if (error == ENOENT) { 334 hist->head.flags |= HAMMER_IOC_HISTORY_EOF; 335 error = 0; 336 } 337 hammer_done_cursor(&cursor); 338 return(error); 339 } 340 341 /* 342 * Add the scanned element to the ioctl return structure. Some special 343 * casing is required for regular files to accomodate how data ranges are 344 * stored on-disk. 
 */
static void
add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
	    hammer_btree_elm_t elm)
{
	int i;

	/* only record-type B-Tree elements carry history */
	if (elm->base.btype != HAMMER_BTREE_TYPE_RECORD)
		return;
	if ((hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) &&
	    ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE) {
		/*
		 * Adjust nxt_key.  For regular files the on-disk key is
		 * the END of the data range (BASE+LEN), so the record's
		 * base offset is key - data_len.
		 */
		if (hist->nxt_key > elm->leaf.base.key - elm->leaf.data_len &&
		    hist->key < elm->leaf.base.key - elm->leaf.data_len) {
			hist->nxt_key = elm->leaf.base.key - elm->leaf.data_len;
		}
		if (hist->nxt_key > elm->leaf.base.key)
			hist->nxt_key = elm->leaf.base.key;

		/*
		 * Record is beyond MAXPHYS, there won't be any more records
		 * in the iteration covering the requested offset (key).
		 */
		if (elm->leaf.base.key >= MAXPHYS &&
		    elm->leaf.base.key - MAXPHYS > hist->key) {
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
		}

		/*
		 * Data-range of record does not cover the key.
		 */
		if (elm->leaf.base.key - elm->leaf.data_len > hist->key)
			return;

	} else if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * Adjust nxt_key (non-regular-file: key matched exactly)
		 */
		if (hist->nxt_key > elm->leaf.base.key &&
		    hist->key < elm->leaf.base.key) {
			hist->nxt_key = elm->leaf.base.key;
		}

		/*
		 * Record is beyond the requested key.
		 */
		if (elm->leaf.base.key > hist->key)
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
	}

	/*
	 * Add create_tid if it is in-bounds.  Skips a duplicate of the
	 * most recently recorded TID.  If the return array is full the
	 * TID is stashed in nxt_tid and NEXT_TID tells the caller to
	 * continue from there.
	 */
	i = hist->count;
	if ((i == 0 ||
	     elm->leaf.base.create_tid != hist->hist_ary[i - 1].tid) &&
	    elm->leaf.base.create_tid >= hist->beg_tid &&
	    elm->leaf.base.create_tid < hist->end_tid) {
		if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
			hist->nxt_tid = elm->leaf.base.create_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.create_tid;
		hist->hist_ary[i].time32 = elm->leaf.create_ts;
		++hist->count;
	}

	/*
	 * Add delete_tid if it is in-bounds.  Note that different portions
	 * of the history may have overlapping data ranges with different
	 * delete_tid's.  If this case occurs the delete_tid may match the
	 * create_tid of a following record.  XXX
	 *
	 *	[        ]
	 *	    [     ]
	 */
	i = hist->count;
	if (elm->leaf.base.delete_tid &&
	    elm->leaf.base.delete_tid >= hist->beg_tid &&
	    elm->leaf.base.delete_tid < hist->end_tid) {
		if (i == HAMMER_MAX_HISTORY_ELMS) {
			hist->nxt_tid = elm->leaf.base.delete_tid;
			hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
			return;
		}
		hist->hist_ary[i].tid = elm->leaf.base.delete_tid;
		hist->hist_ary[i].time32 = elm->leaf.delete_ts;
		++hist->count;
	}
}

/*
 * Acquire synchronization TID
 */
static
int
hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_synctid *std)
{
	hammer_mount_t hmp = ip->hmp;
	int error = 0;

	switch(std->op) {
	case HAMMER_SYNCTID_NONE:
		std->tid = hmp->flusher.tid;	/* inaccurate */
		break;
	case HAMMER_SYNCTID_ASYNC:
		hammer_queue_inodes_flusher(hmp, MNT_NOWAIT);
		hammer_flusher_async(hmp, NULL);
		std->tid = hmp->flusher.tid;	/* inaccurate */
		break;
	case HAMMER_SYNCTID_SYNC1:
		hammer_queue_inodes_flusher(hmp, MNT_WAIT);
		hammer_flusher_sync(hmp);
		std->tid = hmp->flusher.tid;
		break;
	case HAMMER_SYNCTID_SYNC2:
hammer_queue_inodes_flusher(hmp, MNT_WAIT); 466 hammer_flusher_sync(hmp); 467 std->tid = hmp->flusher.tid; 468 hammer_flusher_sync(hmp); 469 break; 470 default: 471 error = EOPNOTSUPP; 472 break; 473 } 474 return(error); 475 } 476 477 /* 478 * Retrieve version info. 479 * 480 * Load min_version, wip_version, and max_versino. If cur_version is passed 481 * as 0 then load the current version into cur_version. Load the description 482 * for cur_version into the description array. 483 * 484 * Returns 0 on success, EINVAL if cur_version is non-zero and set to an 485 * unsupported value. 486 */ 487 static 488 int 489 hammer_ioc_get_version(hammer_transaction_t trans, hammer_inode_t ip, 490 struct hammer_ioc_version *ver) 491 { 492 int error = 0; 493 494 ver->min_version = HAMMER_VOL_VERSION_MIN; 495 ver->wip_version = HAMMER_VOL_VERSION_WIP; 496 ver->max_version = HAMMER_VOL_VERSION_MAX; 497 if (ver->cur_version == 0) 498 ver->cur_version = trans->hmp->version; 499 switch(ver->cur_version) { 500 case 1: 501 ksnprintf(ver->description, sizeof(ver->description), 502 "First HAMMER release (DragonFly 2.0+)"); 503 break; 504 case 2: 505 ksnprintf(ver->description, sizeof(ver->description), 506 "New directory entry layout (DragonFly 2.3+)"); 507 break; 508 case 3: 509 ksnprintf(ver->description, sizeof(ver->description), 510 "New snapshot management (DragonFly 2.5+)"); 511 break; 512 case 4: 513 ksnprintf(ver->description, sizeof(ver->description), 514 "New undo/flush, faster flush/sync (DragonFly 2.5+)"); 515 break; 516 default: 517 ksnprintf(ver->description, sizeof(ver->description), 518 "Unknown"); 519 error = EINVAL; 520 break; 521 } 522 return(error); 523 }; 524 525 /* 526 * Set version info 527 */ 528 static 529 int 530 hammer_ioc_set_version(hammer_transaction_t trans, hammer_inode_t ip, 531 struct hammer_ioc_version *ver) 532 { 533 hammer_mount_t hmp = trans->hmp; 534 struct hammer_cursor cursor; 535 hammer_volume_t volume; 536 int error; 537 int over = hmp->version; 
538 539 /* 540 * Generally do not allow downgrades. However, version 4 can 541 * be downgraded to version 3. 542 */ 543 if (ver->cur_version < hmp->version) { 544 if (!(ver->cur_version == 3 && hmp->version == 4)) 545 return(EINVAL); 546 } 547 if (ver->cur_version == hmp->version) 548 return(0); 549 if (ver->cur_version > HAMMER_VOL_VERSION_MAX) 550 return(EINVAL); 551 if (hmp->ronly) 552 return(EROFS); 553 554 /* 555 * Update the root volume header and the version cached in 556 * the hammer_mount structure. 557 */ 558 error = hammer_init_cursor(trans, &cursor, NULL, NULL); 559 if (error) 560 goto failed; 561 hammer_lock_ex(&hmp->flusher.finalize_lock); 562 hammer_sync_lock_ex(trans); 563 hmp->version = ver->cur_version; 564 565 /* 566 * If upgrading from version < 4 to version >= 4 the UNDO FIFO 567 * must be reinitialized. 568 */ 569 if (over < HAMMER_VOL_VERSION_FOUR && 570 ver->cur_version >= HAMMER_VOL_VERSION_FOUR) { 571 kprintf("upgrade undo to version 4\n"); 572 error = hammer_upgrade_undo_4(trans); 573 if (error) 574 goto failed; 575 } 576 577 /* 578 * Adjust the version in the volume header 579 */ 580 volume = hammer_get_root_volume(hmp, &error); 581 KKASSERT(error == 0); 582 hammer_modify_volume_field(cursor.trans, volume, vol_version); 583 volume->ondisk->vol_version = ver->cur_version; 584 hammer_modify_volume_done(volume); 585 hammer_rel_volume(volume, 0); 586 587 hammer_sync_unlock(trans); 588 hammer_unlock(&hmp->flusher.finalize_lock); 589 failed: 590 ver->head.error = error; 591 hammer_done_cursor(&cursor); 592 return(0); 593 } 594 595 /* 596 * Get information 597 */ 598 static 599 int 600 hammer_ioc_get_info(hammer_transaction_t trans, struct hammer_ioc_info *info) { 601 602 struct hammer_volume_ondisk *od = trans->hmp->rootvol->ondisk; 603 struct hammer_mount *hm = trans->hmp; 604 605 /* Fill the structure with the necessary information */ 606 _hammer_checkspace(hm, HAMMER_CHKSPC_WRITE, &info->rsvbigblocks); 607 info->rsvbigblocks = 
info->rsvbigblocks >> HAMMER_LARGEBLOCK_BITS; 608 strlcpy(info->vol_name, od->vol_name, sizeof(od->vol_name)); 609 610 info->vol_fsid = hm->fsid; 611 info->vol_fstype = od->vol_fstype; 612 info->version = hm->version; 613 614 info->inodes = od->vol0_stat_inodes; 615 info->bigblocks = od->vol0_stat_bigblocks; 616 info->freebigblocks = od->vol0_stat_freebigblocks; 617 info->nvolumes = hm->nvolumes; 618 619 return 0; 620 } 621 622 /* 623 * Add a snapshot transction id(s) to the list of snapshots. 624 * 625 * NOTE: Records are created with an allocated TID. If a flush cycle 626 * is in progress the record may be synced in the current flush 627 * cycle and the volume header will reflect the allocation of the 628 * TID, but the synchronization point may not catch up to the 629 * TID until the next flush cycle. 630 */ 631 static 632 int 633 hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 634 struct hammer_ioc_snapshot *snap) 635 { 636 hammer_mount_t hmp = ip->hmp; 637 struct hammer_btree_leaf_elm leaf; 638 struct hammer_cursor cursor; 639 int error; 640 641 /* 642 * Validate structure 643 */ 644 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 645 return (EINVAL); 646 if (snap->index > snap->count) 647 return (EINVAL); 648 649 hammer_lock_ex(&hmp->snapshot_lock); 650 again: 651 /* 652 * Look for keys starting after the previous iteration, or at 653 * the beginning if snap->count is 0. 
654 */ 655 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 656 if (error) { 657 hammer_done_cursor(&cursor); 658 return(error); 659 } 660 661 cursor.asof = HAMMER_MAX_TID; 662 cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF; 663 664 bzero(&leaf, sizeof(leaf)); 665 leaf.base.obj_id = HAMMER_OBJID_ROOT; 666 leaf.base.rec_type = HAMMER_RECTYPE_SNAPSHOT; 667 leaf.base.create_tid = hammer_alloc_tid(hmp, 1); 668 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; 669 leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 670 leaf.data_len = sizeof(struct hammer_snapshot_data); 671 672 while (snap->index < snap->count) { 673 leaf.base.key = (int64_t)snap->snaps[snap->index].tid; 674 cursor.key_beg = leaf.base; 675 error = hammer_btree_lookup(&cursor); 676 if (error == 0) { 677 error = EEXIST; 678 break; 679 } 680 681 cursor.flags &= ~HAMMER_CURSOR_ASOF; 682 error = hammer_create_at_cursor(&cursor, &leaf, 683 &snap->snaps[snap->index], 684 HAMMER_CREATE_MODE_SYS); 685 if (error == EDEADLK) { 686 hammer_done_cursor(&cursor); 687 goto again; 688 } 689 cursor.flags |= HAMMER_CURSOR_ASOF; 690 if (error) 691 break; 692 ++snap->index; 693 } 694 snap->head.error = error; 695 hammer_done_cursor(&cursor); 696 hammer_unlock(&hmp->snapshot_lock); 697 return(0); 698 } 699 700 /* 701 * Delete snapshot transaction id(s) from the list of snapshots. 702 */ 703 static 704 int 705 hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 706 struct hammer_ioc_snapshot *snap) 707 { 708 hammer_mount_t hmp = ip->hmp; 709 struct hammer_cursor cursor; 710 int error; 711 712 /* 713 * Validate structure 714 */ 715 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 716 return (EINVAL); 717 if (snap->index > snap->count) 718 return (EINVAL); 719 720 hammer_lock_ex(&hmp->snapshot_lock); 721 again: 722 /* 723 * Look for keys starting after the previous iteration, or at 724 * the beginning if snap->count is 0. 
725 */ 726 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 727 if (error) { 728 hammer_done_cursor(&cursor); 729 return(error); 730 } 731 732 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 733 cursor.key_beg.create_tid = 0; 734 cursor.key_beg.delete_tid = 0; 735 cursor.key_beg.obj_type = 0; 736 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT; 737 cursor.key_beg.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 738 cursor.asof = HAMMER_MAX_TID; 739 cursor.flags |= HAMMER_CURSOR_ASOF; 740 741 while (snap->index < snap->count) { 742 cursor.key_beg.key = (int64_t)snap->snaps[snap->index].tid; 743 error = hammer_btree_lookup(&cursor); 744 if (error) 745 break; 746 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF); 747 if (error) 748 break; 749 error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY, 750 0, 0, 0, NULL); 751 if (error == EDEADLK) { 752 hammer_done_cursor(&cursor); 753 goto again; 754 } 755 if (error) 756 break; 757 ++snap->index; 758 } 759 snap->head.error = error; 760 hammer_done_cursor(&cursor); 761 hammer_unlock(&hmp->snapshot_lock); 762 return(0); 763 } 764 765 /* 766 * Retrieve as many snapshot ids as possible or until the array is 767 * full, starting after the last transction id passed in. If count 768 * is 0 we retrieve starting at the beginning. 769 * 770 * NOTE: Because the b-tree key field is signed but transaction ids 771 * are unsigned the returned list will be signed-sorted instead 772 * of unsigned sorted. The Caller must still sort the aggregate 773 * results. 
774 */ 775 static 776 int 777 hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip, 778 struct hammer_ioc_snapshot *snap) 779 { 780 struct hammer_cursor cursor; 781 int error; 782 783 /* 784 * Validate structure 785 */ 786 if (snap->index != 0) 787 return (EINVAL); 788 if (snap->count > HAMMER_SNAPS_PER_IOCTL) 789 return (EINVAL); 790 791 /* 792 * Look for keys starting after the previous iteration, or at 793 * the beginning if snap->count is 0. 794 */ 795 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL); 796 if (error) { 797 hammer_done_cursor(&cursor); 798 return(error); 799 } 800 801 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 802 cursor.key_beg.create_tid = 0; 803 cursor.key_beg.delete_tid = 0; 804 cursor.key_beg.obj_type = 0; 805 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT; 806 cursor.key_beg.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE; 807 if (snap->count == 0) 808 cursor.key_beg.key = HAMMER_MIN_KEY; 809 else 810 cursor.key_beg.key = (int64_t)snap->snaps[snap->count - 1].tid + 1; 811 812 cursor.key_end = cursor.key_beg; 813 cursor.key_end.key = HAMMER_MAX_KEY; 814 cursor.asof = HAMMER_MAX_TID; 815 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE | HAMMER_CURSOR_ASOF; 816 817 snap->count = 0; 818 819 error = hammer_btree_first(&cursor); 820 while (error == 0 && snap->count < HAMMER_SNAPS_PER_IOCTL) { 821 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF); 822 if (error) 823 break; 824 if (cursor.leaf->base.rec_type == HAMMER_RECTYPE_SNAPSHOT) { 825 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF | 826 HAMMER_CURSOR_GET_DATA); 827 snap->snaps[snap->count] = cursor.data->snap; 828 ++snap->count; 829 } 830 error = hammer_btree_iterate(&cursor); 831 } 832 833 if (error == ENOENT) { 834 snap->head.flags |= HAMMER_IOC_SNAPSHOT_EOF; 835 error = 0; 836 } 837 snap->head.error = error; 838 hammer_done_cursor(&cursor); 839 return(0); 840 } 841 842 /* 843 * Retrieve the PFS hammer cleanup utility 
 * config record.  This is
 * different (newer than) the PFS config.
 */
static
int
hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_config *config)
{
	struct hammer_cursor cursor;
	int error;

	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	/*
	 * The config record lives on the PFS root object at key 0
	 * (page 0 of the config space).
	 */
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_CONFIG;
	cursor.key_beg.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.key = 0;		/* config space page 0 */

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF |
					     HAMMER_CURSOR_GET_DATA);
		if (error == 0)
			config->config = cursor.data->config;
	}
	/* error can be ENOENT (no config record exists yet) */
	config->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}

/*
 * Set the PFS hammer cleanup utility config record.  This is
 * different (newer than) the PFS config.
 *
 * This is kinda a hack.
 */
static
int
hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_config *config)
{
	struct hammer_btree_leaf_elm leaf;
	struct hammer_cursor cursor;
	hammer_mount_t hmp = ip->hmp;
	int error;

again:
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	/*
	 * Template leaf for the (re)created config record: key 0 on the
	 * PFS root object with a freshly allocated TID.
	 */
	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = HAMMER_OBJID_ROOT;
	leaf.base.rec_type = HAMMER_RECTYPE_CONFIG;
	leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_INODE;
	leaf.base.key = 0;	/* page 0 */
	leaf.data_len = sizeof(struct hammer_config_data);

	cursor.key_beg = leaf.base;

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;

	/*
	 * Destroy any existing config record, then create the new one.
	 * A B-Tree deadlock at either step tears down the cursor and
	 * retries the whole operation from 'again'.
	 */
	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		/*
		 * NOTE(review): the extract result is overwritten by
		 * the delete below -- presumably the extract only
		 * positions/loads the leaf for the delete; confirm.
		 */
		error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_LEAF |
					     HAMMER_CURSOR_GET_DATA);
		error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
						0, 0, 0, NULL);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	/* no pre-existing record is fine */
	if (error == ENOENT)
		error = 0;
	if (error == 0) {
		cursor.flags &= ~HAMMER_CURSOR_ASOF;
		cursor.key_beg = leaf.base;
		error = hammer_create_at_cursor(&cursor, &leaf,
						&config->config,
						HAMMER_CREATE_MODE_SYS);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	/* error is reported in-band; the ioctl itself succeeds */
	config->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}