1 /* 2 * Copyright (c) 2011-2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 /* 36 * Ioctl Functions. 37 * 38 * WARNING! 
The ioctl functions which manipulate the connection state need
 *	    to be able to run without deadlock on the volume's chain lock.
 *	    Most of these functions use a separate lock.
 */

#include "hammer2.h"

static int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_debug_dump(hammer2_inode_t *ip, u_int flags);
static int hammer2_ioctl_emerg_mode(hammer2_inode_t *ip, u_int mode);
//static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data);

/*
 * Top-level hammer2 ioctl dispatcher.
 *
 * The root-privilege check is performed up-front and its result is only
 * consulted by the cases that require root; cases which fall through
 * without testing 'error' (VERSION_GET, INODE_GET, BULKFREE_SCAN,
 * BULKFREE_ASYNC, DEBUG_DUMP) deliberately ignore the privilege check
 * result and are available to non-root callers.
 */
int
hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data, int fflag,
	      struct ucred *cred)
{
	int error;

	/*
	 * Standard root cred checks, will be selectively ignored below
	 * for ioctls that do not require root creds.
	 */
	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

	switch(com) {
	case HAMMER2IOC_VERSION_GET:
		/* no root required */
		error = hammer2_ioctl_version_get(ip, data);
		break;
	case HAMMER2IOC_RECLUSTER:
		if (error == 0)
			error = hammer2_ioctl_recluster(ip, data);
		break;
	case HAMMER2IOC_REMOTE_SCAN:
		if (error == 0)
			error = hammer2_ioctl_remote_scan(ip, data);
		break;
	case HAMMER2IOC_REMOTE_ADD:
		if (error == 0)
			error = hammer2_ioctl_remote_add(ip, data);
		break;
	case HAMMER2IOC_REMOTE_DEL:
		if (error == 0)
			error = hammer2_ioctl_remote_del(ip, data);
		break;
	case HAMMER2IOC_REMOTE_REP:
		if (error == 0)
			error = hammer2_ioctl_remote_rep(ip, data);
		break;
	case HAMMER2IOC_SOCKET_GET:
		if (error == 0)
			error = hammer2_ioctl_socket_get(ip, data);
		break;
	case HAMMER2IOC_SOCKET_SET:
		if (error == 0)
			error = hammer2_ioctl_socket_set(ip, data);
		break;
	case HAMMER2IOC_PFS_GET:
		if (error == 0)
			error = hammer2_ioctl_pfs_get(ip, data);
		break;
	case HAMMER2IOC_PFS_LOOKUP:
		if (error == 0)
			error = hammer2_ioctl_pfs_lookup(ip, data);
		break;
	case HAMMER2IOC_PFS_CREATE:
		if (error == 0)
			error = hammer2_ioctl_pfs_create(ip, data);
		break;
	case HAMMER2IOC_PFS_DELETE:
		if (error == 0)
			error = hammer2_ioctl_pfs_delete(ip, data);
		break;
	case HAMMER2IOC_PFS_SNAPSHOT:
		if (error == 0)
			error = hammer2_ioctl_pfs_snapshot(ip, data);
		break;
	case HAMMER2IOC_INODE_GET:
		/* no root required */
		error = hammer2_ioctl_inode_get(ip, data);
		break;
	case HAMMER2IOC_INODE_SET:
		if (error == 0)
			error = hammer2_ioctl_inode_set(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_SCAN:
		/* no root required */
		error = hammer2_ioctl_bulkfree_scan(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_ASYNC:
		/* no root required; NULL bfi selects the async variant */
		error = hammer2_ioctl_bulkfree_scan(ip, NULL);
		break;
	case HAMMER2IOC_DESTROY:
		if (error == 0)
			error = hammer2_ioctl_destroy(ip, data);
		break;
	case HAMMER2IOC_DEBUG_DUMP:
		/* no root required */
		error = hammer2_ioctl_debug_dump(ip, *(u_int *)data);
		break;
	case HAMMER2IOC_EMERG_MODE:
		if (error == 0)
			error = hammer2_ioctl_emerg_mode(ip, *(u_int *)data);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

/*
 * Retrieve version and basic info
 *
 * Reports the volume header version of the first (index 0) device backing
 * the PFS, or -1 if no device is present.
 */
static int
hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_version_t *version = data;
	hammer2_dev_t *hmp;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp)
		version->version = hmp->voldata.version;
	else
		version->version = -1;
	return 0;
}

/*
 * Reconnect the cluster to a (remote) communications descriptor supplied
 * by userland in recl->fd.  Only supported for single-chain local device
 * mounts.
 *
 * NOTE(review): on the VFS_ROOT() failure path and the "not a local
 * device mount" path the held fp does not appear to be dropped here —
 * presumably hammer2_cluster_reconnect() consumes it on success; verify
 * the failure paths do not leak the file reference.
 */
static int
hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_recluster_t *recl = data;
	struct vnode *vproot;
	struct file *fp;
	hammer2_cluster_t *cluster;
	int error;

	fp = holdfp(curthread, recl->fd, -1);
	if (fp) {
		error = VFS_ROOT(ip->pmp->mp, &vproot);
		if (error == 0) {
			cluster = &ip->pmp->iroot->cluster;
			kprintf("reconnect to cluster: nc=%d focus=%p\n",
				cluster->nchains, cluster->focus);
			if (cluster->nchains != 1 || cluster->focus == NULL) {
				kprintf("not a local device mount\n");
				error = EINVAL;
			} else {
				hammer2_cluster_reconnect(cluster->focus->hmp,
							  fp);
				kprintf("ok\n");
				error = 0;
			}
			vput(vproot);
		}
	} else {
		error = EINVAL;
	}
	return error;
}

/*
 * Retrieve information about a remote
 *
 * Copies out the copyinfo[] slot selected by remote->copyid and computes
 * remote->nextid (the next in-use slot, or -1 when none remain).
 */
static int
hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data)
{
	hammer2_dev_t *hmp;
	hammer2_ioc_remote_t *remote = data;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	remote->copy1 = hmp->voldata.copyinfo[copyid];
	hammer2_voldata_unlock(hmp);

	/*
	 * Adjust nextid (GET only)
	 *
	 * NOTE(review): this scan reads voldata.copyinfo[] after the
	 * voldata lock has been released — presumably acceptable because
	 * nextid is only advisory; confirm.
	 */
	while (++copyid < HAMMER2_COPYID_COUNT &&
	       hmp->voldata.copyinfo[copyid].copyid == 0) {
		;
	}
	if (copyid == HAMMER2_COPYID_COUNT)
		remote->nextid = -1;
	else
		remote->nextid = copyid;

	return(0);
}

/*
 * Add new remote entry
 *
 * A negative remote->copyid requests auto-allocation of the first free
 * slot (slot 0 is never auto-allocated).  Returns ENOSPC when the table
 * is full.
 */
static int
hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;
	int error = 0;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	if (copyid < 0) {
		/* auto-select the first free slot, skipping slot 0 */
		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				break;
		}
		if (copyid == HAMMER2_COPYID_COUNT) {
			error = ENOSPC;
			goto failed;
		}
	}
	hammer2_voldata_modify(hmp);
	remote->copy1.copyid = copyid;
	hmp->voldata.copyinfo[copyid] = remote->copy1;
	hammer2_volconf_update(hmp, copyid);
failed:
	hammer2_voldata_unlock(hmp);
	return (error);
}

/*
 * Delete existing remote entry
 *
 * A negative remote->copyid requests deletion by path match against
 * copy1.path.  Returns ENOENT if no matching in-use slot is found.
 */
static int
hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;
	int error = 0;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);
	/* force NUL termination of the userland-supplied path */
	remote->copy1.path[sizeof(remote->copy1.path) - 1] = 0;
	hammer2_voldata_lock(hmp);
	if (copyid < 0) {
		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				continue;
			if (strcmp(remote->copy1.path,
			    hmp->voldata.copyinfo[copyid].path) == 0) {
				break;
			}
		}
		if (copyid == HAMMER2_COPYID_COUNT) {
			error = ENOENT;
			goto failed;
		}
	}
	hammer2_voldata_modify(hmp);
	hmp->voldata.copyinfo[copyid].copyid = 0;
	hammer2_volconf_update(hmp, copyid);
failed:
	hammer2_voldata_unlock(hmp);
	return (error);
}

/*
 * Replace existing remote entry
 *
 * NOTE(review): currently a stub — it validates copyid and dirties the
 * volume header but does not actually copy remote->copy1 into
 * copyinfo[] (the volconf update call is commented out).
 */
static int
hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	hammer2_voldata_modify(hmp);
	/*hammer2_volconf_update(hmp, copyid);*/
	hammer2_voldata_unlock(hmp);

	return(0);
}

/*
 * Retrieve communications socket (not implemented)
 */
static int
hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data)
{
	return (EOPNOTSUPP);
}

/*
 * Set communications socket for connection
 *
 * NOTE(review): currently a stub — validates arguments and cycles the
 * voldata lock without making any change.
 */
static int
hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);

	return(0);
}

/*
 * Used to scan and retrieve PFS information.  PFS's are directories under
 * the super-root.
 *
 * To scan PFSs pass name_key=0.  The function will scan for the next
 * PFS and set all fields, as well as set name_next to the next key.
 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
 *
 * To retrieve a particular PFS by key, specify the key but note that
 * the ioctl will return the lowest key >= specified_key, so the caller
 * must verify the key.
 *
 * To retrieve the PFS associated with the file descriptor, pass
 * name_key set to (hammer2_key_t)-1.
 */
static int
hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t save_key;
	int error;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	save_key = pfs->name_key;
	error = 0;

	/*
	 * Setup.  For the fd-relative case ((hammer2_key_t)-1) we lock the
	 * mount's own iroot and use its chain directly (parent == NULL);
	 * otherwise we lock the device super-root and do a keyed lookup.
	 */
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_lock(ip->pmp->iroot, 0);
		parent = NULL;
		chain = hammer2_inode_chain(ip->pmp->iroot, 0,
					    HAMMER2_RESOLVE_ALWAYS |
					    HAMMER2_RESOLVE_SHARED);
	} else {
		hammer2_inode_lock(hmp->spmp->iroot, 0);
		parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
					     HAMMER2_RESOLVE_ALWAYS |
					     HAMMER2_RESOLVE_SHARED);
		chain = hammer2_chain_lookup(&parent, &key_next,
					     pfs->name_key, HAMMER2_KEY_MAX,
					     &error,
					     HAMMER2_LOOKUP_SHARED);
	}

	/*
	 * Locate next PFS (skip non-inode chains under the super-root)
	 */
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
			break;
		if (parent == NULL) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
			chain = NULL;
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error,
					   HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		KKASSERT(ripdata->meta.name_len < sizeof(pfs->name));
		bcopy(ripdata->filename, pfs->name, ripdata->meta.name_len);
		pfs->name[ripdata->meta.name_len] = 0;
		ripdata = NULL;	/* safety */

		/*
		 * Calculate name_next, if any.  We are only accessing
		 * chain->bref so we can ignore chain->error (if the key
		 * is used later it will error then).
		 */
		if (parent == NULL) {
			pfs->name_next = (hammer2_key_t)-1;
		} else {
			chain = hammer2_chain_next(&parent, chain, &key_next,
						   key_next, HAMMER2_KEY_MAX,
						   &error,
						   HAMMER2_LOOKUP_SHARED);
			if (chain)
				pfs->name_next = chain->bref.key;
			else
				pfs->name_next = (hammer2_key_t)-1;
		}
	} else {
		pfs->name_next = (hammer2_key_t)-1;
		error = ENOENT;
	}

	/*
	 * Cleanup
	 */
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_unlock(ip->pmp->iroot);
	} else {
		hammer2_inode_unlock(hmp->spmp->iroot);
	}

	return (error);
}

/*
 * Find a specific PFS by name
 *
 * Scans the super-root directory hash bucket for pfs->name and fills in
 * the PFS meta-data fields on a match.  Returns ENOENT if not found.
 */
static int
hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t lhc;
	int error;
	size_t len;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	error = 0;

	hammer2_inode_lock(hmp->spmp->iroot, HAMMER2_RESOLVE_SHARED);
	parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);

	/* force NUL termination of the userland-supplied name */
	pfs->name[sizeof(pfs->name) - 1] = 0;
	len = strlen(pfs->name);
	lhc = hammer2_dirhash(pfs->name, len);

	/*
	 * Iterate all entries sharing the directory hash bucket until an
	 * exact name match is found.
	 */
	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, HAMMER2_LOOKUP_SHARED);
	while (chain) {
		if (hammer2_chain_dirent_test(chain, pfs->name, len))
			break;
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		ripdata = NULL;	/* safety */

		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else if (error == 0) {
		error = ENOENT;
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(hmp->spmp->iroot);

	return (error);
}

/*
 * Create a new PFS under the super-root
 *
 * Creates the PFS inode in the per-device super-root under a flush
 * transaction, initializes its meta-data from the ioctl arguments,
 * fsyncs it (the super-root is not mounted), then re-locks the chain
 * and registers the new PFS with hammer2_pfsalloc().
 */
static int
hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
{
	hammer2_inode_data_t *nipdata;
	hammer2_chain_t *nchain;
	hammer2_dev_t *hmp;
	hammer2_dev_t *force_local;
	hammer2_ioc_pfs_t *pfs;
	hammer2_inode_t *nip;
	hammer2_tid_t mtid;
	int error;

	hmp = ip->pmp->pfs_hmps[0];	/* XXX */
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	nip = NULL;

	if (pfs->name[0] == 0)
		return(EINVAL);
	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure 0-termination */

	if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
		return(EEXIST);

	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);
	mtid = hammer2_trans_sub(hmp->spmp);
	nip = hammer2_inode_create_pfs(hmp->spmp, pfs->name, strlen(pfs->name),
				       &error);
	if (error == 0) {
		atomic_set_int(&nip->flags, HAMMER2_INODE_NOSIDEQ);
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		error = hammer2_chain_modify(nchain, mtid, 0, 0);
		KKASSERT(error == 0);
		nipdata = &nchain->data->ipdata;

		nip->meta.pfs_type = pfs->pfs_type;
		nip->meta.pfs_subtype = pfs->pfs_subtype;
		nip->meta.pfs_clid = pfs->pfs_clid;
		nip->meta.pfs_fsid = pfs->pfs_fsid;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;

		/*
		 * Set default compression and check algorithm.  This
		 * can be changed later.
		 *
		 * Do not allow compression on PFS's with the special name
		 * "boot", the boot loader can't decompress (yet).
		 */
		nip->meta.comp_algo =
			HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
		nip->meta.check_algo =
			HAMMER2_ENC_ALGO( HAMMER2_CHECK_XXHASH64);

		if (strcasecmp(pfs->name, "boot") == 0) {
			nip->meta.comp_algo =
				HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
		}

		/*
		 * Super-root isn't mounted, fsync it
		 */
		hammer2_chain_unlock(nchain);
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_chain_sync(nip);
		hammer2_inode_chain_flush(nip, HAMMER2_XOP_INODE_STOP |
					       HAMMER2_XOP_FSSYNC);
		hammer2_inode_drop(nip);
		/* nip is dead */

		/*
		 * We still have a ref on the chain, relock and associate
		 * with an appropriate PFS.
		 */
		force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		nipdata = &nchain->data->ipdata;
		kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
		hammer2_pfsalloc(nchain, nipdata,
				 nchain->bref.modify_tid, force_local);

		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);
	}
	hammer2_trans_done(hmp->spmp, HAMMER2_TRANS_ISFLUSH |
				      HAMMER2_TRANS_SIDEQ);

	return (error);
}

/*
 * Destroy an existing PFS under the super-root
 */
static int
hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *spmp;
	hammer2_pfs_t *pmp;
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *iroot;
	int error;
	int i;

	/*
	 * The PFS should be probed, so we should be able to
	 * locate it.  We only delete the PFS from the
	 * specific H2 block device (hmp), not all of
	 * them.  We must remove the PFS from the cluster
	 * before we can destroy it.
	 */
	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure termination */

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/*
	 * Search every mounted PFS for a cluster element on this device
	 * (hmp) whose name matches.
	 */
	TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			if (pmp->pfs_names[i] &&
			    strcmp(pmp->pfs_names[i], pfs->name) == 0) {
				break;
			}
		}
		if (i != HAMMER2_MAXCLUSTER)
			break;
	}

	if (pmp == NULL) {
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		return ENOENT;
	}

	/*
	 * Ok, we found the pmp and we have the index.
Permanently remove
	 * the PFS from the cluster
	 */
	iroot = pmp->iroot;	/* NOTE(review): assigned but unused below */
	kprintf("FOUND PFS %s CLINDEX %d\n", pfs->name, i);
	hammer2_pfsdealloc(pmp, i, 1);

	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Now destroy the PFS under its device using the per-device
	 * super-root.
	 */
	spmp = hmp->spmp;
	dip = spmp->iroot;
	hammer2_trans_init(spmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, pfs->name, strlen(pfs->name));
	xop->isdir = 2;
	xop->dopermanent = H2DOPERM_PERMANENT | H2DOPERM_FORCE;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_inode_unlock(dip);

#if 0
	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, 0);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}
#endif
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	hammer2_trans_done(spmp, HAMMER2_TRANS_SIDEQ);

	return (hammer2_error_to_errno(error));
}

/*
 * Create a snapshot of the PFS as a new PFS under the super-root.
 *
 * Serialized against bulkfree via hmp->bulklk.  Normally syncs the
 * filesystem and runs under a flush transaction; HAMMER2_PFSFLAGS_NOSYNC
 * (debugging) skips the sync and uses a normal transaction.
 */
static int
hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *chain;
	hammer2_inode_t *nip;
	hammer2_tid_t mtid;
	size_t name_len;
	hammer2_key_t lhc;
	int error;
#if 0
	uuid_t opfs_clid;
#endif

	if (pfs->name[0] == 0)
		return(EINVAL);
	if (pfs->name[sizeof(pfs->name)-1] != 0)
		return(EINVAL);

	pmp = ip->pmp;
	ip = pmp->iroot;	/* operate on the PFS root inode */

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	lockmgr(&hmp->bulklk, LK_EXCLUSIVE);

	/*
	 * NOSYNC is for debugging.  We skip the filesystem sync and use
	 * a normal transaction (which is less likely to stall).  used for
	 * testing filesystem consistency.
	 *
	 * In normal mode we sync the filesystem and use a flush transaction.
	 */
	if (pfs->pfs_flags & HAMMER2_PFSFLAGS_NOSYNC) {
		hammer2_trans_init(pmp, 0);
	} else {
		hammer2_vfs_sync(pmp->mp, MNT_WAIT);
		hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
	}
	mtid = hammer2_trans_sub(pmp);
	hammer2_inode_lock(ip, 0);
	hammer2_inode_modify(ip);
	ip->meta.pfs_lsnap_tid = mtid;

	/* XXX cluster it! */
	chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);

	name_len = strlen(pfs->name);
	lhc = hammer2_dirhash(pfs->name, name_len);

	/*
	 * Get the clid
	 */
	hmp = chain->hmp;

	/*
	 * Create the snapshot directory under the super-root
	 *
	 * Set PFS type, generate a unique filesystem id, and generate
	 * a cluster id.  Use the same clid when snapshotting a PFS root,
	 * which theoretically allows the snapshot to be used as part of
	 * the same cluster (perhaps as a cache).
	 *
	 * Note that pfs_lsnap_tid must be set in the snapshot as well,
	 * ensuring that any nocrc/nocomp file data modifications force
	 * a copy-on-write.
	 *
	 * Copy the (flushed) blockref array.  Theoretically we could use
	 * chain_duplicate() but it becomes difficult to disentangle
	 * the shared core so for now just brute-force it.
	 */
	hammer2_chain_unlock(chain);
	nip = hammer2_inode_create_pfs(hmp->spmp, pfs->name, name_len, &error);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);

	if (nip) {
		hammer2_dev_t *force_local;
		hammer2_chain_t *nchain;
		hammer2_inode_data_t *wipdata;
		hammer2_tid_t starting_inum;

		atomic_set_int(&nip->flags, HAMMER2_INODE_NOSIDEQ);
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		error = hammer2_chain_modify(nchain, mtid, 0, 0);
		KKASSERT(error == 0);
		wipdata = &nchain->data->ipdata;

		starting_inum = ip->pmp->inode_tid + 1;
		nip->meta.pfs_inum = starting_inum;
		nip->meta.pfs_type = HAMMER2_PFSTYPE_MASTER;
		nip->meta.pfs_subtype = HAMMER2_PFSSUBTYPE_SNAPSHOT;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;
		nip->meta.pfs_lsnap_tid = mtid;
		nchain->bref.embed.stats = chain->bref.embed.stats;

		kern_uuidgen(&nip->meta.pfs_fsid, 1);

#if 0
		/*
		 * Give the snapshot its own private cluster id.  As a
		 * snapshot no further synchronization with the original
		 * cluster will be done.
		 */
		if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
			nip->meta.pfs_clid = opfs_clid;
		else
			kern_uuidgen(&nip->meta.pfs_clid, 1);
#endif
		kern_uuidgen(&nip->meta.pfs_clid, 1);
		nchain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;

		/* XXX hack blockset copy */
		/* XXX doesn't work with real cluster */
		wipdata->meta = nip->meta;
		hammer2_spin_ex(&pmp->inum_spin);
		wipdata->u.blockset = pmp->pfs_iroot_blocksets[0];
		hammer2_spin_unex(&pmp->inum_spin);

		KKASSERT(wipdata == &nchain->data->ipdata);

		/*
		 * Super-root isn't mounted: sync and flush the snapshot
		 * inode explicitly, then drop our inode reference.
		 */
		hammer2_chain_unlock(nchain);
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_chain_sync(nip);
		hammer2_inode_chain_flush(nip, HAMMER2_XOP_INODE_STOP |
					       HAMMER2_XOP_FSSYNC);
					/* XXX | HAMMER2_XOP_VOLHDR */
		hammer2_inode_drop(nip);
		/* nip is dead */

		force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		wipdata = &nchain->data->ipdata;
		kprintf("SNAPSHOT LOCAL PFS (IOCTL): %s\n", wipdata->filename);
		hammer2_pfsalloc(nchain, wipdata, nchain->bref.modify_tid,
				 force_local);
		nchain->pmp->inode_tid = starting_inum;

		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);
	}

	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	hammer2_inode_unlock(ip);
	if (pfs->pfs_flags & HAMMER2_PFSFLAGS_NOSYNC) {
		hammer2_trans_done(pmp, 0);
	} else {
		hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH |
					HAMMER2_TRANS_SIDEQ);
	}

	lockmgr(&hmp->bulklk, LK_RELEASE);

	return (hammer2_error_to_errno(error));
}

/*
 * Retrieve the raw inode structure, non-inclusive of node-specific data.
 */
static int
hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_inode_t *ino;
	hammer2_chain_t *chain;
	int error;
	int i;

	ino = data;
	error = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	ino->data_count = 0;
	ino->inode_count = 0;
	/*
	 * Report the maximum data/inode counts found across the cluster
	 * elements backing this inode.
	 */
	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (ino->data_count <
			    chain->bref.embed.stats.data_count) {
				ino->data_count =
					chain->bref.embed.stats.data_count;
			}
			if (ino->inode_count <
			    chain->bref.embed.stats.inode_count) {
				ino->inode_count =
					chain->bref.embed.stats.inode_count;
			}
		}
	}
	bzero(&ino->ip_data, sizeof(ino->ip_data));
	ino->ip_data.meta = ip->meta;
	ino->kdata = ip;
	hammer2_inode_unlock(ip);

	return hammer2_error_to_errno(error);
}

/*
 * Set various parameters in an inode which cannot be set through
 * normal filesystem VNOPS.
 */
static int
hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_inode_t *ino = data;
	int error = 0;

	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);

	if ((ino->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
	    ip->meta.check_algo != ino->ip_data.meta.check_algo) {
		hammer2_inode_modify(ip);
		ip->meta.check_algo = ino->ip_data.meta.check_algo;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
	    ip->meta.comp_algo != ino->ip_data.meta.comp_algo) {
		hammer2_inode_modify(ip);
		ip->meta.comp_algo = ino->ip_data.meta.comp_algo;
	}
	ino->kdata = ip;

	/*
	 * Ignore these flags for now...
	 *
	 * NOTE(review): despite the comment above, the quota/ncopies
	 * fields below ARE applied when their flags are set — confirm
	 * whether the comment or the code reflects the intent.
	 */
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
	    ip->meta.inode_quota != ino->ip_data.meta.inode_quota) {
		hammer2_inode_modify(ip);
		ip->meta.inode_quota = ino->ip_data.meta.inode_quota;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
	    ip->meta.data_quota != ino->ip_data.meta.data_quota) {
		hammer2_inode_modify(ip);
		ip->meta.data_quota = ino->ip_data.meta.data_quota;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
	    ip->meta.ncopies != ino->ip_data.meta.ncopies) {
		hammer2_inode_modify(ip);
		ip->meta.ncopies = ino->ip_data.meta.ncopies;
	}
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);

	return (hammer2_error_to_errno(error));
}

/*
 * Dump (to the kernel console) the chain topology of each cluster
 * element backing the inode, capped at 100000 chains total per element.
 */
static
int
hammer2_ioctl_debug_dump(hammer2_inode_t *ip, u_int flags)
{
	hammer2_chain_t *chain;
	int count = 100000;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		chain = ip->cluster.array[i].chain;
		if (chain == NULL)
			continue;
		hammer2_dump_chain(chain, 0, &count, 'i', flags);
	}
	return 0;
}

/*
 * Turn on or off emergency mode on a filesystem.
 *
 * Sets/clears the EMERG flag on the PFS and on every backing device
 * in the cluster.
 */
static
int
hammer2_ioctl_emerg_mode(hammer2_inode_t *ip, u_int mode)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	int i;

	pmp = ip->pmp;
	if (mode) {
		kprintf("hammer2: WARNING: Emergency mode enabled\n");
		atomic_set_int(&pmp->flags, HAMMER2_PMPF_EMERG);
	} else {
		kprintf("hammer2: WARNING: Emergency mode disabled\n");
		atomic_clear_int(&pmp->flags, HAMMER2_PMPF_EMERG);
	}
	for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
		hmp = pmp->pfs_hmps[i];
		if (hmp == NULL)
			continue;
		if (mode)
			atomic_set_int(&hmp->hflags, HMNT2_EMERG);
		else
			atomic_clear_int(&hmp->hflags, HMNT2_EMERG);
	}
	return 0;
}

/*
 * Executes one flush/free pass per call.  If trying to recover
 * data we just freed up a moment ago it can take up to six passes
 * to fully free the blocks.  Note that passes occur automatically based
 * on free space as the storage fills up, but manual passes may be needed
 * if storage becomes almost completely full.
 */
static
int
hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_bulkfree_t *bfi = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *vchain;
	int error;
	int didsnap;

	pmp = ip->pmp;
	ip = pmp->iroot;	/* operate on the PFS root inode */

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	/*
	 * NOTE(review): a NULL bfi (the BULKFREE_ASYNC dispatch path)
	 * returns EINVAL here, which makes the later "if (bfi)" test
	 * dead code — confirm whether the async path is intentionally
	 * disabled.
	 */
	if (bfi == NULL)
		return (EINVAL);

	/*
	 * Bulkfree has to be serialized to guarantee at least one sync
	 * inbetween bulkfrees.
	 */
	error = lockmgr(&hmp->bflock, LK_EXCLUSIVE | LK_PCATCH);
	if (error)
		return error;

	/*
	 * sync the filesystem and obtain a snapshot of the synchronized
	 * hmp volume header.  We treat the snapshot as an independent
	 * entity.
	 *
	 * If ENOSPC occurs we should continue, because bulkfree is the only
	 * way to fix that.  The flush will have flushed everything it could
	 * and not left any modified chains.  Otherwise an error is fatal.
	 */
	error = hammer2_vfs_sync(pmp->mp, MNT_WAIT);
	if (error && error != ENOSPC)
		goto failed;

	/*
	 * If we have an ENOSPC error we have to bulkfree on the live
	 * topology.  Otherwise we can bulkfree on a snapshot.
	 */
	if (error) {
		kprintf("hammer2: WARNING! Bulkfree forced to use live "
			"topology\n");
		vchain = &hmp->vchain;
		hammer2_chain_ref(vchain);
		didsnap = 0;
	} else {
		vchain = hammer2_chain_bulksnap(hmp);
		didsnap = 1;
	}

	/*
	 * Bulkfree on a snapshot does not need a transaction, which allows
	 * it to run concurrently with any operation other than another
	 * bulkfree.
	 *
	 * If we are running bulkfree on the live topology we have to be
	 * in a FLUSH transaction.
	 */
	if (didsnap == 0)
		hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);

	if (bfi) {
		hammer2_thr_freeze(&hmp->bfthr);
		error = hammer2_bulkfree_pass(hmp, vchain, bfi);
		hammer2_thr_unfreeze(&hmp->bfthr);
	}
	if (didsnap) {
		hammer2_chain_bulkdrop(vchain);
	} else {
		hammer2_chain_drop(vchain);
		hammer2_trans_done(pmp, HAMMER2_TRANS_ISFLUSH |
					HAMMER2_TRANS_SIDEQ);
	}
	error = hammer2_error_to_errno(error);

failed:
	lockmgr(&hmp->bflock, LK_RELEASE);
	return error;
}

/*
 * Unconditionally delete meta-data in a hammer2 filesystem
 */
static
int
hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_destroy_t *iocd = data;
	hammer2_pfs_t *pmp = ip->pmp;
	int error;

	if (pmp->ronly) {
		error = EROFS;
		return error;
	}

	switch(iocd->cmd) {
	case HAMMER2_DELETE_FILE:
		/*
		 * Destroy a bad directory entry by name.  Caller must
		 * pass the directory as fd.
		 */
		{
			hammer2_xop_unlink_t *xop;

			/* reject an unterminated path buffer */
			if (iocd->path[sizeof(iocd->path)-1]) {
				error = EINVAL;
				break;
			}
			if (ip->meta.type != HAMMER2_OBJTYPE_DIRECTORY) {
				error = EINVAL;
				break;
			}
			hammer2_pfs_memory_wait(pmp);
			hammer2_trans_init(pmp, 0);
			hammer2_inode_lock(ip, 0);

			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
			hammer2_xop_setname(&xop->head, iocd->path, strlen(iocd->path));
			xop->isdir = -1;
			xop->dopermanent = H2DOPERM_PERMANENT |
					   H2DOPERM_FORCE |
					   H2DOPERM_IGNINO;
			hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

			error = hammer2_xop_collect(&xop->head, 0);
			error = hammer2_error_to_errno(error);
			hammer2_inode_unlock(ip);
			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
			hammer2_trans_done(pmp, HAMMER2_TRANS_SIDEQ);
		}
		break;
	case HAMMER2_DELETE_INUM:
		/*
		 * Destroy a bad inode by inode number.
		 */
		{
			hammer2_xop_lookup_t *xop;

			if (iocd->inum < 1) {
				error = EINVAL;
				break;
			}
			hammer2_pfs_memory_wait(pmp);
			hammer2_trans_init(pmp, 0);

			xop = hammer2_xop_alloc(pmp->iroot, HAMMER2_XOP_MODIFYING);
			xop->lhc = iocd->inum;
			hammer2_xop_start(&xop->head, &hammer2_delete_desc);
			error = hammer2_xop_collect(&xop->head, 0);
			error = hammer2_error_to_errno(error);
			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
			hammer2_trans_done(pmp, HAMMER2_TRANS_SIDEQ);
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}