/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ioctl Functions.
 *
 * WARNING!
The ioctl functions which manipulate the connection state need
 * to be able to run without deadlock on the volume's chain lock.
 * Most of these functions use a separate lock.
 */

#include "hammer2.h"

/*
 * Per-ioctl handlers, defined below.  Each receives the target inode and
 * the kernel-copied ioctl argument buffer.
 */
static int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_debug_dump(hammer2_inode_t *ip);
//static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data);

/*
 * Primary HAMMER2 ioctl dispatcher.
 *
 * The root-credential check is performed once up front and its result is
 * consulted per-case below; a handful of read-only ioctls (VERSION_GET,
 * INODE_GET, BULKFREE_SCAN/ASYNC, DEBUG_DUMP) deliberately ignore it.
 *
 * Returns 0 on success or an errno.  fflag is currently unused by the
 * dispatch logic below.
 */
int
hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data, int fflag,
	      struct ucred *cred)
{
	int error;

	/*
	 * Standard root cred checks, will be selectively ignored below
	 * for ioctls that do not require root creds.
	 */
	error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

	switch(com) {
	case HAMMER2IOC_VERSION_GET:
		/* no priv required */
		error = hammer2_ioctl_version_get(ip, data);
		break;
	case HAMMER2IOC_RECLUSTER:
		if (error == 0)
			error = hammer2_ioctl_recluster(ip, data);
		break;
	case HAMMER2IOC_REMOTE_SCAN:
		if (error == 0)
			error = hammer2_ioctl_remote_scan(ip, data);
		break;
	case HAMMER2IOC_REMOTE_ADD:
		if (error == 0)
			error = hammer2_ioctl_remote_add(ip, data);
		break;
	case HAMMER2IOC_REMOTE_DEL:
		if (error == 0)
			error = hammer2_ioctl_remote_del(ip, data);
		break;
	case HAMMER2IOC_REMOTE_REP:
		if (error == 0)
			error = hammer2_ioctl_remote_rep(ip, data);
		break;
	case HAMMER2IOC_SOCKET_GET:
		if (error == 0)
			error = hammer2_ioctl_socket_get(ip, data);
		break;
	case HAMMER2IOC_SOCKET_SET:
		if (error == 0)
			error = hammer2_ioctl_socket_set(ip, data);
		break;
	case HAMMER2IOC_PFS_GET:
		if (error == 0)
			error = hammer2_ioctl_pfs_get(ip, data);
		break;
	case HAMMER2IOC_PFS_LOOKUP:
		if (error == 0)
			error = hammer2_ioctl_pfs_lookup(ip, data);
		break;
	case HAMMER2IOC_PFS_CREATE:
		if (error == 0)
			error = hammer2_ioctl_pfs_create(ip, data);
		break;
	case HAMMER2IOC_PFS_DELETE:
		if (error == 0)
			error = hammer2_ioctl_pfs_delete(ip, data);
		break;
	case HAMMER2IOC_PFS_SNAPSHOT:
		if (error == 0)
			error = hammer2_ioctl_pfs_snapshot(ip, data);
		break;
	case HAMMER2IOC_INODE_GET:
		/* no priv required */
		error = hammer2_ioctl_inode_get(ip, data);
		break;
	case HAMMER2IOC_INODE_SET:
		if (error == 0)
			error = hammer2_ioctl_inode_set(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_SCAN:
		error = hammer2_ioctl_bulkfree_scan(ip, data);
		break;
	case HAMMER2IOC_BULKFREE_ASYNC:
		/*
		 * NOTE(review): passes data == NULL, but the handler
		 * rejects a NULL bfi with EINVAL — confirm whether the
		 * async path is intentionally disabled.
		 */
		error = hammer2_ioctl_bulkfree_scan(ip, NULL);
		break;
	/*case HAMMER2IOC_INODE_COMP_SET:
		error = hammer2_ioctl_inode_comp_set(ip, data);
		break;
	case HAMMER2IOC_INODE_COMP_REC_SET:
		error = hammer2_ioctl_inode_comp_rec_set(ip, data);
		break;
	case HAMMER2IOC_INODE_COMP_REC_SET2:
		error = hammer2_ioctl_inode_comp_rec_set2(ip, data);
		break;*/
	case HAMMER2IOC_DESTROY:
		if (error == 0)
			error = hammer2_ioctl_destroy(ip, data);
		break;
	case HAMMER2IOC_DEBUG_DUMP:
		error = hammer2_ioctl_debug_dump(ip);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

/*
 * Retrieve version and basic info
 *
 * Reports the volume header version of the first device backing the PFS,
 * or -1 if no device is attached.
 */
static int
hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_version_t *version = data;
	hammer2_dev_t *hmp;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp)
		version->version = hmp->voldata.version;
	else
		version->version = -1;
	return 0;
}

/*
 * Reconnect the cluster-communications descriptor (recl->fd) to the
 * single local device backing this mount.  Only works for single-chain
 * local device mounts.
 *
 * NOTE(review): the file reference obtained via holdfp() does not appear
 * to be released on the error paths (VFS_ROOT failure, or the
 * not-a-local-device case) — confirm whether hammer2_cluster_reconnect()
 * consumes fp and whether an fdrop is needed here.
 */
static int
hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_recluster_t *recl = data;
	struct vnode *vproot;
	struct file *fp;
	hammer2_cluster_t *cluster;
	int error;

	fp = holdfp(curproc->p_fd, recl->fd, -1);
	if (fp) {
		error = VFS_ROOT(ip->pmp->mp, &vproot);
		if (error == 0) {
			cluster = &ip->pmp->iroot->cluster;
			kprintf("reconnect to cluster: nc=%d focus=%p\n",
				cluster->nchains, cluster->focus);
			if (cluster->nchains != 1 || cluster->focus == NULL) {
				kprintf("not a local device mount\n");
				error = EINVAL;
			} else {
				hammer2_cluster_reconnect(cluster->focus->hmp,
							  fp);
				kprintf("ok\n");
				error = 0;
			}
			vput(vproot);
		}
	} else {
		error = EINVAL;
	}
	return error;
}

/*
 * Retrieve information about a remote
 *
 * Copies out the copyinfo slot selected by remote->copyid and computes
 * remote->nextid (the next populated slot, or -1 when none remain).
 */
static int
hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data)
{
	hammer2_dev_t *hmp;
	hammer2_ioc_remote_t *remote = data;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	remote->copy1 = hmp->voldata.copyinfo[copyid];
	hammer2_voldata_unlock(hmp);

	/*
	 * Adjust nextid (GET only)
	 *
	 * NOTE(review): this scan reads copyinfo[] after the voldata lock
	 * has been released — presumably an acceptable race for an
	 * informational ioctl; confirm.
	 */
	while (++copyid < HAMMER2_COPYID_COUNT &&
	       hmp->voldata.copyinfo[copyid].copyid == 0) {
		;
	}
	if (copyid == HAMMER2_COPYID_COUNT)
		remote->nextid = -1;
	else
		remote->nextid = copyid;

	return(0);
}

/*
 * Add new remote entry
 *
 * copyid < 0 requests auto-allocation of the first free slot (slot 0 is
 * never auto-allocated); returns ENOSPC when the table is full.
 */
static int
hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;
	int error = 0;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	if (copyid < 0) {
		/* auto-allocate: find the first unused slot (from 1) */
		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				break;
		}
		if (copyid == HAMMER2_COPYID_COUNT) {
			error = ENOSPC;
			goto failed;
		}
	}
	hammer2_voldata_modify(hmp);
	remote->copy1.copyid = copyid;
	hmp->voldata.copyinfo[copyid] = remote->copy1;
	hammer2_volconf_update(hmp, copyid);
failed:
	hammer2_voldata_unlock(hmp);
	return (error);
}

/*
 * Delete existing remote entry
 *
 * copyid < 0 requests deletion by path (remote->copy1.path); returns
 * ENOENT when no populated slot matches.
 */
static int
hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;
	int error = 0;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);
	/* force NUL termination of the caller-supplied path */
	remote->copy1.path[sizeof(remote->copy1.path) - 1] = 0;
	hammer2_voldata_lock(hmp);
	if (copyid < 0) {
		/* lookup by path; skip empty slots */
		for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
			if (hmp->voldata.copyinfo[copyid].copyid == 0)
				continue;
			if (strcmp(remote->copy1.path,
			    hmp->voldata.copyinfo[copyid].path) == 0) {
				break;
			}
		}
		if (copyid == HAMMER2_COPYID_COUNT) {
			error = ENOENT;
			goto failed;
		}
	}
	hammer2_voldata_modify(hmp);
	hmp->voldata.copyinfo[copyid].copyid = 0;
	hammer2_volconf_update(hmp, copyid);
failed:
	hammer2_voldata_unlock(hmp);
	return (error);
}

/*
 * Replace existing remote entry
 *
 * Currently a stub: validates copyid and dirties the volume header but
 * the actual replacement (volconf_update) is commented out.
 */
static int
hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	hammer2_voldata_modify(hmp);
	/*hammer2_volconf_update(hmp, copyid);*/
	hammer2_voldata_unlock(hmp);

	return(0);
}

/*
 * Retrieve communications socket (unimplemented)
 */
static int
hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data)
{
	return (EOPNOTSUPP);
}

/*
 * Set communications socket for connection
 *
 * Currently a stub: validates copyid and cycles the voldata lock without
 * doing any work.
 */
static int
hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_remote_t *remote = data;
	hammer2_dev_t *hmp;
	int copyid = remote->copyid;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
		return (EINVAL);

	hammer2_voldata_lock(hmp);
	hammer2_voldata_unlock(hmp);

	return(0);
}

/*
 * Used to scan and retrieve PFS information.  PFS's are directories under
 * the super-root.
 *
 * To scan PFSs pass name_key=0.  The function will scan for the next
 * PFS and set all fields, as well as set name_next to the next key.
 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
 *
 * To retrieve a particular PFS by key, specify the key but note that
 * the ioctl will return the lowest key >= specified_key, so the caller
 * must verify the key.
 *
 * To retrieve the PFS associated with the file descriptor, pass
 * name_key set to (hammer2_key_t)-1.
 */
static int
hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t save_key;
	int error;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	save_key = pfs->name_key;
	error = 0;

	/*
	 * Setup
	 *
	 * name_key == -1 targets the mount's own PFS root directly
	 * (no parent iteration); otherwise scan the super-root starting
	 * at name_key.
	 */
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_lock(ip->pmp->iroot, 0);
		parent = NULL;
		chain = hammer2_inode_chain(ip->pmp->iroot, 0,
					    HAMMER2_RESOLVE_ALWAYS |
					    HAMMER2_RESOLVE_SHARED);
	} else {
		hammer2_inode_lock(hmp->spmp->iroot, 0);
		parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
					    HAMMER2_RESOLVE_ALWAYS |
					    HAMMER2_RESOLVE_SHARED);
		chain = hammer2_chain_lookup(&parent, &key_next,
					    pfs->name_key, HAMMER2_KEY_MAX,
					    &error,
					    HAMMER2_LOOKUP_SHARED);
	}

	/*
	 * Locate next PFS (skip non-inode chains under the super-root)
	 */
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
			break;
		if (parent == NULL) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
			chain = NULL;
			break;
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error,
					   HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		KKASSERT(ripdata->meta.name_len < sizeof(pfs->name));
		bcopy(ripdata->filename, pfs->name, ripdata->meta.name_len);
		pfs->name[ripdata->meta.name_len] = 0;
		ripdata = NULL;	/* safety */

		/*
		 * Calculate name_next, if any.  We are only accessing
		 * chain->bref so we can ignore chain->error (if the key
		 * is used later it will error then).
		 */
		if (parent == NULL) {
			pfs->name_next = (hammer2_key_t)-1;
		} else {
			chain = hammer2_chain_next(&parent, chain, &key_next,
						   key_next, HAMMER2_KEY_MAX,
						   &error,
						   HAMMER2_LOOKUP_SHARED);
			if (chain)
				pfs->name_next = chain->bref.key;
			else
				pfs->name_next = (hammer2_key_t)-1;
		}
	} else {
		pfs->name_next = (hammer2_key_t)-1;
		error = ENOENT;
	}

	/*
	 * Cleanup
	 */
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	/* unlock whichever iroot was locked in the setup above */
	if (save_key == (hammer2_key_t)-1) {
		hammer2_inode_unlock(ip->pmp->iroot);
	} else {
		hammer2_inode_unlock(hmp->spmp->iroot);
	}

	return (error);
}

/*
 * Find a specific PFS by name
 *
 * Hashes pfs->name and scans matching directory entries under the
 * super-root, filling in the pfs meta fields on success.  Returns
 * ENOENT when no entry matches.
 */
static int
hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_dev_t *hmp;
	hammer2_ioc_pfs_t *pfs;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_key_t lhc;
	int error;
	size_t len;

	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	error = 0;

	hammer2_inode_lock(hmp->spmp->iroot, HAMMER2_RESOLVE_SHARED);
	parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);

	/* force NUL termination before hashing the name */
	pfs->name[sizeof(pfs->name) - 1] = 0;
	len = strlen(pfs->name);
	lhc = hammer2_dirhash(pfs->name, len);

	/* scan the dirhash collision space for an exact name match */
	chain = hammer2_chain_lookup(&parent, &key_next,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     &error, HAMMER2_LOOKUP_SHARED);
	while (chain) {
		if (hammer2_chain_dirent_test(chain, pfs->name, len))
			break;
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next,
					   lhc + HAMMER2_DIRHASH_LOMASK,
					   &error, HAMMER2_LOOKUP_SHARED);
	}
	error = hammer2_error_to_errno(error);

	/*
	 * Load the data being returned by the ioctl.
	 */
	if (chain && chain->error == 0) {
		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
		ripdata = &chain->data->ipdata;
		pfs->name_key = ripdata->meta.name_key;
		pfs->pfs_type = ripdata->meta.pfs_type;
		pfs->pfs_subtype = ripdata->meta.pfs_subtype;
		pfs->pfs_clid = ripdata->meta.pfs_clid;
		pfs->pfs_fsid = ripdata->meta.pfs_fsid;
		ripdata = NULL;

		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else if (error == 0) {
		error = ENOENT;
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(hmp->spmp->iroot);

	return (error);
}

/*
 * Create a new PFS under the super-root
 *
 * Creates the PFS directory inode in the super-root transaction, applies
 * the caller-supplied pfs meta fields and default comp/check algorithms,
 * syncs it, then registers it with hammer2_pfsalloc().  Returns EEXIST
 * if a PFS of the same name already exists.
 */
static int
hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
{
	hammer2_inode_data_t *nipdata;
	hammer2_chain_t *nchain;
	hammer2_dev_t *hmp;
	hammer2_dev_t *force_local;
	hammer2_ioc_pfs_t *pfs;
	hammer2_inode_t *nip;
	hammer2_tid_t mtid;
	int error;

	hmp = ip->pmp->pfs_hmps[0];	/* XXX */
	if (hmp == NULL)
		return (EINVAL);

	pfs = data;
	nip = NULL;

	if (pfs->name[0] == 0)
		return(EINVAL);
	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure 0-termination */

	if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
		return(EEXIST);

	hammer2_trans_init(hmp->spmp, 0);
	mtid = hammer2_trans_sub(hmp->spmp);
	nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
				   NULL, NULL,
				   pfs->name, strlen(pfs->name), 0,
				   1, HAMMER2_OBJTYPE_DIRECTORY, 0,
				   HAMMER2_INSERT_PFSROOT, &error);
	if (error == 0) {
		nip->flags |= HAMMER2_INODE_NOSIDEQ;
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		error = hammer2_chain_modify(nchain, mtid, 0, 0);
		KKASSERT(error == 0);
		nipdata = &nchain->data->ipdata;

		nip->meta.pfs_type = pfs->pfs_type;
		nip->meta.pfs_subtype = pfs->pfs_subtype;
		nip->meta.pfs_clid = pfs->pfs_clid;
		nip->meta.pfs_fsid = pfs->pfs_fsid;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;

		/*
		 * Set default compression and check algorithm.  This
		 * can be changed later.
		 *
		 * Do not allow compression on PFS's with the special name
		 * "boot", the boot loader can't decompress (yet).
		 */
		nip->meta.comp_algo =
			HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
		nip->meta.check_algo =
			HAMMER2_ENC_ALGO(HAMMER2_CHECK_XXHASH64);

		if (strcasecmp(pfs->name, "boot") == 0) {
			nip->meta.comp_algo =
				HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
		}

		/*
		 * Super-root isn't mounted, fsync it
		 */
		hammer2_chain_unlock(nchain);
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_chain_sync(nip);
		KKASSERT(nip->refs == 1);
		hammer2_inode_drop(nip);

		/*
		 * We still have a ref on the chain, relock and associate
		 * with an appropriate PFS.
		 */
		force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		nipdata = &nchain->data->ipdata;
		kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
		hammer2_pfsalloc(nchain, nipdata,
				 nchain->bref.modify_tid, force_local);

		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);

	}
	hammer2_trans_done(hmp->spmp);

	return (error);
}

/*
 * Destroy an existing PFS under the super-root
 */
static int
hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *spmp;
	hammer2_pfs_t *pmp;
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *iroot;
	int error;
	int i;

	/*
	 * The PFS should be probed, so we should be able to
	 * locate it.  We only delete the PFS from the
	 * specific H2 block device (hmp), not all of
	 * them.  We must remove the PFS from the cluster
	 * before we can destroy it.
	 */
	hmp = ip->pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	pfs->name[sizeof(pfs->name) - 1] = 0;	/* ensure termination */

	lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

	/* find the (pmp, cluster index) pair naming this PFS on hmp */
	TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
		for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
			if (pmp->pfs_hmps[i] != hmp)
				continue;
			if (pmp->pfs_names[i] &&
			    strcmp(pmp->pfs_names[i], pfs->name) == 0) {
				break;
			}
		}
		if (i != HAMMER2_MAXCLUSTER)
			break;
	}

	if (pmp == NULL) {
		lockmgr(&hammer2_mntlk, LK_RELEASE);
		return ENOENT;
	}

	/*
	 * Ok, we found the pmp and we have the index.  Permanently remove
	 * the PFS from the cluster
	 */
	iroot = pmp->iroot;
	kprintf("FOUND PFS %s CLINDEX %d\n", pfs->name, i);
	hammer2_pfsdealloc(pmp, i, 1);

	lockmgr(&hammer2_mntlk, LK_RELEASE);

	/*
	 * Now destroy the PFS under its device using the per-device
	 * super-root.
	 */
	spmp = hmp->spmp;
	dip = spmp->iroot;
	hammer2_trans_init(spmp, 0);
	hammer2_inode_lock(dip, 0);

	/* isdir == 2: directory-or-file; force a permanent deletion */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, pfs->name, strlen(pfs->name));
	xop->isdir = 2;
	xop->dopermanent = H2DOPERM_PERMANENT | H2DOPERM_FORCE;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_inode_unlock(dip);

#if 0
	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, 0);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}
#endif
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	hammer2_trans_done(spmp);

	return (hammer2_error_to_errno(error));
}

/*
 * Snapshot a PFS: flush the filesystem, then duplicate the PFS root
 * inode (including its blockset) as a new MASTER/SNAPSHOT PFS under the
 * super-root.  Serialized against bulkfree via hmp->bulklk.
 */
static int
hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_ioc_pfs_t *pfs = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *chain;
	hammer2_inode_t *nip;
	hammer2_tid_t mtid;
	size_t name_len;
	hammer2_key_t lhc;
	struct vattr vat;
	int error;
#if 0
	uuid_t opfs_clid;
#endif

	if (pfs->name[0] == 0)
		return(EINVAL);
	if (pfs->name[sizeof(pfs->name)-1] != 0)
		return(EINVAL);

	/* operate on the PFS root inode, not the passed-in file */
	pmp = ip->pmp;
	ip = pmp->iroot;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);

	lockmgr(&hmp->bulklk, LK_EXCLUSIVE);

	/* flush everything out before copying the blockset */
	hammer2_vfs_sync(pmp->mp, MNT_WAIT);

	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
	mtid = hammer2_trans_sub(pmp);
	hammer2_inode_lock(ip, 0);
	hammer2_inode_modify(ip);
	ip->meta.pfs_lsnap_tid = mtid;

	/* XXX cluster it!
	 */
	chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);

	name_len = strlen(pfs->name);
	lhc = hammer2_dirhash(pfs->name, name_len);

	/*
	 * Get the clid
	 */
	ripdata = &chain->data->ipdata;
#if 0
	opfs_clid = ripdata->meta.pfs_clid;
#endif
	hmp = chain->hmp;

	/*
	 * Create the snapshot directory under the super-root
	 *
	 * Set PFS type, generate a unique filesystem id, and generate
	 * a cluster id.  Use the same clid when snapshotting a PFS root,
	 * which theoretically allows the snapshot to be used as part of
	 * the same cluster (perhaps as a cache).
	 *
	 * Copy the (flushed) blockref array.  Theoretically we could use
	 * chain_duplicate() but it becomes difficult to disentangle
	 * the shared core so for now just brute-force it.
	 */
	VATTR_NULL(&vat);
	vat.va_type = VDIR;
	vat.va_mode = 0755;
	hammer2_chain_unlock(chain);
	nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
				   &vat, proc0.p_ucred,
				   pfs->name, name_len, 0,
				   1, 0, 0,
				   HAMMER2_INSERT_PFSROOT, &error);
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
	ripdata = &chain->data->ipdata;

	if (nip) {
		hammer2_dev_t *force_local;
		hammer2_chain_t *nchain;
		hammer2_inode_data_t *wipdata;
		hammer2_key_t starting_inum;

		nip->flags |= HAMMER2_INODE_NOSIDEQ;
		hammer2_inode_modify(nip);
		nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
		error = hammer2_chain_modify(nchain, mtid, 0, 0);
		KKASSERT(error == 0);
		wipdata = &nchain->data->ipdata;

		starting_inum = ip->pmp->inode_tid + 1;
		nip->meta.pfs_inum = starting_inum;
		nip->meta.pfs_type = HAMMER2_PFSTYPE_MASTER;
		nip->meta.pfs_subtype = HAMMER2_PFSSUBTYPE_SNAPSHOT;
		nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;
		kern_uuidgen(&nip->meta.pfs_fsid, 1);

#if 0
		/*
		 * Give the snapshot its own private cluster id.  As a
		 * snapshot no further synchronization with the original
		 * cluster will be done.
		 */
		if (chain->flags & HAMMER2_CHAIN_PFSBOUNDARY)
			nip->meta.pfs_clid = opfs_clid;
		else
			kern_uuidgen(&nip->meta.pfs_clid, 1);
#endif
		kern_uuidgen(&nip->meta.pfs_clid, 1);
		nchain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;

		/* XXX hack blockset copy */
		/* XXX doesn't work with real cluster */
		wipdata->meta = nip->meta;
		wipdata->u.blockset = ripdata->u.blockset;

		KKASSERT(wipdata == &nchain->data->ipdata);

		/* super-root isn't mounted; sync the new inode manually */
		hammer2_chain_unlock(nchain);
		hammer2_inode_ref(nip);
		hammer2_inode_unlock(nip);
		hammer2_inode_chain_sync(nip);
		KKASSERT(nip->refs == 1);
		hammer2_inode_drop(nip);

		force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

		/* relock and register the snapshot as a PFS */
		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		wipdata = &nchain->data->ipdata;
		kprintf("SNAPSHOT LOCAL PFS (IOCTL): %s\n", wipdata->filename);
		hammer2_pfsalloc(nchain, wipdata, nchain->bref.modify_tid,
				 force_local);
		nchain->pmp->inode_tid = starting_inum;

		hammer2_chain_unlock(nchain);
		hammer2_chain_drop(nchain);
	}

	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);

	hammer2_inode_unlock(ip);
	hammer2_trans_done(pmp);

	lockmgr(&hmp->bulklk, LK_RELEASE);

	return (hammer2_error_to_errno(error));
}

/*
 * Retrieve the raw inode structure, non-inclusive of node-specific data.
 *
 * data_count/inode_count are reported as the maximum across all chains
 * in the inode's cluster.
 */
static int
hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_inode_t *ino;
	hammer2_chain_t *chain;
	int error;
	int i;

	ino = data;
	error = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	ino->data_count = 0;
	ino->inode_count = 0;
	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (ino->data_count <
			    chain->bref.embed.stats.data_count) {
				ino->data_count =
					chain->bref.embed.stats.data_count;
			}
			if (ino->inode_count <
			    chain->bref.embed.stats.inode_count) {
				ino->inode_count =
					chain->bref.embed.stats.inode_count;
			}
		}
	}
	bzero(&ino->ip_data, sizeof(ino->ip_data));
	ino->ip_data.meta = ip->meta;
	ino->kdata = ip;
	hammer2_inode_unlock(ip);

	return hammer2_error_to_errno(error);
}

/*
 * Set various parameters in an inode which cannot be set through
 * normal filesystem VNOPS.
 *
 * Each field is only written when its HAMMER2IOC_INODE_FLAG_* bit is
 * set AND the value actually differs, so the inode is not dirtied
 * unnecessarily.
 */
static int
hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_inode_t *ino = data;
	int error = 0;

	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);

	if ((ino->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
	    ip->meta.check_algo != ino->ip_data.meta.check_algo) {
		hammer2_inode_modify(ip);
		ip->meta.check_algo = ino->ip_data.meta.check_algo;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
	    ip->meta.comp_algo != ino->ip_data.meta.comp_algo) {
		hammer2_inode_modify(ip);
		ip->meta.comp_algo = ino->ip_data.meta.comp_algo;
	}
	ino->kdata = ip;

	/* Ignore these flags for now...*/
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
	    ip->meta.inode_quota != ino->ip_data.meta.inode_quota) {
		hammer2_inode_modify(ip);
		ip->meta.inode_quota = ino->ip_data.meta.inode_quota;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
	    ip->meta.data_quota != ino->ip_data.meta.data_quota) {
		hammer2_inode_modify(ip);
		ip->meta.data_quota = ino->ip_data.meta.data_quota;
	}
	if ((ino->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
	    ip->meta.ncopies != ino->ip_data.meta.ncopies) {
		hammer2_inode_modify(ip);
		ip->meta.ncopies = ino->ip_data.meta.ncopies;
	}
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	return (hammer2_error_to_errno(error));
}

/*
 * Dump (to the kernel console) the chain topology under each chain in
 * the inode's cluster, limited to ~1000 entries total.
 */
static
int
hammer2_ioctl_debug_dump(hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	int count = 1000;	/* output limiter shared across chains */
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		chain = ip->cluster.array[i].chain;
		if (chain == NULL)
			continue;
		hammer2_dump_chain(chain, 0, &count, 'i');
	}
	return 0;
}

/*
 * Executes one flush/free pass per call.  If trying to recover
 * data we just freed up a moment ago it can take up to six passes
 * to fully free the blocks.  Note that passes occur automatically based
 * on free space as the storage fills up, but manual passes may be needed
 * if storage becomes almost completely full.
 */
static
int
hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_bulkfree_t *bfi = data;
	hammer2_dev_t *hmp;
	hammer2_pfs_t *pmp;
	hammer2_chain_t *vchain;
	int error;
	int didsnap;

	/* operate on the PFS root, not the passed-in file */
	pmp = ip->pmp;
	ip = pmp->iroot;

	hmp = pmp->pfs_hmps[0];
	if (hmp == NULL)
		return (EINVAL);
	/*
	 * NOTE(review): the BULKFREE_ASYNC ioctl reaches here with
	 * bfi == NULL and thus always gets EINVAL — confirm whether the
	 * async path is intentionally disabled.
	 */
	if (bfi == NULL)
		return (EINVAL);

	/*
	 * Bulkfree has to be serialized to guarantee at least one sync
	 * inbetween bulkfrees.
	 */
	error = lockmgr(&hmp->bflock, LK_EXCLUSIVE | LK_PCATCH);
	if (error)
		return error;

	/*
	 * sync the filesystem and obtain a snapshot of the synchronized
	 * hmp volume header.  We treat the snapshot as an independent
	 * entity.
	 *
	 * If ENOSPC occurs we should continue, because bulkfree is the only
	 * way to fix that.  The flush will have flushed everything it could
	 * and not left any modified chains.  Otherwise an error is fatal.
	 */
	error = hammer2_vfs_sync(pmp->mp, MNT_WAIT);
	if (error && error != ENOSPC)
		goto failed;

	/*
	 * If we have an ENOSPC error we have to bulkfree on the live
	 * topology.  Otherwise we can bulkfree on a snapshot.
	 */
	if (error) {
		kprintf("hammer2: WARNING! Bulkfree forced to use live "
			"topology\n");
		vchain = &hmp->vchain;
		hammer2_chain_ref(vchain);
		didsnap = 0;
	} else {
		vchain = hammer2_chain_bulksnap(hmp);
		didsnap = 1;
	}

	/*
	 * Bulkfree on a snapshot does not need a transaction, which allows
	 * it to run concurrently with any operation other than another
	 * bulkfree.
	 *
	 * If we are running bulkfree on the live topology we have to be
	 * in a FLUSH transaction.
	 */
	if (didsnap == 0)
		hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);

	if (bfi) {
		/* freeze the bulkfree thread while we run our own pass */
		hammer2_thr_freeze(&hmp->bfthr);
		error = hammer2_bulkfree_pass(hmp, vchain, bfi);
		hammer2_thr_unfreeze(&hmp->bfthr);
	}
	if (didsnap) {
		hammer2_chain_bulkdrop(vchain);
	} else {
		hammer2_chain_drop(vchain);
		hammer2_trans_done(pmp);
	}
	error = hammer2_error_to_errno(error);

failed:
	lockmgr(&hmp->bflock, LK_RELEASE);
	return error;
}

/*
 * Unconditionally delete meta-data in a hammer2 filesystem
 *
 * Two modes: HAMMER2_DELETE_FILE removes a (possibly corrupt) directory
 * entry by name from the directory passed as fd; HAMMER2_DELETE_INUM
 * removes an inode by inode number.  Refused on read-only mounts.
 */
static
int
hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data)
{
	hammer2_ioc_destroy_t *iocd = data;
	hammer2_pfs_t *pmp = ip->pmp;
	int error;

	if (pmp->ronly) {
		error = EROFS;
		return error;
	}

	switch(iocd->cmd) {
	case HAMMER2_DELETE_FILE:
		/*
		 * Destroy a bad directory entry by name.  Caller must
		 * pass the directory as fd.
		 */
		{
			hammer2_xop_unlink_t *xop;

			/* path must be NUL terminated */
			if (iocd->path[sizeof(iocd->path)-1]) {
				error = EINVAL;
				break;
			}
			if (ip->meta.type != HAMMER2_OBJTYPE_DIRECTORY) {
				error = EINVAL;
				break;
			}
			hammer2_pfs_memory_wait(pmp);
			hammer2_trans_init(pmp, 0);
			hammer2_inode_lock(ip, 0);

			/* isdir == -1: type unknown; force permanent delete */
			xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
			hammer2_xop_setname(&xop->head, iocd->path,
					    strlen(iocd->path));
			xop->isdir = -1;
			xop->dopermanent = H2DOPERM_PERMANENT |
					   H2DOPERM_FORCE |
					   H2DOPERM_IGNINO;
			hammer2_xop_start(&xop->head, hammer2_xop_unlink);

			error = hammer2_xop_collect(&xop->head, 0);
			error = hammer2_error_to_errno(error);
			hammer2_inode_unlock(ip);
			hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
			hammer2_trans_done(pmp);
		}
		break;
	case HAMMER2_DELETE_INUM:
		/*
		 * Destroy a bad inode by inode number.
		 */
		{
			hammer2_xop_lookup_t *xop;

			if (iocd->inum < 1) {
				error = EINVAL;
				break;
			}
			hammer2_pfs_memory_wait(pmp);
			hammer2_trans_init(pmp, 0);

			xop = hammer2_xop_alloc(pmp->iroot, 0);
			xop->lhc = iocd->inum;
			hammer2_xop_start(&xop->head, hammer2_xop_lookup);
			error = hammer2_xop_collect(&xop->head, 0);
			if (error == 0) {
				ip = hammer2_inode_get(pmp, NULL,
						       &xop->head.cluster, -1);
				hammer2_xop_retire(&xop->head,
						   HAMMER2_XOPMASK_VOP);
				if (ip) {
					/* force nlinks to 1 so the
					 * finisher actually destroys it */
					ip->meta.nlinks = 1;
					hammer2_inode_unlink_finisher(ip, 0);
					hammer2_inode_unlock(ip);
				}
			} else {
				hammer2_xop_retire(&xop->head,
						   HAMMER2_XOPMASK_VOP);
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}