/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>
#include <sys/vnode.h>

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}

/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static __noinline
hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
#ifdef INVARIANTS
			int sanitychk = 0;
#endif
			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
#ifdef INVARIANTS
				if (iptmp == ip)
					sanitychk = 1;
#endif
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp,
						  entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip);
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;
	}

	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return depend;
}

/*
 * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}

/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *	  Shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}
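
/*
 * Example (sketch only, hypothetical caller): a front-end operation
 * typically brackets meta-data access with the inode lock:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *	...examine or modify ip->meta...
 *	hammer2_inode_unlock(ip);
 *
 * Add HAMMER2_RESOLVE_SHARED to the flags mask for read-only access.
 */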

/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	size_t count;
	size_t i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies, record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation, this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}

/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}

/*
 * If either ip1 or ip2 have been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 have been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}
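
/*
 * Example (sketch, hypothetical caller): an operation creating a
 * directory entry for an inode locks both inodes and records the
 * dependency so the syncer flushes them together (dirent as ip1):
 *
 *	hammer2_inode_lock4(dip, nip, NULL, NULL);
 *	...create the dirent under dip referencing nip...
 *	hammer2_inode_depend(dip, nip);
 *	hammer2_inode_unlock(nip);
 *	hammer2_inode_unlock(dip);
 */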

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}
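
/*
 * Example (sketch): upgrade/downgrade are used as a pair around code
 * that requires exclusivity while preserving the caller's original
 * lock state:
 *
 *	int wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	...code requiring the exclusive inode lock...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 */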

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL);

				kfree_obj(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
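
/*
 * Example (sketch): hammer2_inode_lookup() returns a referenced but
 * unlocked inode, so the caller owes a matching drop:
 *
 *	ip = hammer2_inode_lookup(pmp, inum);
 *	if (ip) {
 *		...lock and use ip...
 *		hammer2_inode_drop(ip);
 *	}
 */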

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		vx_downgrade(vp);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
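
/*
 * Example (sketch, hypothetical caller): a lookup-style VNOP resolves
 * the inode first, then acquires the vnode while holding the inode
 * lock:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip);
 *
 * On success vp is returned exclusively locked and referenced.
 */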

/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	{
		hammer2_inode_t *nnip = nip;
		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
	}

	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		hammer2_inode_repoint(nip, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}
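
/*
 * Example (sketch): the two calling modes described above, as used
 * elsewhere in this file.  With an xop the inum argument is ignored;
 * with a NULL xop the specified inum is used directly:
 *
 *	nip = hammer2_inode_get(pmp, &xop->head, -1, -1);
 *	nip = hammer2_inode_get(pmp, NULL, inum, -1);
 */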

/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			 const uint8_t *name, size_t name_len,
			 int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}
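
/*
 * Example (sketch, hypothetical caller): because the new PFS inode is
 * inserted onto the media, the call must be bracketed by a flush
 * transaction:
 *
 *	hammer2_trans_init(spmp, HAMMER2_TRANS_ISFLUSH);
 *	nip = hammer2_inode_create_pfs(spmp, name, name_len, &error);
 *	...flush and finish the transaction...
 */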

/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	uint8_t type;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	type = nip->meta.type;

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.
	 * We are not in a flush transaction so we can't mess with media
	 * topology above normal inodes (i.e. the index of the inodes
	 * themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}
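
/*
 * Example (sketch, hypothetical caller): a create-style VNOP pairs the
 * two functions above, allocating the inode first and then creating
 * the directory entry that points at it:
 *
 *	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
 *	if (nip) {
 *		error = hammer2_dirent_create(dip, name, name_len,
 *					      nip->meta.inum,
 *					      nip->meta.type);
 *	}
 */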

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

hammer2_key_t
hammer2_inode_data_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.data_count)
				count = chain->bref.embed.stats.data_count;
		}
	}
	return count;
}

hammer2_key_t
hammer2_inode_inode_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.inode_count)
				count = chain->bref.embed.stats.inode_count;
		}
	}
	return count;
}
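
/*
 * Example (sketch): these helpers feed statfs-style accounting by
 * taking the maximum over the cluster, e.g. for the PFS root:
 *
 *	hammer2_key_t data_count = hammer2_inode_data_count(pmp->iroot);
 *	hammer2_key_t inode_count = hammer2_inode_inode_count(pmp->iroot);
 */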

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	hammer2_pfs_t *pmp;
	int error;

	pmp = ip->pmp;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open we can just delete the inode and not bother dropping
	 * nlinks to 0 (avoiding unnecessary block updates).
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			goto killit;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	if (ip->vp)
		hammer2_knote(ip->vp, NOTE_DELETE);

	/*
	 * nlinks is now an implied zero, delete the inode if not open.
	 * We avoid unnecessary media updates by not bothering to actually
	 * decrement nlinks for the 1->0 transition.
	 *
	 * Put the inode on the sideq to ensure that any disconnected chains
	 * get properly flushed (so they can be freed).  Defer the deletion
	 * to the sync code, doing it now will desynchronize the inode from
	 * related directory entries (which is bad).
	 *
	 * NOTE: killit can be reached without modifying the inode, so
	 *	 make sure that it is on the SIDEQ.
	 */
	if (isopen == 0) {
#if 0
		hammer2_xop_destroy_t *xop;
#endif

killit:
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);
#if 0
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif
	}
	error = 0;	/* XXX */

	return error;
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
 * we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}
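
/*
 * Example (sketch): a typical meta-data update while holding the
 * exclusive inode lock:
 *
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_modify(ip);
 *	ip->meta.mtime = ...;
 *	hammer2_inode_unlock(ip);
 */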

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}
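
/*
 * Example (sketch): callers flushing an inode run the two steps in
 * order, first synchronizing ip->meta into the chain, then flushing
 * the chain topology (flags depend on the caller's context):
 *
 *	hammer2_inode_chain_sync(ip);
 *	hammer2_inode_chain_flush(ip, flags);
 */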

/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *	 to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
					     HAMMER2_INODE_ISUNLINKED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}