/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static __noinline
hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
#ifdef INVARIANTS
			int sanitychk = 0;
#endif
			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
#ifdef INVARIANTS
				if (iptmp == ip)
					sanitychk = 1;
#endif
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip);
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;
	}

	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return depend;
}
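
/*
 * Illustrative sketch (not part of the original source): how the return
 * value of hammer2_inode_setdepend_locked() is chained when merging several
 * inodes into one dependency group.  This is essentially what
 * hammer2_inode_depend() and hammer2_inode_lock4() below do; the fragment
 * is kept here only to make the (void *)-1 sentinel contract explicit.
 */
#if 0
	hammer2_depend_t *depend = NULL;

	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, depend);
	/*
	 * depend is now either ip1's dependency group or the (void *)-1
	 * sentinel if ip1 is already staged on the SYNCQ.  Either value is
	 * a legal input for the next call.
	 */
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
#endif
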
/*
 * Put a solo inode on the SIDEQ (meaning that it's dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}

/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *	  Shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}
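
/*
 * Illustrative sketch (not part of the original source): typical front-end
 * use of the inode lock.  The example_* helpers are hypothetical and exist
 * only to show the shared vs. exclusive patterns; real callers live in
 * hammer2_vnops.c.
 */
#if 0
static void
example_read_size(hammer2_inode_t *ip, hammer2_key_t *sizep)
{
	/* Shared lock: read-only access, not subject to SYNCQ stalls */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	*sizep = ip->meta.size;
	hammer2_inode_unlock(ip);
}

static void
example_touch_mtime(hammer2_inode_t *ip)
{
	/* Exclusive lock (how == 0): required before modifying ip->meta */
	hammer2_inode_lock(ip, 0);
	hammer2_inode_modify(ip);
	hammer2_update_time(&ip->meta.mtime);
	hammer2_inode_unlock(ip);
}
#endif
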
/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	size_t count;
	size_t i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies, record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation, this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}

/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}

/*
 * If either ip1 or ip2 have been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 have been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}
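
/*
 * Illustrative sketch (not part of the original source): a two-inode
 * operation that locks a directory and a target inode together and then
 * records the dirent-v-inode dependency so the syncer flushes both in the
 * same pass.  example_dirent_and_inode() is hypothetical; real users of
 * this pattern are the namespace VOPs in hammer2_vnops.c.
 */
#if 0
static void
example_dirent_and_inode(hammer2_inode_t *dip, hammer2_inode_t *ip)
{
	/* Lock both inodes, in order, honoring SYNCQ semantics */
	hammer2_inode_lock4(dip, ip, NULL, NULL);

	/* ... modify the directory entry and the target inode here ... */

	/* Keep the dirent (dip) and the inode (ip) in the same flush group */
	hammer2_inode_depend(dip, ip);

	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(dip);
}
#endif
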
/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}
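
/*
 * Illustrative sketch (not part of the original source): temporarily
 * releasing the inode lock around a blocking call that must not be made
 * while holding ip->lock.  This mirrors what hammer2_igetv() below does
 * around vget().
 */
#if 0
	hammer2_mtx_state_t ostate;

	ostate = hammer2_inode_lock_temp_release(ip);
	/* ... blocking operation that must not hold ip->lock ... */
	hammer2_inode_lock_temp_restore(ip, ostate);
#endif
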
/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
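
/*
 * Illustrative sketch (not part of the original source): pairing an inum
 * lookup with the reference it returns.  example_lookup() is hypothetical
 * and only demonstrates the ref/drop contract of the functions above.
 */
#if 0
static void
example_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	ip = hammer2_inode_lookup(pmp, inum);	/* referenced ip or NULL */
	if (ip) {
		/* ... lock the inode if meta-data access is required ... */
		hammer2_inode_drop(ip);		/* release the lookup ref */
	}
}
#endif
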
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
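
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * of hammer2_igetv().  The inode is locked across the call as required and
 * the returned vnode, if any, is exclusively locked and referenced.  The
 * example_get_vnode() helper is hypothetical; error handling and vnode
 * disposal are left to the caller.
 */
#if 0
static struct vnode *
example_get_vnode(hammer2_inode_t *ip)
{
	struct vnode *vp;
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	vp = hammer2_igetv(ip, &error);	/* NULL on failure, error is a UNIX errno */
	hammer2_inode_unlock(ip);

	return vp;
}
#endif
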
/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, NULL, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD); /*XXX*/
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}
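
/*
 * Illustrative sketch (not part of the original source): the two calling
 * modes of hammer2_inode_get(), taken from its own callers in this file
 * (hammer2_inode_create_pfs() and hammer2_inode_create_normal()).
 */
#if 0
	/*
	 * Mode 1: instantiate/lookup from a locked xop cluster.  The inum
	 * is taken from the cluster's inode data, so the inum argument is
	 * ignored (pass -1).
	 */
	nip = hammer2_inode_get(pmp, &xop->head, -1, -1);

	/*
	 * Mode 2: allocate a fresh in-memory inode with an explicit inum
	 * and no backing cluster yet.
	 */
	nip = hammer2_inode_get(pmp, NULL, inum, -1);
#endif
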
/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			 const uint8_t *name, size_t name_len,
			 int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}

/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	uint8_t type;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	type = nip->meta.type;

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.  We are
	 * not in a flush transaction so we can't mess with media topology
	 * above normal inodes (i.e. the index of the inodes themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, NULL, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}
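
/*
 * Illustrative sketch (not part of the original source): how a front-end
 * create path might combine hammer2_inode_create_normal() and
 * hammer2_dirent_create(), following the contracts stated above (caller in
 * a normal transaction, dip locked, dirent_create() returning a UNIX errno
 * while *errorp from create_normal() is a HAMMER2 error).  example_create()
 * is hypothetical; the real VOP code in hammer2_vnops.c also handles
 * transactions, inum allocation, and failure cleanup.
 */
#if 0
static int
example_create(hammer2_inode_t *dip, struct vattr *vap, struct ucred *cred,
	       const char *name, size_t name_len, hammer2_tid_t inum)
{
	hammer2_inode_t *nip;
	int error;

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
	if (error == 0) {
		/* dirent_create() already returns a UNIX errno */
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	} else {
		error = hammer2_error_to_errno(error);
	}
	if (nip) {
		if (error == 0)
			hammer2_inode_depend(dip, nip);	/* dirent-v-inode */
		hammer2_inode_unlock(nip);
	}
	hammer2_inode_unlock(dip);

	return error;
}
#endif
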
/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked.  The cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
		      hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * has been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen)
{
	hammer2_pfs_t *pmp;
	int error;

	pmp = ip->pmp;

	/*
	 * Decrement nlinks.  If this is the last link and the file is
	 * not open we can just delete the inode and not bother dropping
	 * nlinks to 0 (avoiding unnecessary block updates).
	 */
	if (ip->meta.nlinks == 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);
		if (isopen == 0)
			goto killit;
	}

	hammer2_inode_modify(ip);
	--ip->meta.nlinks;
	if ((int64_t)ip->meta.nlinks < 0)
		ip->meta.nlinks = 0;	/* safety */

	/*
	 * If nlinks is not zero we are done.  However, this should only be
	 * possible with a hardlink target.  If the inode is an embedded
	 * hardlink nlinks should have dropped to zero, warn and proceed
	 * with the next step.
	 */
	if (ip->meta.nlinks) {
		if ((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0)
			return 0;
		kprintf("hammer2_inode_unlink: nlinks was not 0 (%jd)\n",
			(intmax_t)ip->meta.nlinks);
		return 0;
	}

	if (ip->vp)
		hammer2_knote(ip->vp, NOTE_DELETE);

	/*
	 * nlinks is now an implied zero, delete the inode if not open.
	 * We avoid unnecessary media updates by not bothering to actually
	 * decrement nlinks for the 1->0 transition.
	 *
	 * Put the inode on the sideq to ensure that any disconnected chains
	 * get properly flushed (so they can be freed).  Defer the deletion
	 * to the sync code, doing it now will desynchronize the inode from
	 * related directory entries (which is bad).
	 *
	 * NOTE: killit can be reached without modifying the inode, so
	 *	 make sure that it is on the SIDEQ.
	 */
	if (isopen == 0) {
#if 0
		hammer2_xop_destroy_t *xop;
#endif

killit:
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);
#if 0
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif
	}
	error = 0;	/* XXX */

	return error;
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  If no vnode
 * is present we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
	return error;
}
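
/*
 * Illustrative sketch (not part of the original source): pushing a dirty
 * inode out in two steps, first synchronizing ip->meta into the inode
 * chain, then flushing the chain and its sub-topology to media.  The
 * fragment assumes the caller already opened a normal transaction; the
 * real fsync/sync paths also pass additional XOP flags to
 * hammer2_inode_chain_flush() (0 is used here only for the sketch).
 */
#if 0
	hammer2_inode_lock(ip, 0);
	hammer2_inode_chain_sync(ip);		/* ip->meta -> inode chain */
	hammer2_inode_chain_flush(ip, 0);	/* chain + sub-topology -> media */
	hammer2_inode_unlock(ip);
#endif
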
/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %jd\n",
				ip, (intmax_t)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *	 to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
					     HAMMER2_INODE_ISUNLINKED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %jd\n",
				ip, (intmax_t)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}
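
/*
 * Lifecycle summary (descriptive note, not from the original source; the
 * pairings below are taken from the comments and flag usage in this file):
 *
 *	hammer2_inode_create_normal()   sets HAMMER2_INODE_CREATING; the
 *	detached media chains are later inserted into the on-media tree by
 *	hammer2_inode_chain_ins() during the filesystem sync.
 *
 *	hammer2_inode_unlink_finisher() sets HAMMER2_INODE_DELETING and
 *	queues the inode to the SIDEQ; the chains are later removed from
 *	the topology by hammer2_inode_chain_des() during the sync.
 */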