/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>
#include <sys/vnode.h>
*/

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}
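/*
 * Illustrative note (not part of the driver): RB_GENERATE2() emits the
 * red-black tree functions used below (RB_LOOKUP, RB_INSERT, RB_REMOVE,
 * RB_FOREACH) keyed directly on meta.inum, so hammer2_inode_cmp() must
 * impose a total order on inode numbers.  A hypothetical direct lookup
 * would look like:
 *
 *	hammer2_inode_t *ip;
 *
 *	hammer2_spin_ex(&pmp->inum_spin);
 *	ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
 *	hammer2_spin_unex(&pmp->inum_spin);
 *
 * which is what hammer2_inode_lookup() below wraps, adding a ref under
 * the spinlock before returning.
 */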
/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static __noinline
hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
#ifdef INVARIANTS
			int sanitychk = 0;
#endif
			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
#ifdef INVARIANTS
				if (iptmp == ip)
					sanitychk = 1;
#endif
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip);	/* extra ref usually via hammer2_inode_modify() */
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;
	}

	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return depend;
}

/*
 * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}
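/*
 * Illustrative sketch (not part of the driver): hammer2_inode_delayed_sideq()
 * above is the common entry point, and hammer2_inode_depend() further below
 * shows the two-call merge pattern against hammer2_inode_setdepend_locked().
 * A hypothetical caller that dirties an already-locked inode simply does:
 *
 *	hammer2_inode_modify(ip);	// flags MODIFIED, queues to SIDEQ
 *
 * which routes through hammer2_inode_delayed_sideq() when the inode is not
 * already on the SIDEQ or SYNCQ.
 */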
/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.  The
 *	  inode locking function will automatically set the RDONLY flag.
 *	  Shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}
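/*
 * Illustrative sketch (not part of the driver): the usual pairing is
 * hammer2_inode_lock() ... hammer2_inode_unlock(), where the unlock side
 * also drops the ref taken by the lock.  A hypothetical read-only caller:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	size = ip->meta.size;		// ip->meta stable while locked
 *	hammer2_inode_unlock(ip);
 *
 * Exclusive lockers omit HAMMER2_RESOLVE_SHARED and may stall here while
 * the syncer flushes an inode staged on the SYNCQ.
 */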
/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	size_t count;
	size_t i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies, record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation, this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}

/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}

/*
 * If either ip1 or ip2 have been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 have been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}
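/*
 * Illustrative sketch (not part of the driver): a hypothetical operation
 * that touches a directory and the file it names would lock both inodes
 * together and then tie them into one flush group, e.g.:
 *
 *	hammer2_inode_lock4(dip, ip, NULL, NULL);
 *	...modify dirent under dip, modify ip->meta...
 *	hammer2_inode_depend(dip, ip);	// dirent-v-inode, dirent side first
 *	hammer2_inode_unlock(ip);
 *	hammer2_inode_unlock(dip);
 *
 * so the syncer cannot flush one without the other.
 */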
/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	/* XXX pretends it wasn't exclusive, but shouldn't matter */
	//if (mtx_islocked_ex(&ip->lock)) {
	if (0) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}
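/*
 * Illustrative sketch (not part of the driver): upgrade/downgrade are meant
 * to bracket a short exclusive section inside an otherwise shared-locked
 * path, as hammer2_igetv() below does:
 *
 *	int wasexclusive;
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	...mutate ip state that requires the exclusive lock...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 *
 * Note that the unlock/relock in the upgrade path means the caller must
 * re-check any state sampled under the shared lock.
 */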
/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						&pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL);

				/*
				 * VOP_RECLAIM is currently unused,
				 * so directly free vnode before inode.
				 */
				if (ip->vp) {
					if (ip->vp->v_malloced)
						freevnode(ip->vp);
				} else {
					/* PFS inode ? */
				}

				kfree_obj(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non-zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
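/*
 * Illustrative sketch (not part of the driver): refs follow the usual
 * acquire/release discipline.  hammer2_inode_lookup(), the vp association
 * in hammer2_igetv(), and hammer2_inode_lock() each hold their own ref:
 *
 *	ip = hammer2_inode_lookup(pmp, inum);	// +1 ref
 *	if (ip) {
 *		hammer2_inode_lock(ip, 0);	// +1 ref (lock's own)
 *		...
 *		hammer2_inode_unlock(ip);	// -1 ref
 *		hammer2_inode_drop(ip);		// -1 ref, may free ip
 *	}
 */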
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			//vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			//vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		vx_downgrade(vp);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, -1, -1);
	}
	return (vp);
}
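/*
 * Illustrative sketch (not part of the driver): a typical VOP-style path
 * resolves an inode and then materializes its vnode while holding the
 * inode lock, along the lines of:
 *
 *	int error;
 *	struct vnode *vp;
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip);
 *
 * On success vp is returned exclusively locked with a ref from
 * vget()/getnewvnode(); error is a UNIX errno, not a HAMMER2_ERROR_* code.
 */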
/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	{
		hammer2_inode_t *nnip = nip;
		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
	}

	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		hammer2_inode_repoint(nip, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}
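/*
 * Illustrative sketch (not part of the driver): both creation paths below
 * use hammer2_inode_get().  With an xop the inode number comes from the
 * backend data; without one the caller supplies it, e.g.:
 *
 *	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);	// from xop
 *	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);	// pre-assigned
 *
 * Either way nip is returned ref'd and exclusively locked, in a state
 * compatible with having called hammer2_inode_lock().
 */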
/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			 const uint8_t *name, size_t name_len,
			 int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}
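/*
 * Illustrative note (not part of the driver): the scanlhc loop above probes
 * the directory hash space sequentially.  hammer2_dirhash() yields a key
 * whose low bits (HAMMER2_DIRHASH_LOMASK) form the iteration window for
 * colliding names, so the check
 *
 *	if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK)
 *		error = HAMMER2_ERROR_ENOSPC;
 *
 * fails the create only when ++lhc walks past the low-bit window belonging
 * to the original hash, i.e. the collision space for that name is full.
 * The same pattern appears again in hammer2_dirent_create() below.
 */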
/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	uint8_t type;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		assert(0); /* XXX unsupported */
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	type = nip->meta.type;

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.  We are
	 * not in a flush transaction so we can't mess with media topology
	 * above normal inodes (i.e. the index of the inodes themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}
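/*
 * Illustrative sketch (not part of the driver): a file-create VOP would
 * normally pair hammer2_inode_create_normal() with hammer2_dirent_create()
 * below, tying the directory and the new inode into the same flush group:
 *
 *	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
 *	if (nip) {
 *		error = hammer2_dirent_create(dip, name, name_len,
 *					      nip->meta.inum, nip->meta.type);
 *		hammer2_inode_depend(dip, nip);	// dirent-v-inode
 *	}
 *
 * (error handling elided; dip must be locked across the sequence).
 */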
/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}
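/*
 * Illustrative note (not part of the driver): passing a NULL cluster is the
 * tear-down form; hammer2_inode_drop() above uses exactly this to detach
 * every chain before freeing the inode:
 *
 *	hammer2_inode_repoint(ip, NULL);	// drop all cluster chains
 *
 * The old chain refs are collected in dropch[] under the spinlock and only
 * dropped after the spinlock is released, keeping the drop path lock-safe.
 */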
/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

hammer2_key_t
hammer2_inode_data_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.data_count)
				count = chain->bref.embed.stats.data_count;
		}
	}
	return count;
}

hammer2_key_t
hammer2_inode_inode_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.inode_count)
				count = chain->bref.embed.stats.inode_count;
		}
	}
	return count;
}
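/*
 * Illustrative note (not part of the driver): the two helpers above report
 * the maximum data_count/inode_count recorded across the cluster's chains,
 * which is how a statfs-style caller would estimate usage under a PFS root:
 *
 *	hammer2_key_t dcount = hammer2_inode_data_count(pmp->iroot);
 *	hammer2_key_t icount = hammer2_inode_inode_count(pmp->iroot);
 *
 * Taking the maximum is a heuristic; individual cluster members may lag
 * behind one another until the next sync.
 */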
/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct vnode **vprecyclep)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;
	int error;

	pmp = ip->pmp;
	error = 0;

	/*
	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
	 * negative), and just assume a transition to 0.
	 */
	if ((int64_t)ip->meta.nlinks <= 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);

		/*
		 * Scrap the vnode as quickly as possible.  The vp association
		 * stays intact while we hold the inode locked.  However, vp
		 * can be NULL here.
		 */
		vp = ip->vp;
		cpu_ccfence();

		/*
		 * If no vp is associated there is no high-level state to
		 * deal with and we can scrap the inode immediately.
		 */
		if (vp == NULL) {
			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
				atomic_set_int(&ip->flags,
					       HAMMER2_INODE_DELETING);
				hammer2_inode_delayed_sideq(ip);
			}
			return 0;
		}

		/*
		 * Because INODE_ISUNLINKED is set with the inode lock
		 * held, the vnode cannot be ripped up from under us.
		 * There may still be refs so knote anyone waiting for
		 * a delete notification.
		 *
		 * The vnode is not necessarily ref'd due to the unlinking
		 * itself, so we have to defer handling to the end of the
		 * VOP, which will then call hammer2_inode_vprecycle().
		 */
		if (vprecyclep) {
			vhold(vp);
			*vprecyclep = vp;
		}
	}

	/*
	 * Adjust nlinks and retain the inode on the media for now
	 */
	hammer2_inode_modify(ip);
	if ((int64_t)ip->meta.nlinks > 1)
		--ip->meta.nlinks;
	else
		ip->meta.nlinks = 0;

	return 0;
}

/*
 * Called at the end of a VOP that removes a file with a vnode that
 * we want to try to dispose of quickly due to a file deletion.  If
 * we don't do this, the vnode can hang around with 0 refs for a very
 * long time and prevent reclamation of the underlying file and inode
 * (inode remains on-media with nlinks == 0 until the vnode is recycled
 * due to random system activity or a umount).
 */
void
hammer2_inode_vprecycle(struct vnode *vp)
{
	if (vget(vp, LK_EXCLUSIVE) == 0) {
		vfinalize(vp);
		hammer2_knote(vp, NOTE_DELETE);
		vdrop(vp);
		vput(vp);
	} else {
		vdrop(vp);
	}
}


/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
 * we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}
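/*
 * Illustrative sketch (not part of the driver): a remove-style VOP using
 * the unlink helpers above would look roughly like:
 *
 *	struct vnode *vprecycle = NULL;
 *
 *	...run the unlink xop against the directory entry...
 *	hammer2_inode_unlink_finisher(ip, &vprecycle);
 *	hammer2_inode_unlock(ip);
 *	if (vprecycle)
 *		hammer2_inode_vprecycle(vprecycle);
 *
 * deferring the vnode recycle until all locks are released, as the comments
 * above require.
 */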
/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}
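/*
 * Illustrative note (not part of the driver): the filesystem sync applies
 * these helpers per dirty inode; a hypothetical caller handling one locked
 * inode might do (error handling elided):
 *
 *	hammer2_inode_chain_ins(ip);	// no-op unless INODE_CREATING
 *	hammer2_inode_chain_sync(ip);	// no-op unless MODIFIED/RESIZED
 *	hammer2_inode_chain_flush(ip, flags);	// flush chains to media
 *
 * hammer2_inode_chain_des() below handles the INODE_DELETING side.  The
 * ins/des/sync helpers are no-ops unless their interlock flags are set, so
 * they are safe to apply unconditionally.
 */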
/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *	 to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
					     HAMMER2_INODE_ISUNLINKED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}

hammer2_key_t
hammer2_pfs_inode_count(hammer2_pfs_t *pmp)
{
	struct hammer2_inode *ip;
	hammer2_key_t count = 0;

	hammer2_spin_ex(&pmp->inum_spin);
	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
		count++;
	hammer2_spin_unex(&pmp->inum_spin);

	return count;
}

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	hammer2_pfs_t *pmp = MPTOPMP(mp);
	struct hammer2_inode *ip, *tmp;
	struct vnode *vp;
	hammer2_key_t count_before, count_after, count_delta;

	hammer2_spin_ex(&pmp->inum_spin);
	count_before = 0;
	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
		count_before++;

	RB_FOREACH_SAFE(ip, hammer2_inode_tree, &pmp->inum_tree, tmp) {
		vp = ip->vp;
		assert(vp);
		if (!vp->v_vflushed) {
			/*
			printf("%s: drop ip=%p inum=%ld refs=%d\n",
			    __func__, ip, ip->meta.inum, ip->refs);
			*/
			assert(ip->refs > 1);
			hammer2_inode_drop(ip);
			vp->v_vflushed = 1;
		}
	}

	count_after = 0;
	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
		count_after++;
	hammer2_spin_unex(&pmp->inum_spin);

	printf("%s: total inodes %ld -> %ld\n",
	    __func__, count_before, count_after);

	assert(count_before >= count_after);
	count_delta = count_before - count_after;

	if (count_delta) {
		if (hammer2_debug & 0x80000000)
			assert(0);
		else
			printf("%s: %ld inodes freed\n", __func__,
			    count_delta);
	}

	return 0;
}
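/*
 * Illustrative note (not part of the driver): hammer2_pfs_inode_count()
 * walks the inum tree under inum_spin and is therefore O(n) in the number
 * of in-memory inodes.  The vflush() above performs the same walk before
 * and after dropping the per-vnode refs, so the delta it prints should
 * equal the number of inodes whose last remaining ref was the vnode
 * association.
 */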