/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>
#include <sys/vnode.h>
*/

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}
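/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * RB_GENERATE2 above emits a keyed lookup routine, so the in-memory inode
 * tree can be searched directly by inode number without building a dummy
 * hammer2_inode.  A minimal use of that generated API, assuming the caller
 * interlocks with pmp->inum_spin exactly as hammer2_inode_lookup() below
 * does, might look like this.
 */
#if 0
static hammer2_inode_t *
example_find_inum(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	hammer2_spin_ex(&pmp->inum_spin);
	ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
	if (ip)
		hammer2_inode_ref(ip);	/* caller must hammer2_inode_drop() */
	hammer2_spin_unex(&pmp->inum_spin);
	return ip;
}
#endif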
/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static __noinline
hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
#ifdef INVARIANTS
			int sanitychk = 0;
#endif
			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
#ifdef INVARIANTS
				if (iptmp == ip)
					sanitychk = 1;
#endif
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip); /* extra ref usually via hammer2_inode_modify() */
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;
	}

	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return depend;
}

/*
 * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}
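/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * front-end code rarely calls hammer2_inode_delayed_sideq() directly; the
 * usual path is hammer2_inode_modify() further below, which flags the inode
 * MODIFIED and lands it on the SIDEQ as a side effect.  The helper name and
 * the mtime parameter here are hypothetical.
 */
#if 0
static void
example_dirty_inode(hammer2_inode_t *ip, uint64_t new_mtime)
{
	hammer2_inode_lock(ip, 0);	/* exclusive lock */
	hammer2_inode_modify(ip);	/* flags MODIFIED, queues SIDEQ */
	ip->meta.mtime = new_mtime;	/* edit the in-memory meta-data */
	hammer2_inode_unlock(ip);
}
#endif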
/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
 *	  shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}
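/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * the basic lock/unlock pairing for this API.  hammer2_inode_lock() takes a
 * ref on the inode and hammer2_inode_unlock() drops it, so every lock must
 * be matched by an unlock.
 */
#if 0
static void
example_inode_lock_usage(hammer2_inode_t *ip)
{
	/*
	 * Read-only access to ip->meta: a shared lock is sufficient and is
	 * not subject to SYNCQ stalls.
	 */
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	/* ... inspect ip->meta ... */
	hammer2_inode_unlock(ip);	/* also drops the ref taken by lock */

	/*
	 * Modifying access: an exclusive lock, which may stall while the
	 * syncer flushes an inode staged on pmp->syncq.
	 */
	hammer2_inode_lock(ip, 0);
	/* ... hammer2_inode_modify() + ip->meta updates ... */
	hammer2_inode_unlock(ip);
}
#endif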
/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	size_t count;
	size_t i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies, record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation, this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}

/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}

/*
 * If either ip1 or ip2 have been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 have been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}
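/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * how a namespace operation might tie a directory inode and a target inode
 * together, following the dirent-v-inode guidance in the comment above.
 * The function name and surrounding error handling are hypothetical.
 */
#if 0
static void
example_link_dependency(hammer2_inode_t *dip, hammer2_inode_t *ip)
{
	/*
	 * Lock both inodes together with SYNCQ semantics, then record that
	 * the directory (dirent side, passed first) and the target inode
	 * must be flushed in the same sync pass.
	 */
	hammer2_inode_lock4(dip, ip, NULL, NULL);
	hammer2_inode_depend(dip, ip);

	/* ... perform the namespace update ... */

	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(dip);
}
#endif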
/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}
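/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * typical use of hammer2_inode_chain() above.  The call returns a
 * referenced and locked chain (or NULL), so the caller owns both the
 * unlock and the drop.  The resolve flag combination shown is an assumed
 * but common choice; the helper name is hypothetical.
 */
#if 0
static void
example_inspect_cluster_chain(hammer2_inode_t *ip, int clindex)
{
	hammer2_chain_t *chain;

	chain = hammer2_inode_chain(ip, clindex,
				    HAMMER2_RESOLVE_ALWAYS |
				    HAMMER2_RESOLVE_SHARED);
	if (chain) {
		/* ... read chain->bref / chain->data ... */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
}
#endif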
/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	/* XXX pretends it wasn't exclusive, but shouldn't matter */
	//if (mtx_islocked_ex(&ip->lock)) {
	if (0) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
					    HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
					    &pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL);

				/*
				 * VOP_RECLAIM is currently unused,
				 * so directly free vnode before inode.
				 */
				if (ip->vp) {
					if (ip->vp->v_malloced)
						freevnode(ip->vp);
				} else {
					/* PFS inode ? */
				}

				kfree_obj(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non-zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
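/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * the ref/drop discipline around hammer2_inode_lookup().  A successful
 * lookup returns a referenced inode, so it must always be paired with
 * hammer2_inode_drop() once the caller is done.  The function name is
 * hypothetical.
 */
#if 0
static int
example_lookup_and_drop(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	ip = hammer2_inode_lookup(pmp, inum);
	if (ip == NULL)
		return ENOENT;
	/* ... use ip, typically under hammer2_inode_lock() ... */
	hammer2_inode_drop(ip);
	return 0;
}
#endif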
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			//vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			//vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		vx_downgrade(vp);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, -1, -1);
	}
	return (vp);
}
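/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * the calling convention for hammer2_igetv().  The inode must be locked by
 * the caller; on success the returned vnode is exclusively locked and
 * referenced and is eventually released with vput().  The wrapper name is
 * hypothetical.
 */
#if 0
static struct vnode *
example_get_locked_vnode(hammer2_inode_t *ip, int *errorp)
{
	struct vnode *vp;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	vp = hammer2_igetv(ip, errorp);
	hammer2_inode_unlock(ip);
	return vp;	/* NULL with *errorp set on failure */
}
#endif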
/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	{
		hammer2_inode_t *nnip = nip;
		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
	}

	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		hammer2_inode_repoint(nip, &xop->cluster);
	} else {
		nip->meta.inum = inum;	/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible with after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}

/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			 const uint8_t *name, size_t name_len,
			 int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}

/*
 * Create a new, normal inode.  This function will create the inode and
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	uint8_t type;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		assert(0); /* XXX unsupported */
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	type = nip->meta.type;

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.  We are
	 * not in a flush transaction so we can't mess with media topology
	 * above normal inodes (i.e. the index of the inodes themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}
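/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * a simplified outline of how a front-end create operation might combine
 * hammer2_inode_create_normal() and hammer2_dirent_create() above.  It
 * assumes the caller already opened a normal transaction and holds the
 * directory inode locked; hammer2_trans_newinum() is assumed from the
 * DragonFly sources for inode number allocation, and error handling is
 * elided.
 */
#if 0
static hammer2_inode_t *
example_create_file(hammer2_inode_t *dip, const char *name, size_t name_len,
		    struct vattr *vap, struct ucred *cred, int *errorp)
{
	hammer2_inode_t *nip;
	hammer2_tid_t inum;

	inum = hammer2_trans_newinum(dip->pmp);	/* allocate an inode number */

	/* Create the in-memory inode and its detached media chains. */
	nip = hammer2_inode_create_normal(dip, vap, cred, inum, errorp);
	if (nip == NULL)
		return NULL;

	/* Add a directory entry pointing at the new inode number. */
	*errorp = hammer2_dirent_create(dip, name, name_len,
					nip->meta.inum, nip->meta.type);

	/* Keep the dirent and the inode in the same sync pass. */
	hammer2_inode_depend(dip, nip);
	return nip;
}
#endif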
/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

hammer2_key_t
hammer2_inode_data_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.data_count)
				count = chain->bref.embed.stats.data_count;
		}
	}
	return count;
}

hammer2_key_t
hammer2_inode_inode_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.inode_count)
				count = chain->bref.embed.stats.inode_count;
		}
	}
	return count;
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct vnode **vprecyclep)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;
	int error;

	pmp = ip->pmp;
	error = 0;

	/*
	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
	 * negative), and just assume a transition to 0.
	 */
	if ((int64_t)ip->meta.nlinks <= 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);

		/*
		 * Scrap the vnode as quickly as possible.  The vp association
		 * stays intact while we hold the inode locked.  However, vp
		 * can be NULL here.
		 */
		vp = ip->vp;
		cpu_ccfence();

		/*
		 * If no vp is associated there is no high-level state to
		 * deal with and we can scrap the inode immediately.
		 */
		if (vp == NULL) {
			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
				atomic_set_int(&ip->flags,
					       HAMMER2_INODE_DELETING);
				hammer2_inode_delayed_sideq(ip);
			}
			return 0;
		}

		/*
		 * Because INODE_ISUNLINKED is set with the inode lock
		 * held, the vnode cannot be ripped up from under us.
		 * There may still be refs so knote anyone waiting for
		 * a delete notification.
		 *
		 * The vnode is not necessarily ref'd due to the unlinking
		 * itself, so we have to defer handling to the end of the
		 * VOP, which will then call hammer2_inode_vprecycle().
		 */
		if (vprecyclep) {
			vhold(vp);
			*vprecyclep = vp;
		}
	}

	/*
	 * Adjust nlinks and retain the inode on the media for now
	 */
	hammer2_inode_modify(ip);
	if ((int64_t)ip->meta.nlinks > 1)
		--ip->meta.nlinks;
	else
		ip->meta.nlinks = 0;

	return 0;
}

/*
 * Called at the end of a VOP that removes a file with a vnode that
 * we want to try to dispose of quickly due to a file deletion.  If
 * we don't do this, the vnode can hang around with 0 refs for a very
 * long time and prevent reclamation of the underlying file and inode
 * (inode remains on-media with nlinks == 0 until the vnode is recycled
 * due to random system activity or a umount).
 */
void
hammer2_inode_vprecycle(struct vnode *vp)
{
	if (vget(vp, LK_EXCLUSIVE) == 0) {
		vfinalize(vp);
		hammer2_knote(vp, NOTE_DELETE);
		vdrop(vp);
		vput(vp);
	} else {
		vdrop(vp);
	}
}
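/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * how the finisher and vprecycle helpers above fit together at the end of a
 * remove-style VOP.  Assumes the backend unlink xop already ran and the
 * target inode is still locked; the wrapper name and error handling are
 * hypothetical.
 */
#if 0
static void
example_finish_remove(hammer2_inode_t *ip)
{
	struct vnode *vprecycle = NULL;

	hammer2_inode_unlink_finisher(ip, &vprecycle);
	hammer2_inode_unlock(ip);

	/*
	 * Once all locks are released, aggressively recycle the vnode so the
	 * on-media inode (now at nlinks == 0) can be reclaimed promptly.
	 */
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);
}
#endif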
/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
 * we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}
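/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * a simplified picture of how the per-inode helpers in this file are
 * typically sequenced for one dirty inode during a filesystem sync.  The
 * real syncer additionally manages SYNCQ/SIDEQ ordering, transactions, and
 * XOP flags (e.g. INODE_STOP/FSSYNC), all of which are omitted here; the
 * function name is hypothetical.
 */
#if 0
static void
example_sync_one_inode(hammer2_inode_t *ip)
{
	hammer2_inode_lock(ip, 0);

	/* Push ip->meta and any resize state down into the chains. */
	hammer2_inode_chain_sync(ip);

	/* Insert newly created chains, or delete unlinked ones. */
	hammer2_inode_chain_ins(ip);
	hammer2_inode_chain_des(ip);

	/* Finally flush the inode's chain topology to media. */
	hammer2_inode_chain_flush(ip, 0);	/* real syncer passes XOP flags */

	hammer2_inode_unlock(ip);
}
#endif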
/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *	 to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
					     HAMMER2_INODE_ISUNLINKED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}

hammer2_key_t
hammer2_pfs_inode_count(hammer2_pfs_t *pmp)
{
	struct hammer2_inode *ip;
	hammer2_key_t count = 0;

	hammer2_spin_ex(&pmp->inum_spin);
	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
		count++;
	hammer2_spin_unex(&pmp->inum_spin);

	return count;
}

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	hammer2_pfs_t *pmp = MPTOPMP(mp);
	struct hammer2_inode *ip, *tmp;
	struct vnode *vp;
	hammer2_key_t count_before, count_after, count_delta;

	hammer2_spin_ex(&pmp->inum_spin);
	count_before = 0;
	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
		count_before++;

	RB_FOREACH_SAFE(ip, hammer2_inode_tree, &pmp->inum_tree, tmp) {
		vp = ip->vp;
		assert(vp);
		if (!vp->v_vflushed) {
			/*
			printf("%s: drop ip=%p inum=%ld refs=%d\n",
			    __func__, ip, ip->meta.inum, ip->refs);
			*/
			assert(ip->refs > 1);
			hammer2_inode_drop(ip);
			vp->v_vflushed = 1;
		}
	}

	count_after = 0;
	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
		count_after++;
	hammer2_spin_unex(&pmp->inum_spin);

	printf("%s: total inode %ld -> %ld\n",
	    __func__, count_before, count_after);

	assert(count_before >= count_after);
	count_delta = count_before - count_after;

	if (count_delta) {
		if (hammer2_debug & 0x80000000)
			assert(0);
		else
			printf("%s: %ld inode freed\n", __func__, count_delta);
	}

	return 0;
}