/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>
#include <sys/vnode.h>
*/

#include "hammer2.h"

#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}

/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static __noinline
hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
#ifdef INVARIANTS
			int sanitychk = 0;
#endif
			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
#ifdef INVARIANTS
				if (iptmp == ip)
					sanitychk = 1;
#endif
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip); /* extra ref usually via hammer2_inode_modify() */
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;
	}

	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return depend;
}

/*
 * Put a solo inode on the SIDEQ (meaning that it's dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}

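/*
 * Note (added for clarity): hammer2_inode_modify() further below is the
 * usual path that lands a dirty inode here; it sets HAMMER2_INODE_MODIFIED
 * and, unless HAMMER2_INODE_NOSIDEQ is set, calls
 * hammer2_inode_delayed_sideq().
 */
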
/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
 *	  Shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}

/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
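 *
 * Illustrative usage (names hypothetical, error handling elided): a
 * rename-style operation that must keep two directories and a file
 * consistent might do:
 *
 *	hammer2_inode_lock4(fdip, tdip, fip, NULL);
 *	... perform the directory entry and inode updates ...
 *	hammer2_inode_unlock(fip);
 *	hammer2_inode_unlock(tdip);
 *	hammer2_inode_unlock(fdip);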
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	size_t count;
	size_t i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies and record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation; this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}

/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}

/*
 * If either ip1 or ip2 has been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 has been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
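 *
 * Illustrative usage (sketch only): the returned chain, if any, is
 * referenced and locked and must be released with unlock + drop:
 *
 *	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain) {
 *		... examine the chain ...
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}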
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}

hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and the lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	/* XXX pretends it wasn't exclusive, but shouldn't matter */
	//if (mtx_islocked_ex(&ip->lock)) {
	if (0) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
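 *
 * Illustrative pairing (this is the pattern hammer2_igetv() uses below):
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	... code that requires the exclusive lock ...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);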
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL);
				/*
				 * Add inode to reclaim queue.
				 */
				TAILQ_INSERT_TAIL(&pmp->recq, ip, recq_entry);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non-zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

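/*
 * Note (added for clarity): every explicit hammer2_inode_ref() must
 * eventually be balanced by a hammer2_inode_drop().  hammer2_inode_lock()
 * takes a ref and hammer2_inode_unlock() drops it, so lock/unlock pairs
 * above are already balanced.
 */
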
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct m_vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct m_vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			//vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp, ip->meta.rmajor, ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			//vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		vx_downgrade(vp);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, -1, -1);
	}
	return (vp);
}

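/*
 * Illustrative usage (sketch; transaction and error handling elided): with
 * the inode locked, obtain its vnode and release the vnode with vput()
 * when done:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip);
 *	if (vp) {
 *		... use the exclusively locked vnode ...
 *		vput(vp);
 *	}
 */
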
/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	{
		hammer2_inode_t *nnip = nip;
		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
	}

	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		hammer2_inode_repoint(nip, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}

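/*
 * Illustrative usage (sketch; xop setup/retire elided): after a backend
 * lookup has produced a locked inode cluster in an xop, the front-end
 * materializes (or finds) the in-memory inode with:
 *
 *	ip = hammer2_inode_get(pmp, &xop->head, -1, -1);
 *	... ip is returned locked ...
 *	hammer2_inode_unlock(ip);
 */
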
/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			 const char *name, size_t name_len,
			 int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime, false);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}

/*
 * Create a new, normal inode.  This function will create the inode and
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
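 *
 * Illustrative call pattern (simplified; transaction setup, locking and
 * error handling elided): a front-end create operation typically builds
 * the inode and then adds a directory entry referencing it:
 *
 *	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
 *	if (nip)
 *		error = hammer2_dirent_create(dip, name, name_len,
 *					      nip->meta.inum, nip->meta.type);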
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		assert(0); /* XXX unsupported */
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime, false);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.
	 * We are not in a flush transaction so we can't mess with media
	 * topology above normal inodes (i.e. the index of the inodes
	 * themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}

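/*
 * The two helpers below return the largest data_count and inode_count,
 * respectively, recorded in the embedded bref statistics across the
 * inode's cluster chains.
 */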
hammer2_key_t
hammer2_inode_data_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.data_count)
				count = chain->bref.embed.stats.data_count;
		}
	}
	return count;
}

hammer2_key_t
hammer2_inode_inode_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.inode_count)
				count = chain->bref.embed.stats.inode_count;
		}
	}
	return count;
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct m_vnode **vprecyclep)
{
	struct m_vnode *vp;

	/*
	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
	 * negative), and just assume a transition to 0.
	 */
	if ((int64_t)ip->meta.nlinks <= 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);

		/*
		 * Scrap the vnode as quickly as possible.  The vp association
		 * stays intact while we hold the inode locked.  However, vp
		 * can be NULL here.
		 */
		vp = ip->vp;
		cpu_ccfence();

		/*
		 * If no vp is associated there is no high-level state to
		 * deal with and we can scrap the inode immediately.
		 */
		if (vp == NULL) {
			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
				atomic_set_int(&ip->flags,
					       HAMMER2_INODE_DELETING);
				hammer2_inode_delayed_sideq(ip);
			}
			return 0;
		}

		/*
		 * Because INODE_ISUNLINKED is set with the inode lock
		 * held, the vnode cannot be ripped up from under us.
		 * There may still be refs so knote anyone waiting for
		 * a delete notification.
		 *
		 * The vnode is not necessarily ref'd due to the unlinking
		 * itself, so we have to defer handling to the end of the
		 * VOP, which will then call hammer2_inode_vprecycle().
		 */
		if (vprecyclep) {
			vhold(vp);
			*vprecyclep = vp;
		}
	}

	/*
	 * Adjust nlinks and retain the inode on the media for now
	 */
	hammer2_inode_modify(ip);
	if ((int64_t)ip->meta.nlinks > 1)
		--ip->meta.nlinks;
	else
		ip->meta.nlinks = 0;

	return 0;
}

/*
 * Called at the end of a VOP that removes a file with a vnode that
 * we want to try to dispose of quickly due to a file deletion.  If
 * we don't do this, the vnode can hang around with 0 refs for a very
 * long time and prevent reclamation of the underlying file and inode
 * (inode remains on-media with nlinks == 0 until the vnode is recycled
 * due to random system activity or a umount).
 */
void
hammer2_inode_vprecycle(struct m_vnode *vp)
{
	if (vget(vp, LK_EXCLUSIVE) == 0) {
		vfinalize(vp);
		hammer2_knote(vp, NOTE_DELETE);
		vdrop(vp);
		vput(vp);
	} else {
		vdrop(vp);
	}
}


/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
 * we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
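 *
 * Illustrative ordering (sketch; transaction setup and flush flags elided):
 * an explicit per-inode flush typically synchronizes the front-end
 * meta-data first and then flushes the chain topology:
 *
 *	hammer2_inode_lock(ip, 0);
 *	hammer2_inode_chain_sync(ip);
 *	hammer2_inode_chain_flush(ip, 0);
 *	hammer2_inode_unlock(ip);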
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n", ip, (long)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *	 to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
					     HAMMER2_INODE_ISUNLINKED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n", ip, (long)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}

hammer2_key_t
hammer2_pfs_inode_count(hammer2_pfs_t *pmp)
{
	struct hammer2_inode *ip;
	hammer2_key_t count = 0;

	hammer2_spin_ex(&pmp->inum_spin);
	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
		count++;
	hammer2_spin_unex(&pmp->inum_spin);

	return count;
}

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	hammer2_pfs_t *pmp = MPTOPMP(mp);
	struct hammer2_inode *ip, *tmp;
	struct m_vnode *vp;
	hammer2_key_t count_before, count_after, count_recq;

	printf("%s: total chain %ld\n", __func__, hammer2_chain_allocs);
	printf("%s: total dio %d\n", __func__, hammer2_dio_count);

	hammer2_spin_ex(&pmp->inum_spin);
	count_before = 0;
	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
		count_before++;

	RB_FOREACH_SAFE(ip, hammer2_inode_tree, &pmp->inum_tree, tmp) {
		vp = ip->vp;
		assert(vp);
		if (!vp->v_vflushed) {
			/*
			 * Not all inodes are modified and ref'd,
			 * so ip->refs requirement here is the initial 1.
			 */
			assert(ip->refs > 0);
			hammer2_inode_drop(ip);
			vp->v_vflushed = 1;
		}
	}

	count_after = 0;
	RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
		count_after++;
	hammer2_spin_unex(&pmp->inum_spin);

	printf("%s: total inode %jd -> %jd\n",
	    __func__, (intmax_t)count_before, (intmax_t)count_after);
	assert(count_before >= count_after);

	count_recq = 0;
	TAILQ_FOREACH(ip, &pmp->recq, recq_entry)
		count_recq++;
	if (count_recq)
		printf("%s: %jd inodes in reclaim queue\n",
		    __func__, (intmax_t)count_recq);

	return 0;
}