1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 #include "hammer.h" 36 #include <vm/vm_extern.h> 37 38 static int hammer_unload_inode(struct hammer_inode *ip); 39 static void hammer_free_inode(hammer_inode_t ip); 40 static void hammer_flush_inode_core(hammer_inode_t ip, 41 hammer_flush_group_t flg, int flags); 42 static int hammer_setup_child_callback(hammer_record_t rec, void *data); 43 #if 0 44 static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data); 45 #endif 46 static int hammer_setup_parent_inodes(hammer_inode_t ip, int depth, 47 hammer_flush_group_t flg); 48 static int hammer_setup_parent_inodes_helper(hammer_record_t record, 49 int depth, hammer_flush_group_t flg); 50 static void hammer_inode_wakereclaims(hammer_inode_t ip); 51 static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp, 52 pid_t pid); 53 54 #ifdef DEBUG_TRUNCATE 55 extern struct hammer_inode *HammerTruncIp; 56 #endif 57 58 struct krate hammer_gen_krate = { 1 }; 59 60 /* 61 * RB-Tree support for inode structures 62 */ 63 int 64 hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2) 65 { 66 if (ip1->obj_localization < ip2->obj_localization) 67 return(-1); 68 if (ip1->obj_localization > ip2->obj_localization) 69 return(1); 70 if (ip1->obj_id < ip2->obj_id) 71 return(-1); 72 if (ip1->obj_id > ip2->obj_id) 73 return(1); 74 if (ip1->obj_asof < ip2->obj_asof) 75 return(-1); 76 if (ip1->obj_asof > ip2->obj_asof) 77 return(1); 78 return(0); 79 } 80 81 int 82 hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2) 83 { 84 if (ip1->redo_fifo_start < ip2->redo_fifo_start) 85 return(-1); 86 if (ip1->redo_fifo_start > ip2->redo_fifo_start) 87 return(1); 88 return(0); 89 } 90 91 /* 92 * RB-Tree support for inode structures / special LOOKUP_INFO 93 */ 94 static int 95 
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip) 96 { 97 if (info->obj_localization < ip->obj_localization) 98 return(-1); 99 if (info->obj_localization > ip->obj_localization) 100 return(1); 101 if (info->obj_id < ip->obj_id) 102 return(-1); 103 if (info->obj_id > ip->obj_id) 104 return(1); 105 if (info->obj_asof < ip->obj_asof) 106 return(-1); 107 if (info->obj_asof > ip->obj_asof) 108 return(1); 109 return(0); 110 } 111 112 /* 113 * Used by hammer_scan_inode_snapshots() to locate all of an object's 114 * snapshots. Note that the asof field is not tested, which we can get 115 * away with because it is the lowest-priority field. 116 */ 117 static int 118 hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data) 119 { 120 hammer_inode_info_t info = data; 121 122 if (ip->obj_localization > info->obj_localization) 123 return(1); 124 if (ip->obj_localization < info->obj_localization) 125 return(-1); 126 if (ip->obj_id > info->obj_id) 127 return(1); 128 if (ip->obj_id < info->obj_id) 129 return(-1); 130 return(0); 131 } 132 133 /* 134 * Used by hammer_unload_pseudofs() to locate all inodes associated with 135 * a particular PFS. 136 */ 137 static int 138 hammer_inode_pfs_cmp(hammer_inode_t ip, void *data) 139 { 140 u_int32_t localization = *(u_int32_t *)data; 141 if (ip->obj_localization > localization) 142 return(1); 143 if (ip->obj_localization < localization) 144 return(-1); 145 return(0); 146 } 147 148 /* 149 * RB-Tree support for pseudofs structures 150 */ 151 static int 152 hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2) 153 { 154 if (p1->localization < p2->localization) 155 return(-1); 156 if (p1->localization > p2->localization) 157 return(1); 158 return(0); 159 } 160 161 162 RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare); 163 RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node, 164 hammer_inode_info_cmp, hammer_inode_info_t); 165 RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node, 166 hammer_pfs_rb_compare, u_int32_t, localization); 167 168 /* 169 * The kernel is not actively referencing this vnode but is still holding 170 * it cached. 171 * 172 * This is called from the frontend. 173 * 174 * MPALMOSTSAFE 175 */ 176 int 177 hammer_vop_inactive(struct vop_inactive_args *ap) 178 { 179 struct hammer_inode *ip = VTOI(ap->a_vp); 180 hammer_mount_t hmp; 181 182 /* 183 * Degenerate case 184 */ 185 if (ip == NULL) { 186 vrecycle(ap->a_vp); 187 return(0); 188 } 189 190 /* 191 * If the inode no longer has visibility in the filesystem try to 192 * recycle it immediately, even if the inode is dirty. Recycling 193 * it quickly allows the system to reclaim buffer cache and VM 194 * resources which can matter a lot in a heavily loaded system. 195 * 196 * This can deadlock in vfsync() if we aren't careful. 197 * 198 * Do not queue the inode to the flusher if we still have visibility, 199 * otherwise namespace calls such as chmod will unnecessarily generate 200 * multiple inode updates. 201 */ 202 if (ip->ino_data.nlinks == 0) { 203 hmp = ip->hmp; 204 lwkt_gettoken(&hmp->fs_token); 205 hammer_inode_unloadable_check(ip, 0); 206 if (ip->flags & HAMMER_INODE_MODMASK) 207 hammer_flush_inode(ip, 0); 208 lwkt_reltoken(&hmp->fs_token); 209 vrecycle(ap->a_vp); 210 } 211 return(0); 212 } 213 214 /* 215 * Release the vnode association. This is typically (but not always) 216 * the last reference on the inode. 
217 * 218 * Once the association is lost we are on our own with regards to 219 * flushing the inode. 220 * 221 * We must interlock ip->vp so hammer_get_vnode() can avoid races. 222 */ 223 int 224 hammer_vop_reclaim(struct vop_reclaim_args *ap) 225 { 226 struct hammer_inode *ip; 227 hammer_mount_t hmp; 228 struct vnode *vp; 229 230 vp = ap->a_vp; 231 232 if ((ip = vp->v_data) != NULL) { 233 hmp = ip->hmp; 234 lwkt_gettoken(&hmp->fs_token); 235 hammer_lock_ex(&ip->lock); 236 vp->v_data = NULL; 237 ip->vp = NULL; 238 239 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) { 240 ++hammer_count_reclaims; 241 ++hmp->count_reclaims; 242 ip->flags |= HAMMER_INODE_RECLAIM; 243 } 244 hammer_unlock(&ip->lock); 245 vclrisdirty(vp); 246 hammer_rel_inode(ip, 1); 247 lwkt_reltoken(&hmp->fs_token); 248 } 249 return(0); 250 } 251 252 /* 253 * Inform the kernel that the inode is dirty. This will be checked 254 * by vn_unlock(). 255 * 256 * Theoretically in order to reclaim a vnode the hammer_vop_reclaim() 257 * must be called which will interlock against our inode lock, so 258 * if VRECLAIMED is not set vp->v_mount (as used by vsetisdirty()) 259 * should be stable without having to acquire any new locks. 260 */ 261 void 262 hammer_inode_dirty(struct hammer_inode *ip) 263 { 264 struct vnode *vp; 265 266 if ((ip->flags & HAMMER_INODE_MODMASK) && 267 (vp = ip->vp) != NULL && 268 (vp->v_flag & (VRECLAIMED | VISDIRTY)) == 0) { 269 vsetisdirty(vp); 270 } 271 } 272 273 /* 274 * Return a locked vnode for the specified inode. The inode must be 275 * referenced but NOT LOCKED on entry and will remain referenced on 276 * return. 277 * 278 * Called from the frontend. 279 */ 280 int 281 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp) 282 { 283 hammer_mount_t hmp; 284 struct vnode *vp; 285 int error = 0; 286 u_int8_t obj_type; 287 288 hmp = ip->hmp; 289 290 for (;;) { 291 if ((vp = ip->vp) == NULL) { 292 error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0); 293 if (error) 294 break; 295 hammer_lock_ex(&ip->lock); 296 if (ip->vp != NULL) { 297 hammer_unlock(&ip->lock); 298 vp = *vpp; 299 vp->v_type = VBAD; 300 vx_put(vp); 301 continue; 302 } 303 hammer_ref(&ip->lock); 304 vp = *vpp; 305 ip->vp = vp; 306 307 obj_type = ip->ino_data.obj_type; 308 vp->v_type = hammer_get_vnode_type(obj_type); 309 310 hammer_inode_wakereclaims(ip); 311 312 switch(ip->ino_data.obj_type) { 313 case HAMMER_OBJTYPE_CDEV: 314 case HAMMER_OBJTYPE_BDEV: 315 vp->v_ops = &hmp->mp->mnt_vn_spec_ops; 316 addaliasu(vp, ip->ino_data.rmajor, 317 ip->ino_data.rminor); 318 break; 319 case HAMMER_OBJTYPE_FIFO: 320 vp->v_ops = &hmp->mp->mnt_vn_fifo_ops; 321 break; 322 case HAMMER_OBJTYPE_REGFILE: 323 break; 324 default: 325 break; 326 } 327 328 /* 329 * Only mark as the root vnode if the ip is not 330 * historical, otherwise the VFS cache will get 331 * confused. The other half of the special handling 332 * is in hammer_vop_nlookupdotdot(). 333 * 334 * Pseudo-filesystem roots can be accessed via 335 * non-root filesystem paths and setting VROOT may 336 * confuse the namecache. Set VPFSROOT instead. 337 */ 338 if (ip->obj_id == HAMMER_OBJID_ROOT) { 339 if (ip->obj_asof == hmp->asof) { 340 if (ip->obj_localization == 0) 341 vsetflags(vp, VROOT); 342 else 343 vsetflags(vp, VPFSROOT); 344 } else { 345 vsetflags(vp, VPFSROOT); 346 } 347 } 348 349 vp->v_data = (void *)ip; 350 /* vnode locked by getnewvnode() */ 351 /* make related vnode dirty if inode dirty? 
*/ 352 hammer_unlock(&ip->lock); 353 if (vp->v_type == VREG) { 354 vinitvmio(vp, ip->ino_data.size, 355 hammer_blocksize(ip->ino_data.size), 356 hammer_blockoff(ip->ino_data.size)); 357 } 358 break; 359 } 360 361 /* 362 * Interlock vnode clearing. This does not prevent the 363 * vnode from going into a reclaimed state but it does 364 * prevent it from being destroyed or reused so the vget() 365 * will properly fail. 366 */ 367 hammer_lock_ex(&ip->lock); 368 if ((vp = ip->vp) == NULL) { 369 hammer_unlock(&ip->lock); 370 continue; 371 } 372 vhold(vp); 373 hammer_unlock(&ip->lock); 374 375 /* 376 * loop if the vget fails (aka races), or if the vp 377 * no longer matches ip->vp. 378 */ 379 if (vget(vp, LK_EXCLUSIVE) == 0) { 380 if (vp == ip->vp) { 381 vdrop(vp); 382 break; 383 } 384 vput(vp); 385 } 386 vdrop(vp); 387 } 388 *vpp = vp; 389 return(error); 390 } 391 392 /* 393 * Locate all copies of the inode for obj_id compatible with the specified 394 * asof, reference, and issue the related call-back. This routine is used 395 * for direct-io invalidation and does not create any new inodes. 396 */ 397 void 398 hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo, 399 int (*callback)(hammer_inode_t ip, void *data), 400 void *data) 401 { 402 hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root, 403 hammer_inode_info_cmp_all_history, 404 callback, iinfo); 405 } 406 407 /* 408 * Acquire a HAMMER inode. The returned inode is not locked. These functions 409 * do not attach or detach the related vnode (use hammer_get_vnode() for 410 * that). 411 * 412 * The flags argument is only applied for newly created inodes, and only 413 * certain flags are inherited. 414 * 415 * Called from the frontend. 416 */ 417 struct hammer_inode * 418 hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip, 419 int64_t obj_id, hammer_tid_t asof, u_int32_t localization, 420 int flags, int *errorp) 421 { 422 hammer_mount_t hmp = trans->hmp; 423 struct hammer_node_cache *cachep; 424 struct hammer_inode_info iinfo; 425 struct hammer_cursor cursor; 426 struct hammer_inode *ip; 427 428 429 /* 430 * Determine if we already have an inode cached. If we do then 431 * we are golden. 432 * 433 * If we find an inode with no vnode we have to mark the 434 * transaction such that hammer_inode_waitreclaims() is 435 * called later on to avoid building up an infinite number 436 * of inodes. Otherwise we can continue to * add new inodes 437 * faster then they can be disposed of, even with the tsleep 438 * delay. 439 * 440 * If we find a dummy inode we return a failure so dounlink 441 * (which does another lookup) doesn't try to mess with the 442 * link count. hammer_vop_nresolve() uses hammer_get_dummy_inode() 443 * to ref dummy inodes. 444 */ 445 iinfo.obj_id = obj_id; 446 iinfo.obj_asof = asof; 447 iinfo.obj_localization = localization; 448 loop: 449 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 450 if (ip) { 451 if (ip->flags & HAMMER_INODE_DUMMY) { 452 *errorp = ENOENT; 453 return(NULL); 454 } 455 hammer_ref(&ip->lock); 456 *errorp = 0; 457 return(ip); 458 } 459 460 /* 461 * Allocate a new inode structure and deal with races later. 
462 */ 463 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 464 ++hammer_count_inodes; 465 ++hmp->count_inodes; 466 ip->obj_id = obj_id; 467 ip->obj_asof = iinfo.obj_asof; 468 ip->obj_localization = localization; 469 ip->hmp = hmp; 470 ip->flags = flags & HAMMER_INODE_RO; 471 ip->cache[0].ip = ip; 472 ip->cache[1].ip = ip; 473 ip->cache[2].ip = ip; 474 ip->cache[3].ip = ip; 475 if (hmp->ronly) 476 ip->flags |= HAMMER_INODE_RO; 477 ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off = 478 0x7FFFFFFFFFFFFFFFLL; 479 RB_INIT(&ip->rec_tree); 480 TAILQ_INIT(&ip->target_list); 481 hammer_ref(&ip->lock); 482 483 /* 484 * Locate the on-disk inode. If this is a PFS root we always 485 * access the current version of the root inode and (if it is not 486 * a master) always access information under it with a snapshot 487 * TID. 488 * 489 * We cache recent inode lookups in this directory in dip->cache[2]. 490 * If we can't find it we assume the inode we are looking for is 491 * close to the directory inode. 492 */ 493 retry: 494 cachep = NULL; 495 if (dip) { 496 if (dip->cache[2].node) 497 cachep = &dip->cache[2]; 498 else 499 cachep = &dip->cache[0]; 500 } 501 hammer_init_cursor(trans, &cursor, cachep, NULL); 502 cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE; 503 cursor.key_beg.obj_id = ip->obj_id; 504 cursor.key_beg.key = 0; 505 cursor.key_beg.create_tid = 0; 506 cursor.key_beg.delete_tid = 0; 507 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE; 508 cursor.key_beg.obj_type = 0; 509 510 cursor.asof = iinfo.obj_asof; 511 cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA | 512 HAMMER_CURSOR_ASOF; 513 514 *errorp = hammer_btree_lookup(&cursor); 515 if (*errorp == EDEADLK) { 516 hammer_done_cursor(&cursor); 517 goto retry; 518 } 519 520 /* 521 * On success the B-Tree lookup will hold the appropriate 522 * buffer cache buffers and provide a pointer to the requested 523 * information. Copy the information to the in-memory inode 524 * and cache the B-Tree node to improve future operations. 525 */ 526 if (*errorp == 0) { 527 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf; 528 ip->ino_data = cursor.data->inode; 529 530 /* 531 * cache[0] tries to cache the location of the object inode. 532 * The assumption is that it is near the directory inode. 533 * 534 * cache[1] tries to cache the location of the object data. 535 * We might have something in the governing directory from 536 * scan optimizations (see the strategy code in 537 * hammer_vnops.c). 538 * 539 * We update dip->cache[2], if possible, with the location 540 * of the object inode for future directory shortcuts. 541 */ 542 hammer_cache_node(&ip->cache[0], cursor.node); 543 if (dip) { 544 if (dip->cache[3].node) { 545 hammer_cache_node(&ip->cache[1], 546 dip->cache[3].node); 547 } 548 hammer_cache_node(&dip->cache[2], cursor.node); 549 } 550 551 /* 552 * The file should not contain any data past the file size 553 * stored in the inode. Setting save_trunc_off to the 554 * file size instead of max reduces B-Tree lookup overheads 555 * on append by allowing the flusher to avoid checking for 556 * record overwrites. 557 */ 558 ip->save_trunc_off = ip->ino_data.size; 559 560 /* 561 * Locate and assign the pseudofs management structure to 562 * the inode. 
563 */ 564 if (dip && dip->obj_localization == ip->obj_localization) { 565 ip->pfsm = dip->pfsm; 566 hammer_ref(&ip->pfsm->lock); 567 } else { 568 ip->pfsm = hammer_load_pseudofs(trans, 569 ip->obj_localization, 570 errorp); 571 *errorp = 0; /* ignore ENOENT */ 572 } 573 } 574 575 /* 576 * The inode is placed on the red-black tree and will be synced to 577 * the media when flushed or by the filesystem sync. If this races 578 * another instantiation/lookup the insertion will fail. 579 */ 580 if (*errorp == 0) { 581 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 582 hammer_free_inode(ip); 583 hammer_done_cursor(&cursor); 584 goto loop; 585 } 586 ip->flags |= HAMMER_INODE_ONDISK; 587 } else { 588 if (ip->flags & HAMMER_INODE_RSV_INODES) { 589 ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */ 590 --hmp->rsv_inodes; 591 } 592 593 hammer_free_inode(ip); 594 ip = NULL; 595 } 596 hammer_done_cursor(&cursor); 597 598 /* 599 * NEWINODE is only set if the inode becomes dirty later, 600 * setting it here just leads to unnecessary stalls. 601 * 602 * trans->flags |= HAMMER_TRANSF_NEWINODE; 603 */ 604 return (ip); 605 } 606 607 /* 608 * Get a dummy inode to placemark a broken directory entry. 609 */ 610 struct hammer_inode * 611 hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip, 612 int64_t obj_id, hammer_tid_t asof, u_int32_t localization, 613 int flags, int *errorp) 614 { 615 hammer_mount_t hmp = trans->hmp; 616 struct hammer_inode_info iinfo; 617 struct hammer_inode *ip; 618 619 /* 620 * Determine if we already have an inode cached. If we do then 621 * we are golden. 622 * 623 * If we find an inode with no vnode we have to mark the 624 * transaction such that hammer_inode_waitreclaims() is 625 * called later on to avoid building up an infinite number 626 * of inodes. Otherwise we can continue to * add new inodes 627 * faster then they can be disposed of, even with the tsleep 628 * delay. 629 * 630 * If we find a non-fake inode we return an error. Only fake 631 * inodes can be returned by this routine. 632 */ 633 iinfo.obj_id = obj_id; 634 iinfo.obj_asof = asof; 635 iinfo.obj_localization = localization; 636 loop: 637 *errorp = 0; 638 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 639 if (ip) { 640 if ((ip->flags & HAMMER_INODE_DUMMY) == 0) { 641 *errorp = ENOENT; 642 return(NULL); 643 } 644 hammer_ref(&ip->lock); 645 return(ip); 646 } 647 648 /* 649 * Allocate a new inode structure and deal with races later. 650 */ 651 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 652 ++hammer_count_inodes; 653 ++hmp->count_inodes; 654 ip->obj_id = obj_id; 655 ip->obj_asof = iinfo.obj_asof; 656 ip->obj_localization = localization; 657 ip->hmp = hmp; 658 ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY; 659 ip->cache[0].ip = ip; 660 ip->cache[1].ip = ip; 661 ip->cache[2].ip = ip; 662 ip->cache[3].ip = ip; 663 ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off = 664 0x7FFFFFFFFFFFFFFFLL; 665 RB_INIT(&ip->rec_tree); 666 TAILQ_INIT(&ip->target_list); 667 hammer_ref(&ip->lock); 668 669 /* 670 * Populate the dummy inode. Leave everything zero'd out. 671 * 672 * (ip->ino_leaf and ip->ino_data) 673 * 674 * Make the dummy inode a FIFO object which most copy programs 675 * will properly ignore. 676 */ 677 ip->save_trunc_off = ip->ino_data.size; 678 ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO; 679 680 /* 681 * Locate and assign the pseudofs management structure to 682 * the inode. 
683 */ 684 if (dip && dip->obj_localization == ip->obj_localization) { 685 ip->pfsm = dip->pfsm; 686 hammer_ref(&ip->pfsm->lock); 687 } else { 688 ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization, 689 errorp); 690 *errorp = 0; /* ignore ENOENT */ 691 } 692 693 /* 694 * The inode is placed on the red-black tree and will be synced to 695 * the media when flushed or by the filesystem sync. If this races 696 * another instantiation/lookup the insertion will fail. 697 * 698 * NOTE: Do not set HAMMER_INODE_ONDISK. The inode is a fake. 699 */ 700 if (*errorp == 0) { 701 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 702 hammer_free_inode(ip); 703 goto loop; 704 } 705 } else { 706 if (ip->flags & HAMMER_INODE_RSV_INODES) { 707 ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */ 708 --hmp->rsv_inodes; 709 } 710 hammer_free_inode(ip); 711 ip = NULL; 712 } 713 trans->flags |= HAMMER_TRANSF_NEWINODE; 714 return (ip); 715 } 716 717 /* 718 * Return a referenced inode only if it is in our inode cache. 719 * 720 * Dummy inodes do not count. 721 */ 722 struct hammer_inode * 723 hammer_find_inode(hammer_transaction_t trans, int64_t obj_id, 724 hammer_tid_t asof, u_int32_t localization) 725 { 726 hammer_mount_t hmp = trans->hmp; 727 struct hammer_inode_info iinfo; 728 struct hammer_inode *ip; 729 730 iinfo.obj_id = obj_id; 731 iinfo.obj_asof = asof; 732 iinfo.obj_localization = localization; 733 734 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 735 if (ip) { 736 if (ip->flags & HAMMER_INODE_DUMMY) 737 ip = NULL; 738 else 739 hammer_ref(&ip->lock); 740 } 741 return(ip); 742 } 743 744 /* 745 * Create a new filesystem object, returning the inode in *ipp. The 746 * returned inode will be referenced. The inode is created in-memory. 747 * 748 * If pfsm is non-NULL the caller wishes to create the root inode for 749 * a master PFS. 750 */ 751 int 752 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap, 753 struct ucred *cred, 754 hammer_inode_t dip, const char *name, int namelen, 755 hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp) 756 { 757 hammer_mount_t hmp; 758 hammer_inode_t ip; 759 uid_t xuid; 760 int error; 761 int64_t namekey; 762 u_int32_t dummy; 763 764 hmp = trans->hmp; 765 766 /* 767 * Disallow the creation of new inodes in directories which 768 * have been deleted. In HAMMER, this will cause a record 769 * syncing assertion later on in the flush code. 
770 */ 771 if (dip && dip->ino_data.nlinks == 0) { 772 *ipp = NULL; 773 return (EINVAL); 774 } 775 776 /* 777 * Allocate inode 778 */ 779 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 780 ++hammer_count_inodes; 781 ++hmp->count_inodes; 782 trans->flags |= HAMMER_TRANSF_NEWINODE; 783 784 if (pfsm) { 785 KKASSERT(pfsm->localization != 0); 786 ip->obj_id = HAMMER_OBJID_ROOT; 787 ip->obj_localization = pfsm->localization; 788 } else { 789 KKASSERT(dip != NULL); 790 namekey = hammer_directory_namekey(dip, name, namelen, &dummy); 791 ip->obj_id = hammer_alloc_objid(hmp, dip, namekey); 792 ip->obj_localization = dip->obj_localization; 793 } 794 795 KKASSERT(ip->obj_id != 0); 796 ip->obj_asof = hmp->asof; 797 ip->hmp = hmp; 798 ip->flush_state = HAMMER_FST_IDLE; 799 ip->flags = HAMMER_INODE_DDIRTY | 800 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME; 801 ip->cache[0].ip = ip; 802 ip->cache[1].ip = ip; 803 ip->cache[2].ip = ip; 804 ip->cache[3].ip = ip; 805 806 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL; 807 /* ip->save_trunc_off = 0; (already zero) */ 808 RB_INIT(&ip->rec_tree); 809 TAILQ_INIT(&ip->target_list); 810 811 ip->ino_data.atime = trans->time; 812 ip->ino_data.mtime = trans->time; 813 ip->ino_data.size = 0; 814 ip->ino_data.nlinks = 0; 815 816 /* 817 * A nohistory designator on the parent directory is inherited by 818 * the child. We will do this even for pseudo-fs creation... the 819 * sysad can turn it off. 820 */ 821 if (dip) { 822 ip->ino_data.uflags = dip->ino_data.uflags & 823 (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP); 824 } 825 826 ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; 827 ip->ino_leaf.base.localization = ip->obj_localization + 828 HAMMER_LOCALIZE_INODE; 829 ip->ino_leaf.base.obj_id = ip->obj_id; 830 ip->ino_leaf.base.key = 0; 831 ip->ino_leaf.base.create_tid = 0; 832 ip->ino_leaf.base.delete_tid = 0; 833 ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE; 834 ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type); 835 836 ip->ino_data.obj_type = ip->ino_leaf.base.obj_type; 837 ip->ino_data.version = HAMMER_INODE_DATA_VERSION; 838 ip->ino_data.mode = vap->va_mode; 839 ip->ino_data.ctime = trans->time; 840 841 /* 842 * If we are running version 2 or greater directory entries are 843 * inode-localized instead of data-localized. 844 */ 845 if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) { 846 if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) { 847 ip->ino_data.cap_flags |= 848 HAMMER_INODE_CAP_DIR_LOCAL_INO; 849 } 850 } 851 if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) { 852 if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) { 853 ip->ino_data.cap_flags |= 854 HAMMER_INODE_CAP_DIRHASH_ALG1; 855 } 856 } 857 858 /* 859 * Setup the ".." pointer. This only needs to be done for directories 860 * but we do it for all objects as a recovery aid if dip exists. 861 * The inode is probably a PFS root if dip is NULL. 862 */ 863 if (dip) 864 ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id; 865 #if 0 866 /* 867 * The parent_obj_localization field only applies to pseudo-fs roots. 868 * XXX this is no longer applicable, PFSs are no longer directly 869 * tied into the parent's directory structure. 
870 */ 871 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY && 872 ip->obj_id == HAMMER_OBJID_ROOT) { 873 ip->ino_data.ext.obj.parent_obj_localization = 874 dip->obj_localization; 875 } 876 #endif 877 878 switch(ip->ino_leaf.base.obj_type) { 879 case HAMMER_OBJTYPE_CDEV: 880 case HAMMER_OBJTYPE_BDEV: 881 ip->ino_data.rmajor = vap->va_rmajor; 882 ip->ino_data.rminor = vap->va_rminor; 883 break; 884 default: 885 break; 886 } 887 888 /* 889 * Calculate default uid/gid and overwrite with information from 890 * the vap. 891 */ 892 if (dip) { 893 xuid = hammer_to_unix_xid(&dip->ino_data.uid); 894 xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, 895 xuid, cred, &vap->va_mode); 896 } else { 897 xuid = 0; 898 } 899 ip->ino_data.mode = vap->va_mode; 900 901 if (vap->va_vaflags & VA_UID_UUID_VALID) 902 ip->ino_data.uid = vap->va_uid_uuid; 903 else if (vap->va_uid != (uid_t)VNOVAL) 904 hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid); 905 else 906 hammer_guid_to_uuid(&ip->ino_data.uid, xuid); 907 908 if (vap->va_vaflags & VA_GID_UUID_VALID) 909 ip->ino_data.gid = vap->va_gid_uuid; 910 else if (vap->va_gid != (gid_t)VNOVAL) 911 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid); 912 else if (dip) 913 ip->ino_data.gid = dip->ino_data.gid; 914 915 hammer_ref(&ip->lock); 916 917 if (pfsm) { 918 ip->pfsm = pfsm; 919 hammer_ref(&pfsm->lock); 920 error = 0; 921 } else if (dip->obj_localization == ip->obj_localization) { 922 ip->pfsm = dip->pfsm; 923 hammer_ref(&ip->pfsm->lock); 924 error = 0; 925 } else { 926 ip->pfsm = hammer_load_pseudofs(trans, 927 ip->obj_localization, 928 &error); 929 error = 0; /* ignore ENOENT */ 930 } 931 932 if (error) { 933 hammer_free_inode(ip); 934 ip = NULL; 935 } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 936 panic("hammer_create_inode: duplicate obj_id %llx", 937 (long long)ip->obj_id); 938 /* not reached */ 939 hammer_free_inode(ip); 940 } 941 *ipp = ip; 942 return(error); 943 } 944 945 /* 946 * Final cleanup / freeing of an inode structure 947 */ 948 static void 949 hammer_free_inode(hammer_inode_t ip) 950 { 951 struct hammer_mount *hmp; 952 953 hmp = ip->hmp; 954 KKASSERT(hammer_oneref(&ip->lock)); 955 hammer_uncache_node(&ip->cache[0]); 956 hammer_uncache_node(&ip->cache[1]); 957 hammer_uncache_node(&ip->cache[2]); 958 hammer_uncache_node(&ip->cache[3]); 959 hammer_inode_wakereclaims(ip); 960 if (ip->objid_cache) 961 hammer_clear_objid(ip); 962 --hammer_count_inodes; 963 --hmp->count_inodes; 964 if (ip->pfsm) { 965 hammer_rel_pseudofs(hmp, ip->pfsm); 966 ip->pfsm = NULL; 967 } 968 kfree(ip, hmp->m_inodes); 969 ip = NULL; 970 } 971 972 /* 973 * Retrieve pseudo-fs data. NULL will never be returned. 974 * 975 * If an error occurs *errorp will be set and a default template is returned, 976 * otherwise *errorp is set to 0. Typically when an error occurs it will 977 * be ENOENT. 978 */ 979 hammer_pseudofs_inmem_t 980 hammer_load_pseudofs(hammer_transaction_t trans, 981 u_int32_t localization, int *errorp) 982 { 983 hammer_mount_t hmp = trans->hmp; 984 hammer_inode_t ip; 985 hammer_pseudofs_inmem_t pfsm; 986 struct hammer_cursor cursor; 987 int bytes; 988 989 retry: 990 pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization); 991 if (pfsm) { 992 hammer_ref(&pfsm->lock); 993 *errorp = 0; 994 return(pfsm); 995 } 996 997 /* 998 * PFS records are associated with the root inode (not the PFS root 999 * inode, but the real root). Avoid an infinite recursion if loading 1000 * the PFS for the real root. 
1001 */ 1002 if (localization) { 1003 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, 1004 HAMMER_MAX_TID, 1005 HAMMER_DEF_LOCALIZATION, 0, errorp); 1006 } else { 1007 ip = NULL; 1008 } 1009 1010 pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO); 1011 pfsm->localization = localization; 1012 pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid; 1013 pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid; 1014 1015 hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip); 1016 cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION + 1017 HAMMER_LOCALIZE_MISC; 1018 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 1019 cursor.key_beg.create_tid = 0; 1020 cursor.key_beg.delete_tid = 0; 1021 cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS; 1022 cursor.key_beg.obj_type = 0; 1023 cursor.key_beg.key = localization; 1024 cursor.asof = HAMMER_MAX_TID; 1025 cursor.flags |= HAMMER_CURSOR_ASOF; 1026 1027 if (ip) 1028 *errorp = hammer_ip_lookup(&cursor); 1029 else 1030 *errorp = hammer_btree_lookup(&cursor); 1031 if (*errorp == 0) { 1032 *errorp = hammer_ip_resolve_data(&cursor); 1033 if (*errorp == 0) { 1034 if (cursor.data->pfsd.mirror_flags & 1035 HAMMER_PFSD_DELETED) { 1036 *errorp = ENOENT; 1037 } else { 1038 bytes = cursor.leaf->data_len; 1039 if (bytes > sizeof(pfsm->pfsd)) 1040 bytes = sizeof(pfsm->pfsd); 1041 bcopy(cursor.data, &pfsm->pfsd, bytes); 1042 } 1043 } 1044 } 1045 hammer_done_cursor(&cursor); 1046 1047 pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid); 1048 hammer_ref(&pfsm->lock); 1049 if (ip) 1050 hammer_rel_inode(ip, 0); 1051 if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) { 1052 kfree(pfsm, hmp->m_misc); 1053 goto retry; 1054 } 1055 return(pfsm); 1056 } 1057 1058 /* 1059 * Store pseudo-fs data. The backend will automatically delete any prior 1060 * on-disk pseudo-fs data but we have to delete in-memory versions. 1061 */ 1062 int 1063 hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm) 1064 { 1065 struct hammer_cursor cursor; 1066 hammer_record_t record; 1067 hammer_inode_t ip; 1068 int error; 1069 1070 /* 1071 * PFS records are associated with the root inode (not the PFS root 1072 * inode, but the real root). 1073 */ 1074 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID, 1075 HAMMER_DEF_LOCALIZATION, 0, &error); 1076 retry: 1077 pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid); 1078 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip); 1079 cursor.key_beg.localization = ip->obj_localization + 1080 HAMMER_LOCALIZE_MISC; 1081 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 1082 cursor.key_beg.create_tid = 0; 1083 cursor.key_beg.delete_tid = 0; 1084 cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS; 1085 cursor.key_beg.obj_type = 0; 1086 cursor.key_beg.key = pfsm->localization; 1087 cursor.asof = HAMMER_MAX_TID; 1088 cursor.flags |= HAMMER_CURSOR_ASOF; 1089 1090 /* 1091 * Replace any in-memory version of the record. 1092 */ 1093 error = hammer_ip_lookup(&cursor); 1094 if (error == 0 && hammer_cursor_inmem(&cursor)) { 1095 record = cursor.iprec; 1096 if (record->flags & HAMMER_RECF_INTERLOCK_BE) { 1097 KKASSERT(cursor.deadlk_rec == NULL); 1098 hammer_ref(&record->lock); 1099 cursor.deadlk_rec = record; 1100 error = EDEADLK; 1101 } else { 1102 record->flags |= HAMMER_RECF_DELETED_FE; 1103 error = 0; 1104 } 1105 } 1106 1107 /* 1108 * Allocate replacement general record. The backend flush will 1109 * delete any on-disk version of the record. 
1110 */ 1111 if (error == 0 || error == ENOENT) { 1112 record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd)); 1113 record->type = HAMMER_MEM_RECORD_GENERAL; 1114 1115 record->leaf.base.localization = ip->obj_localization + 1116 HAMMER_LOCALIZE_MISC; 1117 record->leaf.base.rec_type = HAMMER_RECTYPE_PFS; 1118 record->leaf.base.key = pfsm->localization; 1119 record->leaf.data_len = sizeof(pfsm->pfsd); 1120 bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd)); 1121 error = hammer_ip_add_record(trans, record); 1122 } 1123 hammer_done_cursor(&cursor); 1124 if (error == EDEADLK) 1125 goto retry; 1126 hammer_rel_inode(ip, 0); 1127 return(error); 1128 } 1129 1130 /* 1131 * Create a root directory for a PFS if one does not alredy exist. 1132 * 1133 * The PFS root stands alone so we must also bump the nlinks count 1134 * to prevent it from being destroyed on release. 1135 */ 1136 int 1137 hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred, 1138 hammer_pseudofs_inmem_t pfsm) 1139 { 1140 hammer_inode_t ip; 1141 struct vattr vap; 1142 int error; 1143 1144 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID, 1145 pfsm->localization, 0, &error); 1146 if (ip == NULL) { 1147 vattr_null(&vap); 1148 vap.va_mode = 0755; 1149 vap.va_type = VDIR; 1150 error = hammer_create_inode(trans, &vap, cred, 1151 NULL, NULL, 0, 1152 pfsm, &ip); 1153 if (error == 0) { 1154 ++ip->ino_data.nlinks; 1155 hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY); 1156 } 1157 } 1158 if (ip) 1159 hammer_rel_inode(ip, 0); 1160 return(error); 1161 } 1162 1163 /* 1164 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY 1165 * if we are unable to disassociate all the inodes. 1166 */ 1167 static 1168 int 1169 hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data) 1170 { 1171 int res; 1172 1173 hammer_ref(&ip->lock); 1174 if (hammer_isactive(&ip->lock) == 2 && ip->vp) 1175 vclean_unlocked(ip->vp); 1176 if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL) 1177 res = 0; 1178 else 1179 res = -1; /* stop, someone is using the inode */ 1180 hammer_rel_inode(ip, 0); 1181 return(res); 1182 } 1183 1184 int 1185 hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization) 1186 { 1187 int res; 1188 int try; 1189 1190 for (try = res = 0; try < 4; ++try) { 1191 res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root, 1192 hammer_inode_pfs_cmp, 1193 hammer_unload_pseudofs_callback, 1194 &localization); 1195 if (res == 0 && try > 1) 1196 break; 1197 hammer_flusher_sync(trans->hmp); 1198 } 1199 if (res != 0) 1200 res = ENOTEMPTY; 1201 return(res); 1202 } 1203 1204 1205 /* 1206 * Release a reference on a PFS 1207 */ 1208 void 1209 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm) 1210 { 1211 hammer_rel(&pfsm->lock); 1212 if (hammer_norefs(&pfsm->lock)) { 1213 RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm); 1214 kfree(pfsm, hmp->m_misc); 1215 } 1216 } 1217 1218 /* 1219 * Called by hammer_sync_inode(). 1220 */ 1221 static int 1222 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip) 1223 { 1224 hammer_transaction_t trans = cursor->trans; 1225 hammer_record_t record; 1226 int error; 1227 int redirty; 1228 1229 retry: 1230 error = 0; 1231 1232 /* 1233 * If the inode has a presence on-disk then locate it and mark 1234 * it deleted, setting DELONDISK. 1235 * 1236 * The record may or may not be physically deleted, depending on 1237 * the retention policy. 
1238 */ 1239 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) == 1240 HAMMER_INODE_ONDISK) { 1241 hammer_normalize_cursor(cursor); 1242 cursor->key_beg.localization = ip->obj_localization + 1243 HAMMER_LOCALIZE_INODE; 1244 cursor->key_beg.obj_id = ip->obj_id; 1245 cursor->key_beg.key = 0; 1246 cursor->key_beg.create_tid = 0; 1247 cursor->key_beg.delete_tid = 0; 1248 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE; 1249 cursor->key_beg.obj_type = 0; 1250 cursor->asof = ip->obj_asof; 1251 cursor->flags &= ~HAMMER_CURSOR_INITMASK; 1252 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF; 1253 cursor->flags |= HAMMER_CURSOR_BACKEND; 1254 1255 error = hammer_btree_lookup(cursor); 1256 if (hammer_debug_inode) 1257 kprintf("IPDEL %p %08x %d", ip, ip->flags, error); 1258 1259 if (error == 0) { 1260 error = hammer_ip_delete_record(cursor, ip, trans->tid); 1261 if (hammer_debug_inode) 1262 kprintf(" error %d\n", error); 1263 if (error == 0) { 1264 ip->flags |= HAMMER_INODE_DELONDISK; 1265 } 1266 if (cursor->node) 1267 hammer_cache_node(&ip->cache[0], cursor->node); 1268 } 1269 if (error == EDEADLK) { 1270 hammer_done_cursor(cursor); 1271 error = hammer_init_cursor(trans, cursor, 1272 &ip->cache[0], ip); 1273 if (hammer_debug_inode) 1274 kprintf("IPDED %p %d\n", ip, error); 1275 if (error == 0) 1276 goto retry; 1277 } 1278 } 1279 1280 /* 1281 * Ok, write out the initial record or a new record (after deleting 1282 * the old one), unless the DELETED flag is set. This routine will 1283 * clear DELONDISK if it writes out a record. 1284 * 1285 * Update our inode statistics if this is the first application of 1286 * the inode on-disk. 1287 */ 1288 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) { 1289 /* 1290 * Generate a record and write it to the media. We clean-up 1291 * the state before releasing so we do not have to set-up 1292 * a flush_group. 1293 */ 1294 record = hammer_alloc_mem_record(ip, 0); 1295 record->type = HAMMER_MEM_RECORD_INODE; 1296 record->flush_state = HAMMER_FST_FLUSH; 1297 record->leaf = ip->sync_ino_leaf; 1298 record->leaf.base.create_tid = trans->tid; 1299 record->leaf.data_len = sizeof(ip->sync_ino_data); 1300 record->leaf.create_ts = trans->time32; 1301 record->data = (void *)&ip->sync_ino_data; 1302 record->flags |= HAMMER_RECF_INTERLOCK_BE; 1303 1304 /* 1305 * If this flag is set we cannot sync the new file size 1306 * because we haven't finished related truncations. The 1307 * inode will be flushed in another flush group to finish 1308 * the job. 1309 */ 1310 if ((ip->flags & HAMMER_INODE_WOULDBLOCK) && 1311 ip->sync_ino_data.size != ip->ino_data.size) { 1312 redirty = 1; 1313 ip->sync_ino_data.size = ip->ino_data.size; 1314 } else { 1315 redirty = 0; 1316 } 1317 1318 for (;;) { 1319 error = hammer_ip_sync_record_cursor(cursor, record); 1320 if (hammer_debug_inode) 1321 kprintf("GENREC %p rec %08x %d\n", 1322 ip, record->flags, error); 1323 if (error != EDEADLK) 1324 break; 1325 hammer_done_cursor(cursor); 1326 error = hammer_init_cursor(trans, cursor, 1327 &ip->cache[0], ip); 1328 if (hammer_debug_inode) 1329 kprintf("GENREC reinit %d\n", error); 1330 if (error) 1331 break; 1332 } 1333 1334 /* 1335 * Note: The record was never on the inode's record tree 1336 * so just wave our hands importantly and destroy it. 
1337 */ 1338 record->flags |= HAMMER_RECF_COMMITTED; 1339 record->flags &= ~HAMMER_RECF_INTERLOCK_BE; 1340 record->flush_state = HAMMER_FST_IDLE; 1341 ++ip->rec_generation; 1342 hammer_rel_mem_record(record); 1343 1344 /* 1345 * Finish up. 1346 */ 1347 if (error == 0) { 1348 if (hammer_debug_inode) 1349 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags); 1350 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | 1351 HAMMER_INODE_SDIRTY | 1352 HAMMER_INODE_ATIME | 1353 HAMMER_INODE_MTIME); 1354 ip->flags &= ~HAMMER_INODE_DELONDISK; 1355 if (redirty) 1356 ip->sync_flags |= HAMMER_INODE_DDIRTY; 1357 1358 /* 1359 * Root volume count of inodes 1360 */ 1361 hammer_sync_lock_sh(trans); 1362 if ((ip->flags & HAMMER_INODE_ONDISK) == 0) { 1363 hammer_modify_volume_field(trans, 1364 trans->rootvol, 1365 vol0_stat_inodes); 1366 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes; 1367 hammer_modify_volume_done(trans->rootvol); 1368 ip->flags |= HAMMER_INODE_ONDISK; 1369 if (hammer_debug_inode) 1370 kprintf("NOWONDISK %p\n", ip); 1371 } 1372 hammer_sync_unlock(trans); 1373 } 1374 } 1375 1376 /* 1377 * If the inode has been destroyed, clean out any left-over flags 1378 * that may have been set by the frontend. 1379 */ 1380 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 1381 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | 1382 HAMMER_INODE_SDIRTY | 1383 HAMMER_INODE_ATIME | 1384 HAMMER_INODE_MTIME); 1385 } 1386 return(error); 1387 } 1388 1389 /* 1390 * Update only the itimes fields. 1391 * 1392 * ATIME can be updated without generating any UNDO. MTIME is updated 1393 * with UNDO so it is guaranteed to be synchronized properly in case of 1394 * a crash. 1395 * 1396 * Neither field is included in the B-Tree leaf element's CRC, which is how 1397 * we can get away with updating ATIME the way we do. 1398 */ 1399 static int 1400 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip) 1401 { 1402 hammer_transaction_t trans = cursor->trans; 1403 int error; 1404 1405 retry: 1406 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) != 1407 HAMMER_INODE_ONDISK) { 1408 return(0); 1409 } 1410 1411 hammer_normalize_cursor(cursor); 1412 cursor->key_beg.localization = ip->obj_localization + 1413 HAMMER_LOCALIZE_INODE; 1414 cursor->key_beg.obj_id = ip->obj_id; 1415 cursor->key_beg.key = 0; 1416 cursor->key_beg.create_tid = 0; 1417 cursor->key_beg.delete_tid = 0; 1418 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE; 1419 cursor->key_beg.obj_type = 0; 1420 cursor->asof = ip->obj_asof; 1421 cursor->flags &= ~HAMMER_CURSOR_INITMASK; 1422 cursor->flags |= HAMMER_CURSOR_ASOF; 1423 cursor->flags |= HAMMER_CURSOR_GET_LEAF; 1424 cursor->flags |= HAMMER_CURSOR_GET_DATA; 1425 cursor->flags |= HAMMER_CURSOR_BACKEND; 1426 1427 error = hammer_btree_lookup(cursor); 1428 if (error == 0) { 1429 hammer_cache_node(&ip->cache[0], cursor->node); 1430 if (ip->sync_flags & HAMMER_INODE_MTIME) { 1431 /* 1432 * Updating MTIME requires an UNDO. Just cover 1433 * both atime and mtime. 1434 */ 1435 hammer_sync_lock_sh(trans); 1436 hammer_modify_buffer(trans, cursor->data_buffer, 1437 HAMMER_ITIMES_BASE(&cursor->data->inode), 1438 HAMMER_ITIMES_BYTES); 1439 cursor->data->inode.atime = ip->sync_ino_data.atime; 1440 cursor->data->inode.mtime = ip->sync_ino_data.mtime; 1441 hammer_modify_buffer_done(cursor->data_buffer); 1442 hammer_sync_unlock(trans); 1443 } else if (ip->sync_flags & HAMMER_INODE_ATIME) { 1444 /* 1445 * Updating atime only can be done in-place with 1446 * no UNDO. 
1447 */ 1448 hammer_sync_lock_sh(trans); 1449 hammer_modify_buffer(trans, cursor->data_buffer, 1450 NULL, 0); 1451 cursor->data->inode.atime = ip->sync_ino_data.atime; 1452 hammer_modify_buffer_done(cursor->data_buffer); 1453 hammer_sync_unlock(trans); 1454 } 1455 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME); 1456 } 1457 if (error == EDEADLK) { 1458 hammer_done_cursor(cursor); 1459 error = hammer_init_cursor(trans, cursor, 1460 &ip->cache[0], ip); 1461 if (error == 0) 1462 goto retry; 1463 } 1464 return(error); 1465 } 1466 1467 /* 1468 * Release a reference on an inode, flush as requested. 1469 * 1470 * On the last reference we queue the inode to the flusher for its final 1471 * disposition. 1472 */ 1473 void 1474 hammer_rel_inode(struct hammer_inode *ip, int flush) 1475 { 1476 /* 1477 * Handle disposition when dropping the last ref. 1478 */ 1479 for (;;) { 1480 if (hammer_oneref(&ip->lock)) { 1481 /* 1482 * Determine whether on-disk action is needed for 1483 * the inode's final disposition. 1484 */ 1485 KKASSERT(ip->vp == NULL); 1486 hammer_inode_unloadable_check(ip, 0); 1487 if (ip->flags & HAMMER_INODE_MODMASK) { 1488 hammer_flush_inode(ip, 0); 1489 } else if (hammer_oneref(&ip->lock)) { 1490 hammer_unload_inode(ip); 1491 break; 1492 } 1493 } else { 1494 if (flush) 1495 hammer_flush_inode(ip, 0); 1496 1497 /* 1498 * The inode still has multiple refs, try to drop 1499 * one ref. 1500 */ 1501 KKASSERT(hammer_isactive(&ip->lock) >= 1); 1502 if (hammer_isactive(&ip->lock) > 1) { 1503 hammer_rel(&ip->lock); 1504 break; 1505 } 1506 } 1507 } 1508 } 1509 1510 /* 1511 * Unload and destroy the specified inode. Must be called with one remaining 1512 * reference. The reference is disposed of. 1513 * 1514 * The inode must be completely clean. 1515 */ 1516 static int 1517 hammer_unload_inode(struct hammer_inode *ip) 1518 { 1519 hammer_mount_t hmp = ip->hmp; 1520 1521 KASSERT(hammer_oneref(&ip->lock), 1522 ("hammer_unload_inode: %d refs", hammer_isactive(&ip->lock))); 1523 KKASSERT(ip->vp == NULL); 1524 KKASSERT(ip->flush_state == HAMMER_FST_IDLE); 1525 KKASSERT(ip->cursor_ip_refs == 0); 1526 KKASSERT(hammer_notlocked(&ip->lock)); 1527 KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0); 1528 1529 KKASSERT(RB_EMPTY(&ip->rec_tree)); 1530 KKASSERT(TAILQ_EMPTY(&ip->target_list)); 1531 1532 if (ip->flags & HAMMER_INODE_RDIRTY) { 1533 RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip); 1534 ip->flags &= ~HAMMER_INODE_RDIRTY; 1535 } 1536 RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip); 1537 1538 hammer_free_inode(ip); 1539 return(0); 1540 } 1541 1542 /* 1543 * Called during unmounting if a critical error occured. The in-memory 1544 * inode and all related structures are destroyed. 1545 * 1546 * If a critical error did not occur the unmount code calls the standard 1547 * release and asserts that the inode is gone. 1548 */ 1549 int 1550 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused) 1551 { 1552 hammer_record_t rec; 1553 1554 /* 1555 * Get rid of the inodes in-memory records, regardless of their 1556 * state, and clear the mod-mask. 
1557 */ 1558 while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) { 1559 TAILQ_REMOVE(&ip->target_list, rec, target_entry); 1560 rec->target_ip = NULL; 1561 if (rec->flush_state == HAMMER_FST_SETUP) 1562 rec->flush_state = HAMMER_FST_IDLE; 1563 } 1564 while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) { 1565 if (rec->flush_state == HAMMER_FST_FLUSH) 1566 --rec->flush_group->refs; 1567 else 1568 hammer_ref(&rec->lock); 1569 KKASSERT(hammer_oneref(&rec->lock)); 1570 rec->flush_state = HAMMER_FST_IDLE; 1571 rec->flush_group = NULL; 1572 rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */ 1573 rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */ 1574 ++ip->rec_generation; 1575 hammer_rel_mem_record(rec); 1576 } 1577 ip->flags &= ~HAMMER_INODE_MODMASK; 1578 ip->sync_flags &= ~HAMMER_INODE_MODMASK; 1579 KKASSERT(ip->vp == NULL); 1580 1581 /* 1582 * Remove the inode from any flush group, force it idle. FLUSH 1583 * and SETUP states have an inode ref. 1584 */ 1585 switch(ip->flush_state) { 1586 case HAMMER_FST_FLUSH: 1587 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip); 1588 --ip->flush_group->refs; 1589 ip->flush_group = NULL; 1590 /* fall through */ 1591 case HAMMER_FST_SETUP: 1592 hammer_rel(&ip->lock); 1593 ip->flush_state = HAMMER_FST_IDLE; 1594 /* fall through */ 1595 case HAMMER_FST_IDLE: 1596 break; 1597 } 1598 1599 /* 1600 * There shouldn't be any associated vnode. The unload needs at 1601 * least one ref, if we do have a vp steal its ip ref. 1602 */ 1603 if (ip->vp) { 1604 kprintf("hammer_destroy_inode_callback: Unexpected " 1605 "vnode association ip %p vp %p\n", ip, ip->vp); 1606 ip->vp->v_data = NULL; 1607 ip->vp = NULL; 1608 } else { 1609 hammer_ref(&ip->lock); 1610 } 1611 hammer_unload_inode(ip); 1612 return(0); 1613 } 1614 1615 /* 1616 * Called on mount -u when switching from RW to RO or vise-versa. Adjust 1617 * the read-only flag for cached inodes. 1618 * 1619 * This routine is called from a RB_SCAN(). 1620 */ 1621 int 1622 hammer_reload_inode(hammer_inode_t ip, void *arg __unused) 1623 { 1624 hammer_mount_t hmp = ip->hmp; 1625 1626 if (hmp->ronly || hmp->asof != HAMMER_MAX_TID) 1627 ip->flags |= HAMMER_INODE_RO; 1628 else 1629 ip->flags &= ~HAMMER_INODE_RO; 1630 return(0); 1631 } 1632 1633 /* 1634 * A transaction has modified an inode, requiring updates as specified by 1635 * the passed flags. 1636 * 1637 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime, 1638 * and not including size changes due to write-append 1639 * (but other size changes are included). 1640 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to 1641 * write-append. 1642 * HAMMER_INODE_XDIRTY: Dirty in-memory records 1643 * HAMMER_INODE_BUFS: Dirty buffer cache buffers 1644 * HAMMER_INODE_DELETED: Inode record/data must be deleted 1645 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated 1646 */ 1647 void 1648 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags) 1649 { 1650 /* 1651 * ronly of 0 or 2 does not trigger assertion. 
1652 * 2 is a special error state 1653 */ 1654 KKASSERT(ip->hmp->ronly != 1 || 1655 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 1656 HAMMER_INODE_SDIRTY | 1657 HAMMER_INODE_BUFS | HAMMER_INODE_DELETED | 1658 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0); 1659 if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) { 1660 ip->flags |= HAMMER_INODE_RSV_INODES; 1661 ++ip->hmp->rsv_inodes; 1662 } 1663 1664 /* 1665 * Set the NEWINODE flag in the transaction if the inode 1666 * transitions to a dirty state. This is used to track 1667 * the load on the inode cache. 1668 */ 1669 if (trans && 1670 (ip->flags & HAMMER_INODE_MODMASK) == 0 && 1671 (flags & HAMMER_INODE_MODMASK)) { 1672 trans->flags |= HAMMER_TRANSF_NEWINODE; 1673 } 1674 if (flags & HAMMER_INODE_MODMASK) 1675 hammer_inode_dirty(ip); 1676 ip->flags |= flags; 1677 } 1678 1679 /* 1680 * Attempt to quickly update the atime for a hammer inode. Return 0 on 1681 * success, -1 on failure. 1682 * 1683 * We attempt to update the atime with only the ip lock and not the 1684 * whole filesystem lock in order to improve concurrency. We can only 1685 * do this safely if the ATIME flag is already pending on the inode. 1686 * 1687 * This function is called via a vnops path (ip pointer is stable) without 1688 * fs_token held. 1689 */ 1690 int 1691 hammer_update_atime_quick(hammer_inode_t ip) 1692 { 1693 struct timeval tv; 1694 int res = -1; 1695 1696 if ((ip->flags & HAMMER_INODE_RO) || 1697 (ip->hmp->mp->mnt_flag & MNT_NOATIME)) { 1698 /* 1699 * Silently indicate success on read-only mount/snap 1700 */ 1701 res = 0; 1702 } else if (ip->flags & HAMMER_INODE_ATIME) { 1703 /* 1704 * Double check with inode lock held against backend. This 1705 * is only safe if all we need to do is update 1706 * ino_data.atime. 1707 */ 1708 getmicrotime(&tv); 1709 hammer_lock_ex(&ip->lock); 1710 if (ip->flags & HAMMER_INODE_ATIME) { 1711 ip->ino_data.atime = 1712 (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec; 1713 res = 0; 1714 } 1715 hammer_unlock(&ip->lock); 1716 } 1717 return res; 1718 } 1719 1720 /* 1721 * Request that an inode be flushed. This whole mess cannot block and may 1722 * recurse (if not synchronous). Once requested HAMMER will attempt to 1723 * actively flush the inode until the flush can be done. 1724 * 1725 * The inode may already be flushing, or may be in a setup state. We can 1726 * place the inode in a flushing state if it is currently idle and flag it 1727 * to reflush if it is currently flushing. 1728 * 1729 * Upon return if the inode could not be flushed due to a setup 1730 * dependancy, then it will be automatically flushed when the dependancy 1731 * is satisfied. 1732 */ 1733 void 1734 hammer_flush_inode(hammer_inode_t ip, int flags) 1735 { 1736 hammer_mount_t hmp; 1737 hammer_flush_group_t flg; 1738 int good; 1739 1740 /* 1741 * fill_flush_group is the first flush group we may be able to 1742 * continue filling, it may be open or closed but it will always 1743 * be past the currently flushing (running) flg. 1744 * 1745 * next_flush_group is the next open flush group. 
1746 */ 1747 hmp = ip->hmp; 1748 while ((flg = hmp->fill_flush_group) != NULL) { 1749 KKASSERT(flg->running == 0); 1750 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit && 1751 flg->total_count <= hammer_autoflush) { 1752 break; 1753 } 1754 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry); 1755 hammer_flusher_async(ip->hmp, flg); 1756 } 1757 if (flg == NULL) { 1758 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO); 1759 flg->seq = hmp->flusher.next++; 1760 if (hmp->next_flush_group == NULL) 1761 hmp->next_flush_group = flg; 1762 if (hmp->fill_flush_group == NULL) 1763 hmp->fill_flush_group = flg; 1764 RB_INIT(&flg->flush_tree); 1765 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry); 1766 } 1767 1768 /* 1769 * Trivial 'nothing to flush' case. If the inode is in a SETUP 1770 * state we have to put it back into an IDLE state so we can 1771 * drop the extra ref. 1772 * 1773 * If we have a parent dependancy we must still fall through 1774 * so we can run it. 1775 */ 1776 if ((ip->flags & HAMMER_INODE_MODMASK) == 0) { 1777 if (ip->flush_state == HAMMER_FST_SETUP && 1778 TAILQ_EMPTY(&ip->target_list)) { 1779 ip->flush_state = HAMMER_FST_IDLE; 1780 hammer_rel_inode(ip, 0); 1781 } 1782 if (ip->flush_state == HAMMER_FST_IDLE) 1783 return; 1784 } 1785 1786 /* 1787 * Our flush action will depend on the current state. 1788 */ 1789 switch(ip->flush_state) { 1790 case HAMMER_FST_IDLE: 1791 /* 1792 * We have no dependancies and can flush immediately. Some 1793 * our children may not be flushable so we have to re-test 1794 * with that additional knowledge. 1795 */ 1796 hammer_flush_inode_core(ip, flg, flags); 1797 break; 1798 case HAMMER_FST_SETUP: 1799 /* 1800 * Recurse upwards through dependancies via target_list 1801 * and start their flusher actions going if possible. 1802 * 1803 * 'good' is our connectivity. -1 means we have none and 1804 * can't flush, 0 means there weren't any dependancies, and 1805 * 1 means we have good connectivity. 1806 */ 1807 good = hammer_setup_parent_inodes(ip, 0, flg); 1808 1809 if (good >= 0) { 1810 /* 1811 * We can continue if good >= 0. Determine how 1812 * many records under our inode can be flushed (and 1813 * mark them). 1814 */ 1815 hammer_flush_inode_core(ip, flg, flags); 1816 } else { 1817 /* 1818 * Parent has no connectivity, tell it to flush 1819 * us as soon as it does. 1820 * 1821 * The REFLUSH flag is also needed to trigger 1822 * dependancy wakeups. 1823 */ 1824 ip->flags |= HAMMER_INODE_CONN_DOWN | 1825 HAMMER_INODE_REFLUSH; 1826 if (flags & HAMMER_FLUSH_SIGNAL) { 1827 ip->flags |= HAMMER_INODE_RESIGNAL; 1828 hammer_flusher_async(ip->hmp, flg); 1829 } 1830 } 1831 break; 1832 case HAMMER_FST_FLUSH: 1833 /* 1834 * We are already flushing, flag the inode to reflush 1835 * if needed after it completes its current flush. 1836 * 1837 * The REFLUSH flag is also needed to trigger 1838 * dependancy wakeups. 1839 */ 1840 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0) 1841 ip->flags |= HAMMER_INODE_REFLUSH; 1842 if (flags & HAMMER_FLUSH_SIGNAL) { 1843 ip->flags |= HAMMER_INODE_RESIGNAL; 1844 hammer_flusher_async(ip->hmp, flg); 1845 } 1846 break; 1847 } 1848 } 1849 1850 /* 1851 * Scan ip->target_list, which is a list of records owned by PARENTS to our 1852 * ip which reference our ip. 1853 * 1854 * XXX This is a huge mess of recursive code, but not one bit of it blocks 1855 * so for now do not ref/deref the structures. Note that if we use the 1856 * ref/rel code later, the rel CAN block. 
1857 */ 1858 static int 1859 hammer_setup_parent_inodes(hammer_inode_t ip, int depth, 1860 hammer_flush_group_t flg) 1861 { 1862 hammer_record_t depend; 1863 int good; 1864 int r; 1865 1866 /* 1867 * If we hit our recursion limit and we have parent dependencies 1868 * We cannot continue. Returning < 0 will cause us to be flagged 1869 * for reflush. Returning -2 cuts off additional dependency checks 1870 * because they are likely to also hit the depth limit. 1871 * 1872 * We cannot return < 0 if there are no dependencies or there might 1873 * not be anything to wakeup (ip). 1874 */ 1875 if (depth == 20 && TAILQ_FIRST(&ip->target_list)) { 1876 if (hammer_debug_general & 0x10000) 1877 krateprintf(&hammer_gen_krate, 1878 "HAMMER Warning: depth limit reached on " 1879 "setup recursion, inode %p %016llx\n", 1880 ip, (long long)ip->obj_id); 1881 return(-2); 1882 } 1883 1884 /* 1885 * Scan dependencies 1886 */ 1887 good = 0; 1888 TAILQ_FOREACH(depend, &ip->target_list, target_entry) { 1889 r = hammer_setup_parent_inodes_helper(depend, depth, flg); 1890 KKASSERT(depend->target_ip == ip); 1891 if (r < 0 && good == 0) 1892 good = -1; 1893 if (r > 0) 1894 good = 1; 1895 1896 /* 1897 * If we failed due to the recursion depth limit then stop 1898 * now. 1899 */ 1900 if (r == -2) 1901 break; 1902 } 1903 return(good); 1904 } 1905 1906 /* 1907 * This helper function takes a record representing the dependancy between 1908 * the parent inode and child inode. 1909 * 1910 * record = record in question (*rec in below) 1911 * record->ip = parent inode (*pip in below) 1912 * record->target_ip = child inode (*ip in below) 1913 * 1914 * *pip--------------\ 1915 * ^ \rec_tree 1916 * \ \ 1917 * \ip /\\\\\ rbtree of recs from parent inode's view 1918 * \ //\\\\\\ 1919 * \ / ........ 1920 * \ / 1921 * \------*rec------target_ip------>*ip 1922 * ...target_entry<----...----->target_list<---... 1923 * list of recs from inode's view 1924 * 1925 * We are asked to recurse upwards and convert the record from SETUP 1926 * to FLUSH if possible. 1927 * 1928 * Return 1 if the record gives us connectivity 1929 * 1930 * Return 0 if the record is not relevant 1931 * 1932 * Return -1 if we can't resolve the dependancy and there is no connectivity. 1933 */ 1934 static int 1935 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth, 1936 hammer_flush_group_t flg) 1937 { 1938 hammer_inode_t pip; 1939 int good; 1940 1941 KKASSERT(record->flush_state != HAMMER_FST_IDLE); 1942 pip = record->ip; 1943 1944 /* 1945 * If the record is already flushing, is it in our flush group? 1946 * 1947 * If it is in our flush group but it is a general record or a 1948 * delete-on-disk, it does not improve our connectivity (return 0), 1949 * and if the target inode is not trying to destroy itself we can't 1950 * allow the operation yet anyway (the second return -1). 1951 */ 1952 if (record->flush_state == HAMMER_FST_FLUSH) { 1953 /* 1954 * If not in our flush group ask the parent to reflush 1955 * us as soon as possible. 1956 */ 1957 if (record->flush_group != flg) { 1958 pip->flags |= HAMMER_INODE_REFLUSH; 1959 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1960 return(-1); 1961 } 1962 1963 /* 1964 * If in our flush group everything is already set up, 1965 * just return whether the record will improve our 1966 * visibility or not. 1967 */ 1968 if (record->type == HAMMER_MEM_RECORD_ADD) 1969 return(1); 1970 return(0); 1971 } 1972 1973 /* 1974 * It must be a setup record. 
Try to resolve the setup dependancies 1975 * by recursing upwards so we can place ip on the flush list. 1976 * 1977 * Limit ourselves to 20 levels of recursion to avoid blowing out 1978 * the kernel stack. If we hit the recursion limit we can't flush 1979 * until the parent flushes. The parent will flush independantly 1980 * on its own and ultimately a deep recursion will be resolved. 1981 */ 1982 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1983 1984 good = hammer_setup_parent_inodes(pip, depth + 1, flg); 1985 1986 /* 1987 * If good < 0 the parent has no connectivity and we cannot safely 1988 * flush the directory entry, which also means we can't flush our 1989 * ip. Flag us for downward recursion once the parent's 1990 * connectivity is resolved. Flag the parent for [re]flush or it 1991 * may not check for downward recursions. 1992 */ 1993 if (good < 0) { 1994 pip->flags |= HAMMER_INODE_REFLUSH; 1995 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1996 return(good); 1997 } 1998 1999 /* 2000 * We are go, place the parent inode in a flushing state so we can 2001 * place its record in a flushing state. Note that the parent 2002 * may already be flushing. The record must be in the same flush 2003 * group as the parent. 2004 */ 2005 if (pip->flush_state != HAMMER_FST_FLUSH) 2006 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION); 2007 KKASSERT(pip->flush_state == HAMMER_FST_FLUSH); 2008 2009 /* 2010 * It is possible for a rename to create a loop in the recursion 2011 * and revisit a record. This will result in the record being 2012 * placed in a flush state unexpectedly. This check deals with 2013 * the case. 2014 */ 2015 if (record->flush_state == HAMMER_FST_FLUSH) { 2016 if (record->type == HAMMER_MEM_RECORD_ADD) 2017 return(1); 2018 return(0); 2019 } 2020 2021 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 2022 2023 #if 0 2024 if (record->type == HAMMER_MEM_RECORD_DEL && 2025 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) { 2026 /* 2027 * Regardless of flushing state we cannot sync this path if the 2028 * record represents a delete-on-disk but the target inode 2029 * is not ready to sync its own deletion. 2030 * 2031 * XXX need to count effective nlinks to determine whether 2032 * the flush is ok, otherwise removing a hardlink will 2033 * just leave the DEL record to rot. 2034 */ 2035 record->target_ip->flags |= HAMMER_INODE_REFLUSH; 2036 return(-1); 2037 } else 2038 #endif 2039 if (pip->flush_group == flg) { 2040 /* 2041 * Because we have not calculated nlinks yet we can just 2042 * set records to the flush state if the parent is in 2043 * the same flush group as we are. 2044 */ 2045 record->flush_state = HAMMER_FST_FLUSH; 2046 record->flush_group = flg; 2047 ++record->flush_group->refs; 2048 hammer_ref(&record->lock); 2049 2050 /* 2051 * A general directory-add contributes to our visibility. 2052 * 2053 * Otherwise it is probably a directory-delete or 2054 * delete-on-disk record and does not contribute to our 2055 * visbility (but we can still flush it). 2056 */ 2057 if (record->type == HAMMER_MEM_RECORD_ADD) 2058 return(1); 2059 return(0); 2060 } else { 2061 /* 2062 * If the parent is not in our flush group we cannot 2063 * flush this record yet, there is no visibility. 2064 * We tell the parent to reflush and mark ourselves 2065 * so the parent knows it should flush us too. 
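		 *
		 * This is the same treatment as the not-in-our-flush-group
		 * case earlier in this function: HAMMER_INODE_REFLUSH asks
		 * the parent to come back to us, and HAMMER_INODE_CONN_DOWN
		 * is what later tells hammer_setup_child_callback() to pull
		 * this record into the flush once the parent's connectivity
		 * has been resolved.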
2066 */ 2067 pip->flags |= HAMMER_INODE_REFLUSH; 2068 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 2069 return(-1); 2070 } 2071 } 2072 2073 /* 2074 * This is the core routine placing an inode into the FST_FLUSH state. 2075 */ 2076 static void 2077 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags) 2078 { 2079 hammer_mount_t hmp = ip->hmp; 2080 int go_count; 2081 2082 /* 2083 * Set flush state and prevent the flusher from cycling into 2084 * the next flush group. Do not place the ip on the list yet. 2085 * Inodes not in the idle state get an extra reference. 2086 */ 2087 KKASSERT(ip->flush_state != HAMMER_FST_FLUSH); 2088 if (ip->flush_state == HAMMER_FST_IDLE) 2089 hammer_ref(&ip->lock); 2090 ip->flush_state = HAMMER_FST_FLUSH; 2091 ip->flush_group = flg; 2092 ++hmp->flusher.group_lock; 2093 ++hmp->count_iqueued; 2094 ++hammer_count_iqueued; 2095 ++flg->total_count; 2096 hammer_redo_fifo_start_flush(ip); 2097 2098 #if 0 2099 /* 2100 * We need to be able to vfsync/truncate from the backend. 2101 * 2102 * XXX Any truncation from the backend will acquire the vnode 2103 * independently. 2104 */ 2105 KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0); 2106 if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) { 2107 ip->flags |= HAMMER_INODE_VHELD; 2108 vref(ip->vp); 2109 } 2110 #endif 2111 2112 /* 2113 * Figure out how many in-memory records we can actually flush 2114 * (not including inode meta-data, buffers, etc). 2115 */ 2116 KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0); 2117 if (flags & HAMMER_FLUSH_RECURSION) { 2118 /* 2119 * If this is a upwards recursion we do not want to 2120 * recurse down again! 2121 */ 2122 go_count = 1; 2123 #if 0 2124 } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 2125 /* 2126 * No new records are added if we must complete a flush 2127 * from a previous cycle, but we do have to move the records 2128 * from the previous cycle to the current one. 2129 */ 2130 #if 0 2131 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2132 hammer_syncgrp_child_callback, NULL); 2133 #endif 2134 go_count = 1; 2135 #endif 2136 } else { 2137 /* 2138 * Normal flush, scan records and bring them into the flush. 2139 * Directory adds and deletes are usually skipped (they are 2140 * grouped with the related inode rather then with the 2141 * directory). 2142 * 2143 * go_count can be negative, which means the scan aborted 2144 * due to the flush group being over-full and we should 2145 * flush what we have. 2146 */ 2147 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2148 hammer_setup_child_callback, NULL); 2149 } 2150 2151 /* 2152 * This is a more involved test that includes go_count. If we 2153 * can't flush, flag the inode and return. If go_count is 0 we 2154 * were are unable to flush any records in our rec_tree and 2155 * must ignore the XDIRTY flag. 2156 */ 2157 if (go_count == 0) { 2158 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) { 2159 --hmp->count_iqueued; 2160 --hammer_count_iqueued; 2161 2162 --flg->total_count; 2163 ip->flush_state = HAMMER_FST_SETUP; 2164 ip->flush_group = NULL; 2165 if (flags & HAMMER_FLUSH_SIGNAL) { 2166 ip->flags |= HAMMER_INODE_REFLUSH | 2167 HAMMER_INODE_RESIGNAL; 2168 } else { 2169 ip->flags |= HAMMER_INODE_REFLUSH; 2170 } 2171 #if 0 2172 if (ip->flags & HAMMER_INODE_VHELD) { 2173 ip->flags &= ~HAMMER_INODE_VHELD; 2174 vrele(ip->vp); 2175 } 2176 #endif 2177 2178 /* 2179 * REFLUSH is needed to trigger dependancy wakeups 2180 * when an inode is in SETUP. 
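			 *
			 * The wakeup itself happens when something later
			 * calls hammer_test_inode() against us.  An
			 * illustrative sketch of that path (ref/rel and
			 * RESIGNAL handling omitted, see the real function
			 * further down in this file):
			 *
			 *	if (ip->flags & HAMMER_INODE_REFLUSH) {
			 *		ip->flags &= ~HAMMER_INODE_REFLUSH;
			 *		if (ip->flags & HAMMER_INODE_RESIGNAL)
			 *			hammer_flush_inode(ip,
			 *					HAMMER_FLUSH_SIGNAL);
			 *		else
			 *			hammer_flush_inode(ip, 0);
			 *	}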
2181 */ 2182 ip->flags |= HAMMER_INODE_REFLUSH; 2183 if (--hmp->flusher.group_lock == 0) 2184 wakeup(&hmp->flusher.group_lock); 2185 return; 2186 } 2187 } 2188 2189 /* 2190 * Snapshot the state of the inode for the backend flusher. 2191 * 2192 * We continue to retain save_trunc_off even when all truncations 2193 * have been resolved as an optimization to determine if we can 2194 * skip the B-Tree lookup for overwrite deletions. 2195 * 2196 * NOTE: The DELETING flag is a mod flag, but it is also sticky, 2197 * and stays in ip->flags. Once set, it stays set until the 2198 * inode is destroyed. 2199 */ 2200 if (ip->flags & HAMMER_INODE_TRUNCATED) { 2201 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0); 2202 ip->sync_trunc_off = ip->trunc_off; 2203 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL; 2204 ip->flags &= ~HAMMER_INODE_TRUNCATED; 2205 ip->sync_flags |= HAMMER_INODE_TRUNCATED; 2206 2207 /* 2208 * The save_trunc_off used to cache whether the B-Tree 2209 * holds any records past that point is not used until 2210 * after the truncation has succeeded, so we can safely 2211 * set it now. 2212 */ 2213 if (ip->save_trunc_off > ip->sync_trunc_off) 2214 ip->save_trunc_off = ip->sync_trunc_off; 2215 } 2216 ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK & 2217 ~HAMMER_INODE_TRUNCATED); 2218 ip->sync_ino_leaf = ip->ino_leaf; 2219 ip->sync_ino_data = ip->ino_data; 2220 ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED; 2221 #ifdef DEBUG_TRUNCATE 2222 if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp) 2223 kprintf("truncateS %016llx\n", ip->sync_trunc_off); 2224 #endif 2225 2226 /* 2227 * The flusher list inherits our inode and reference. 2228 */ 2229 KKASSERT(flg->running == 0); 2230 RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip); 2231 if (--hmp->flusher.group_lock == 0) 2232 wakeup(&hmp->flusher.group_lock); 2233 2234 /* 2235 * Auto-flush the group if it grows too large. Make sure the 2236 * inode reclaim wait pipeline continues to work. 2237 */ 2238 if (flg->total_count >= hammer_autoflush || 2239 flg->total_count >= hammer_limit_reclaims / 4) { 2240 if (hmp->fill_flush_group == flg) 2241 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry); 2242 hammer_flusher_async(hmp, flg); 2243 } 2244 } 2245 2246 /* 2247 * Callback for scan of ip->rec_tree. Try to include each record in our 2248 * flush. ip->flush_group has been set but the inode has not yet been 2249 * moved into a flushing state. 2250 * 2251 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on 2252 * both inodes. 2253 * 2254 * We return 1 for any record placed or found in FST_FLUSH, which prevents 2255 * the caller from shortcutting the flush. 2256 */ 2257 static int 2258 hammer_setup_child_callback(hammer_record_t rec, void *data) 2259 { 2260 hammer_flush_group_t flg; 2261 hammer_inode_t target_ip; 2262 hammer_inode_t ip; 2263 int r; 2264 2265 /* 2266 * Records deleted or committed by the backend are ignored. 2267 * Note that the flush detects deleted frontend records at 2268 * multiple points to deal with races. This is just the first 2269 * line of defense. The only time HAMMER_RECF_DELETED_FE cannot 2270 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it 2271 * messes up link-count calculations. 2272 * 2273 * NOTE: Don't get confused between record deletion and, say, 2274 * directory entry deletion. The deletion of a directory entry 2275 * which is on-media has nothing to do with the record deletion 2276 * flags. 
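	 *
	 * As an illustrative example of the distinction: removing a
	 * directory entry that is already on-media is represented by
	 * queueing a new in-memory record (a delete-on-disk,
	 * HAMMER_MEM_RECORD_DEL), whereas the flags tested below only
	 * describe what has happened to an in-memory record itself --
	 * the frontend or backend threw it away, or the backend already
	 * committed it.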
         */
        if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                          HAMMER_RECF_COMMITTED)) {
                if (rec->flush_state == HAMMER_FST_FLUSH) {
                        KKASSERT(rec->flush_group == rec->ip->flush_group);
                        r = 1;
                } else {
                        r = 0;
                }
                return(r);
        }

        /*
         * If the record is in an idle state it has no dependancies and
         * can be flushed.
         */
        ip = rec->ip;
        flg = ip->flush_group;
        r = 0;

        switch(rec->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * The record has no setup dependancy, we can flush it.
                 */
                KKASSERT(rec->target_ip == NULL);
                rec->flush_state = HAMMER_FST_FLUSH;
                rec->flush_group = flg;
                ++flg->refs;
                hammer_ref(&rec->lock);
                r = 1;
                break;
        case HAMMER_FST_SETUP:
                /*
                 * The record has a setup dependancy.  These are typically
                 * directory entry adds and deletes.  Such entries will be
                 * flushed when their inodes are flushed so we do not
                 * usually have to add them to the flush here.  However,
                 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
                 * it is asking us to flush this record (and it).
                 */
                target_ip = rec->target_ip;
                KKASSERT(target_ip != NULL);
                KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

                /*
                 * If the target IP is already flushing in our group
                 * we could associate the record, but target_ip has
                 * already synced ino_data to sync_ino_data and we
                 * would also have to adjust nlinks.  Plus there are
                 * ordering issues for adds and deletes.
                 *
                 * Reflush downward if this is an ADD, and upward if
                 * this is a DEL.
                 */
                if (target_ip->flush_state == HAMMER_FST_FLUSH) {
                        if (rec->type == HAMMER_MEM_RECORD_ADD)
                                ip->flags |= HAMMER_INODE_REFLUSH;
                        else
                                target_ip->flags |= HAMMER_INODE_REFLUSH;
                        break;
                }

                /*
                 * Target IP is not yet flushing.  This can get complex
                 * because we have to be careful about the recursion.
                 *
                 * Directories create an issue for us in that if a flush
                 * of a directory is requested the expectation is to flush
                 * any pending directory entries, but this will cause the
                 * related inodes to recursively flush as well.  We can't
                 * really defer the operation, so we just pull in as many
                 * records as we can and let any overflow be picked up by
                 * a later reflush of the directory.
                 */
#if 0
                if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
                    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
                        /*
                         * We aren't reclaiming and the target ip was not
                         * previously prevented from flushing due to this
                         * record dependancy.  Do not flush this record.
                         */
                        /*r = 0;*/
                } else
#endif
                if (flg->total_count + flg->refs >
                    ip->hmp->undo_rec_limit) {
                        /*
                         * Our flush group is over-full and we risk blowing
                         * out the UNDO FIFO.  Stop the scan, flush what we
                         * have, then reflush the directory.
                         *
                         * The directory may be forced through multiple
                         * flush groups before it can be completely
                         * flushed.
                         */
                        ip->flags |= HAMMER_INODE_RESIGNAL |
                                     HAMMER_INODE_REFLUSH;
                        r = -1;
                } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
                        /*
                         * If the target IP is not flushing we can force
                         * it to flush; even if it is unable to write out
                         * any of its own records, we have at least one in
                         * hand that we CAN deal with.
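                         *
                         * Note that the forced flush below passes
                         * HAMMER_FLUSH_RECURSION, so hammer_flush_inode_core()
                         * will not rescan the target's own rec_tree (go_count
                         * is simply forced to 1 for an upward recursion);
                         * only the record we are holding here is guaranteed
                         * to be part of this flush group.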
2382 */ 2383 rec->flush_state = HAMMER_FST_FLUSH; 2384 rec->flush_group = flg; 2385 ++flg->refs; 2386 hammer_ref(&rec->lock); 2387 hammer_flush_inode_core(target_ip, flg, 2388 HAMMER_FLUSH_RECURSION); 2389 r = 1; 2390 } else { 2391 /* 2392 * General or delete-on-disk record. 2393 * 2394 * XXX this needs help. If a delete-on-disk we could 2395 * disconnect the target. If the target has its own 2396 * dependancies they really need to be flushed. 2397 * 2398 * XXX 2399 */ 2400 rec->flush_state = HAMMER_FST_FLUSH; 2401 rec->flush_group = flg; 2402 ++flg->refs; 2403 hammer_ref(&rec->lock); 2404 hammer_flush_inode_core(target_ip, flg, 2405 HAMMER_FLUSH_RECURSION); 2406 r = 1; 2407 } 2408 break; 2409 case HAMMER_FST_FLUSH: 2410 /* 2411 * The record could be part of a previous flush group if the 2412 * inode is a directory (the record being a directory entry). 2413 * Once the flush group was closed a hammer_test_inode() 2414 * function can cause a new flush group to be setup, placing 2415 * the directory inode itself in a new flush group. 2416 * 2417 * When associated with a previous flush group we count it 2418 * as if it were in our current flush group, since it will 2419 * effectively be flushed by the time we flush our current 2420 * flush group. 2421 */ 2422 KKASSERT( 2423 rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY || 2424 rec->flush_group == flg); 2425 r = 1; 2426 break; 2427 } 2428 return(r); 2429 } 2430 2431 #if 0 2432 /* 2433 * This version just moves records already in a flush state to the new 2434 * flush group and that is it. 2435 */ 2436 static int 2437 hammer_syncgrp_child_callback(hammer_record_t rec, void *data) 2438 { 2439 hammer_inode_t ip = rec->ip; 2440 2441 switch(rec->flush_state) { 2442 case HAMMER_FST_FLUSH: 2443 KKASSERT(rec->flush_group == ip->flush_group); 2444 break; 2445 default: 2446 break; 2447 } 2448 return(0); 2449 } 2450 #endif 2451 2452 /* 2453 * Wait for a previously queued flush to complete. 2454 * 2455 * If a critical error occured we don't try to wait. 2456 */ 2457 void 2458 hammer_wait_inode(hammer_inode_t ip) 2459 { 2460 /* 2461 * The inode can be in a SETUP state in which case RESIGNAL 2462 * should be set. If RESIGNAL is not set then the previous 2463 * flush completed and a later operation placed the inode 2464 * in a passive setup state again, so we're done. 2465 * 2466 * The inode can be in a FLUSH state in which case we 2467 * can just wait for completion. 2468 */ 2469 while (ip->flush_state == HAMMER_FST_FLUSH || 2470 (ip->flush_state == HAMMER_FST_SETUP && 2471 (ip->flags & HAMMER_INODE_RESIGNAL))) { 2472 /* 2473 * Don't try to flush on a critical error 2474 */ 2475 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) 2476 break; 2477 2478 /* 2479 * If the inode was already being flushed its flg 2480 * may not have been queued to the backend. We 2481 * have to make sure it gets queued or we can wind 2482 * up blocked or deadlocked (particularly if we are 2483 * the vnlru thread). 2484 */ 2485 if (ip->flush_state == HAMMER_FST_FLUSH) { 2486 KKASSERT(ip->flush_group); 2487 if (ip->flush_group->closed == 0) { 2488 if (hammer_debug_inode) { 2489 kprintf("hammer: debug: forcing " 2490 "async flush ip %016jx\n", 2491 (intmax_t)ip->obj_id); 2492 } 2493 hammer_flusher_async(ip->hmp, 2494 ip->flush_group); 2495 continue; /* retest */ 2496 } 2497 } 2498 2499 /* 2500 * In a flush state with the flg queued to the backend 2501 * or in a setup state with RESIGNAL set, we can safely 2502 * wait. 
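		 *
		 * The wait pairs with hammer_flush_inode_done(): we set
		 * HAMMER_INODE_FLUSHW and tsleep() on &ip->flags, and the
		 * backend issues wakeup(&ip->flags) when it clears FLUSHW
		 * after the flush finishes.  An illustrative pairing:
		 *
		 *	frontend (here)              backend (done routine)
		 *	ip->flags |= FLUSHW;
		 *	tsleep(&ip->flags, ...);
		 *	                             ip->flags &= ~FLUSHW;
		 *	                             wakeup(&ip->flags);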
2503 */ 2504 ip->flags |= HAMMER_INODE_FLUSHW; 2505 tsleep(&ip->flags, 0, "hmrwin", 0); 2506 } 2507 2508 #if 0 2509 /* 2510 * The inode may have been in a passive setup state, 2511 * call flush to make sure we get signaled. 2512 */ 2513 if (ip->flush_state == HAMMER_FST_SETUP) 2514 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2515 #endif 2516 2517 } 2518 2519 /* 2520 * Called by the backend code when a flush has been completed. 2521 * The inode has already been removed from the flush list. 2522 * 2523 * A pipelined flush can occur, in which case we must re-enter the 2524 * inode on the list and re-copy its fields. 2525 */ 2526 void 2527 hammer_flush_inode_done(hammer_inode_t ip, int error) 2528 { 2529 hammer_mount_t hmp; 2530 int dorel; 2531 2532 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH); 2533 2534 hmp = ip->hmp; 2535 2536 /* 2537 * Auto-reflush if the backend could not completely flush 2538 * the inode. This fixes a case where a deferred buffer flush 2539 * could cause fsync to return early. 2540 */ 2541 if (ip->sync_flags & HAMMER_INODE_MODMASK) 2542 ip->flags |= HAMMER_INODE_REFLUSH; 2543 2544 /* 2545 * Merge left-over flags back into the frontend and fix the state. 2546 * Incomplete truncations are retained by the backend. 2547 */ 2548 ip->error = error; 2549 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED; 2550 ip->sync_flags &= HAMMER_INODE_TRUNCATED; 2551 2552 /* 2553 * The backend may have adjusted nlinks, so if the adjusted nlinks 2554 * does not match the fronttend set the frontend's DDIRTY flag again. 2555 */ 2556 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks) 2557 ip->flags |= HAMMER_INODE_DDIRTY; 2558 2559 /* 2560 * Fix up the dirty buffer status. 2561 */ 2562 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) { 2563 ip->flags |= HAMMER_INODE_BUFS; 2564 } 2565 hammer_redo_fifo_end_flush(ip); 2566 2567 /* 2568 * Re-set the XDIRTY flag if some of the inode's in-memory records 2569 * could not be flushed. 2570 */ 2571 KKASSERT((RB_EMPTY(&ip->rec_tree) && 2572 (ip->flags & HAMMER_INODE_XDIRTY) == 0) || 2573 (!RB_EMPTY(&ip->rec_tree) && 2574 (ip->flags & HAMMER_INODE_XDIRTY) != 0)); 2575 2576 /* 2577 * Do not lose track of inodes which no longer have vnode 2578 * assocations, otherwise they may never get flushed again. 2579 * 2580 * The reflush flag can be set superfluously, causing extra pain 2581 * for no reason. If the inode is no longer modified it no longer 2582 * needs to be flushed. 2583 */ 2584 if (ip->flags & HAMMER_INODE_MODMASK) { 2585 if (ip->vp == NULL) 2586 ip->flags |= HAMMER_INODE_REFLUSH; 2587 } else { 2588 ip->flags &= ~HAMMER_INODE_REFLUSH; 2589 } 2590 2591 /* 2592 * The fs token is held but the inode lock is not held. Because this 2593 * is a backend flush it is possible that the vnode has no references 2594 * and cause a reclaim race inside vsetisdirty() if/when it blocks. 2595 * 2596 * Therefore, we must lock the inode around this particular dirtying 2597 * operation. We don't have to around other dirtying operations 2598 * where the vnode is implicitly or explicitly held. 2599 */ 2600 if (ip->flags & HAMMER_INODE_MODMASK) { 2601 hammer_lock_ex(&ip->lock); 2602 hammer_inode_dirty(ip); 2603 hammer_unlock(&ip->lock); 2604 } 2605 2606 /* 2607 * Adjust the flush state. 2608 */ 2609 if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 2610 /* 2611 * We were unable to flush out all our records, leave the 2612 * inode in a flush state and in the current flush group. 2613 * The flush group will be re-run. 
2614 * 2615 * This occurs if the UNDO block gets too full or there is 2616 * too much dirty meta-data and allows the flusher to 2617 * finalize the UNDO block and then re-flush. 2618 */ 2619 ip->flags &= ~HAMMER_INODE_WOULDBLOCK; 2620 dorel = 0; 2621 } else { 2622 /* 2623 * Remove from the flush_group 2624 */ 2625 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip); 2626 ip->flush_group = NULL; 2627 2628 #if 0 2629 /* 2630 * Clean up the vnode ref and tracking counts. 2631 */ 2632 if (ip->flags & HAMMER_INODE_VHELD) { 2633 ip->flags &= ~HAMMER_INODE_VHELD; 2634 vrele(ip->vp); 2635 } 2636 #endif 2637 --hmp->count_iqueued; 2638 --hammer_count_iqueued; 2639 2640 /* 2641 * And adjust the state. 2642 */ 2643 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) { 2644 ip->flush_state = HAMMER_FST_IDLE; 2645 dorel = 1; 2646 } else { 2647 ip->flush_state = HAMMER_FST_SETUP; 2648 dorel = 0; 2649 } 2650 2651 /* 2652 * If the frontend is waiting for a flush to complete, 2653 * wake it up. 2654 */ 2655 if (ip->flags & HAMMER_INODE_FLUSHW) { 2656 ip->flags &= ~HAMMER_INODE_FLUSHW; 2657 wakeup(&ip->flags); 2658 } 2659 2660 /* 2661 * If the frontend made more changes and requested another 2662 * flush, then try to get it running. 2663 * 2664 * Reflushes are aborted when the inode is errored out. 2665 */ 2666 if (ip->flags & HAMMER_INODE_REFLUSH) { 2667 ip->flags &= ~HAMMER_INODE_REFLUSH; 2668 if (ip->flags & HAMMER_INODE_RESIGNAL) { 2669 ip->flags &= ~HAMMER_INODE_RESIGNAL; 2670 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2671 } else { 2672 hammer_flush_inode(ip, 0); 2673 } 2674 } 2675 } 2676 2677 /* 2678 * If we have no parent dependancies we can clear CONN_DOWN 2679 */ 2680 if (TAILQ_EMPTY(&ip->target_list)) 2681 ip->flags &= ~HAMMER_INODE_CONN_DOWN; 2682 2683 /* 2684 * If the inode is now clean drop the space reservation. 2685 */ 2686 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 && 2687 (ip->flags & HAMMER_INODE_RSV_INODES)) { 2688 ip->flags &= ~HAMMER_INODE_RSV_INODES; 2689 --hmp->rsv_inodes; 2690 } 2691 2692 ip->flags &= ~HAMMER_INODE_SLAVEFLUSH; 2693 2694 if (dorel) 2695 hammer_rel_inode(ip, 0); 2696 } 2697 2698 /* 2699 * Called from hammer_sync_inode() to synchronize in-memory records 2700 * to the media. 2701 */ 2702 static int 2703 hammer_sync_record_callback(hammer_record_t record, void *data) 2704 { 2705 hammer_cursor_t cursor = data; 2706 hammer_transaction_t trans = cursor->trans; 2707 hammer_mount_t hmp = trans->hmp; 2708 int error; 2709 2710 /* 2711 * Skip records that do not belong to the current flush. 2712 */ 2713 ++hammer_stats_record_iterations; 2714 if (record->flush_state != HAMMER_FST_FLUSH) 2715 return(0); 2716 2717 #if 1 2718 if (record->flush_group != record->ip->flush_group) { 2719 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group ,record->ip->flush_group); 2720 if (hammer_debug_critical) 2721 Debugger("blah2"); 2722 return(0); 2723 } 2724 #endif 2725 KKASSERT(record->flush_group == record->ip->flush_group); 2726 2727 /* 2728 * Interlock the record using the BE flag. Once BE is set the 2729 * frontend cannot change the state of FE. 2730 * 2731 * NOTE: If FE is set prior to us setting BE we still sync the 2732 * record out, but the flush completion code converts it to 2733 * a delete-on-disk record instead of destroying it. 2734 */ 2735 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0); 2736 record->flags |= HAMMER_RECF_INTERLOCK_BE; 2737 2738 /* 2739 * The backend has already disposed of the record. 
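	 * That is, HAMMER_RECF_DELETED_BE or HAMMER_RECF_COMMITTED is
	 * already set, so the test below treats the record as a
	 * successful no-op and jumps straight to the completion path.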
2740 */ 2741 if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) { 2742 error = 0; 2743 goto done; 2744 } 2745 2746 /* 2747 * If the whole inode is being deleted and all on-disk records will 2748 * be deleted very soon, we can't sync any new records to disk 2749 * because they will be deleted in the same transaction they were 2750 * created in (delete_tid == create_tid), which will assert. 2751 * 2752 * XXX There may be a case with RECORD_ADD with DELETED_FE set 2753 * that we currently panic on. 2754 */ 2755 if (record->ip->sync_flags & HAMMER_INODE_DELETING) { 2756 switch(record->type) { 2757 case HAMMER_MEM_RECORD_DATA: 2758 /* 2759 * We don't have to do anything, if the record was 2760 * committed the space will have been accounted for 2761 * in the blockmap. 2762 */ 2763 /* fall through */ 2764 case HAMMER_MEM_RECORD_GENERAL: 2765 /* 2766 * Set deleted-by-backend flag. Do not set the 2767 * backend committed flag, because we are throwing 2768 * the record away. 2769 */ 2770 record->flags |= HAMMER_RECF_DELETED_BE; 2771 ++record->ip->rec_generation; 2772 error = 0; 2773 goto done; 2774 case HAMMER_MEM_RECORD_ADD: 2775 panic("hammer_sync_record_callback: illegal add " 2776 "during inode deletion record %p", record); 2777 break; /* NOT REACHED */ 2778 case HAMMER_MEM_RECORD_INODE: 2779 panic("hammer_sync_record_callback: attempt to " 2780 "sync inode record %p?", record); 2781 break; /* NOT REACHED */ 2782 case HAMMER_MEM_RECORD_DEL: 2783 /* 2784 * Follow through and issue the on-disk deletion 2785 */ 2786 break; 2787 } 2788 } 2789 2790 /* 2791 * If DELETED_FE is set special handling is needed for directory 2792 * entries. Dependant pieces related to the directory entry may 2793 * have already been synced to disk. If this occurs we have to 2794 * sync the directory entry and then change the in-memory record 2795 * from an ADD to a DELETE to cover the fact that it's been 2796 * deleted by the frontend. 2797 * 2798 * A directory delete covering record (MEM_RECORD_DEL) can never 2799 * be deleted by the frontend. 2800 * 2801 * Any other record type (aka DATA) can be deleted by the frontend. 2802 * XXX At the moment the flusher must skip it because there may 2803 * be another data record in the flush group for the same block, 2804 * meaning that some frontend data changes can leak into the backend's 2805 * synchronization point. 2806 */ 2807 if (record->flags & HAMMER_RECF_DELETED_FE) { 2808 if (record->type == HAMMER_MEM_RECORD_ADD) { 2809 /* 2810 * Convert a front-end deleted directory-add to 2811 * a directory-delete entry later. 2812 */ 2813 record->flags |= HAMMER_RECF_CONVERT_DELETE; 2814 } else { 2815 /* 2816 * Dispose of the record (race case). Mark as 2817 * deleted by backend (and not committed). 2818 */ 2819 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL); 2820 record->flags |= HAMMER_RECF_DELETED_BE; 2821 ++record->ip->rec_generation; 2822 error = 0; 2823 goto done; 2824 } 2825 } 2826 2827 /* 2828 * Assign the create_tid for new records. Deletions already 2829 * have the record's entire key properly set up. 2830 */ 2831 if (record->type != HAMMER_MEM_RECORD_DEL) { 2832 record->leaf.base.create_tid = trans->tid; 2833 record->leaf.create_ts = trans->time32; 2834 } 2835 2836 /* 2837 * This actually moves the record to the on-media B-Tree. We 2838 * must also generate REDO_TERM entries in the UNDO/REDO FIFO 2839 * indicating that the related REDO_WRITE(s) have been committed. 
2840 * 2841 * During recovery any REDO_TERM's within the nominal recovery span 2842 * are ignored since the related meta-data is being undone, causing 2843 * any matching REDO_WRITEs to execute. The REDO_TERMs outside 2844 * the nominal recovery span will match against REDO_WRITEs and 2845 * prevent them from being executed (because the meta-data has 2846 * already been synchronized). 2847 */ 2848 if (record->flags & HAMMER_RECF_REDO) { 2849 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA); 2850 hammer_generate_redo(trans, record->ip, 2851 record->leaf.base.key - 2852 record->leaf.data_len, 2853 HAMMER_REDO_TERM_WRITE, 2854 NULL, 2855 record->leaf.data_len); 2856 } 2857 2858 for (;;) { 2859 error = hammer_ip_sync_record_cursor(cursor, record); 2860 if (error != EDEADLK) 2861 break; 2862 hammer_done_cursor(cursor); 2863 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0], 2864 record->ip); 2865 if (error) 2866 break; 2867 } 2868 record->flags &= ~HAMMER_RECF_CONVERT_DELETE; 2869 2870 if (error) 2871 error = -error; 2872 done: 2873 hammer_flush_record_done(record, error); 2874 2875 /* 2876 * Do partial finalization if we have built up too many dirty 2877 * buffers. Otherwise a buffer cache deadlock can occur when 2878 * doing things like creating tens of thousands of tiny files. 2879 * 2880 * We must release our cursor lock to avoid a 3-way deadlock 2881 * due to the exclusive sync lock the finalizer must get. 2882 * 2883 * WARNING: See warnings in hammer_unlock_cursor() function. 2884 */ 2885 if (hammer_flusher_meta_limit(hmp) || 2886 vm_page_count_severe()) { 2887 hammer_unlock_cursor(cursor); 2888 hammer_flusher_finalize(trans, 0); 2889 hammer_lock_cursor(cursor); 2890 } 2891 return(error); 2892 } 2893 2894 /* 2895 * Backend function called by the flusher to sync an inode to media. 2896 */ 2897 int 2898 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip) 2899 { 2900 struct hammer_cursor cursor; 2901 hammer_node_t tmp_node; 2902 hammer_record_t depend; 2903 hammer_record_t next; 2904 int error, tmp_error; 2905 u_int64_t nlinks; 2906 2907 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0) 2908 return(0); 2909 2910 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip); 2911 if (error) 2912 goto done; 2913 2914 /* 2915 * Any directory records referencing this inode which are not in 2916 * our current flush group must adjust our nlink count for the 2917 * purposes of synchronizating to disk. 2918 * 2919 * Records which are in our flush group can be unlinked from our 2920 * inode now, potentially allowing the inode to be physically 2921 * deleted. 2922 * 2923 * This cannot block. 2924 */ 2925 nlinks = ip->ino_data.nlinks; 2926 next = TAILQ_FIRST(&ip->target_list); 2927 while ((depend = next) != NULL) { 2928 next = TAILQ_NEXT(depend, target_entry); 2929 if (depend->flush_state == HAMMER_FST_FLUSH && 2930 depend->flush_group == ip->flush_group) { 2931 /* 2932 * If this is an ADD that was deleted by the frontend 2933 * the frontend nlinks count will have already been 2934 * decremented, but the backend is going to sync its 2935 * directory entry and must account for it. The 2936 * record will be converted to a delete-on-disk when 2937 * it gets synced. 2938 * 2939 * If the ADD was not deleted by the frontend we 2940 * can remove the dependancy from our target_list. 
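			 *
			 * An illustrative summary of the nlink adjustments
			 * made by this loop (the code itself is
			 * authoritative):
			 *
			 *	in our group, ADD deleted by frontend -> ++nlinks
			 *	in our group, still live              -> unlink from
			 *	                                         target_list
			 *	not in our group, live ADD            -> --nlinks
			 *	not in our group, live DEL            -> ++nlinks
			 *	anything else                         -> unchanged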
2941 */ 2942 if (depend->flags & HAMMER_RECF_DELETED_FE) { 2943 ++nlinks; 2944 } else { 2945 TAILQ_REMOVE(&ip->target_list, depend, 2946 target_entry); 2947 depend->target_ip = NULL; 2948 } 2949 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) { 2950 /* 2951 * Not part of our flush group and not deleted by 2952 * the front-end, adjust the link count synced to 2953 * the media (undo what the frontend did when it 2954 * queued the record). 2955 */ 2956 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0); 2957 switch(depend->type) { 2958 case HAMMER_MEM_RECORD_ADD: 2959 --nlinks; 2960 break; 2961 case HAMMER_MEM_RECORD_DEL: 2962 ++nlinks; 2963 break; 2964 default: 2965 break; 2966 } 2967 } 2968 } 2969 2970 /* 2971 * Set dirty if we had to modify the link count. 2972 */ 2973 if (ip->sync_ino_data.nlinks != nlinks) { 2974 KKASSERT((int64_t)nlinks >= 0); 2975 ip->sync_ino_data.nlinks = nlinks; 2976 ip->sync_flags |= HAMMER_INODE_DDIRTY; 2977 } 2978 2979 /* 2980 * If there is a trunction queued destroy any data past the (aligned) 2981 * truncation point. Userland will have dealt with the buffer 2982 * containing the truncation point for us. 2983 * 2984 * We don't flush pending frontend data buffers until after we've 2985 * dealt with the truncation. 2986 */ 2987 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) { 2988 /* 2989 * Interlock trunc_off. The VOP front-end may continue to 2990 * make adjustments to it while we are blocked. 2991 */ 2992 off_t trunc_off; 2993 off_t aligned_trunc_off; 2994 int blkmask; 2995 2996 trunc_off = ip->sync_trunc_off; 2997 blkmask = hammer_blocksize(trunc_off) - 1; 2998 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask; 2999 3000 /* 3001 * Delete any whole blocks on-media. The front-end has 3002 * already cleaned out any partial block and made it 3003 * pending. The front-end may have updated trunc_off 3004 * while we were blocked so we only use sync_trunc_off. 3005 * 3006 * This operation can blow out the buffer cache, EWOULDBLOCK 3007 * means we were unable to complete the deletion. The 3008 * deletion will update sync_trunc_off in that case. 3009 */ 3010 error = hammer_ip_delete_range(&cursor, ip, 3011 aligned_trunc_off, 3012 0x7FFFFFFFFFFFFFFFLL, 2); 3013 if (error == EWOULDBLOCK) { 3014 ip->flags |= HAMMER_INODE_WOULDBLOCK; 3015 error = 0; 3016 goto defer_buffer_flush; 3017 } 3018 3019 if (error) 3020 goto done; 3021 3022 /* 3023 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO. 3024 * 3025 * XXX we do this even if we did not previously generate 3026 * a REDO_TRUNC record. This operation may enclosed the 3027 * range for multiple prior truncation entries in the REDO 3028 * log. 3029 */ 3030 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR && 3031 (ip->flags & HAMMER_INODE_RDIRTY)) { 3032 hammer_generate_redo(trans, ip, aligned_trunc_off, 3033 HAMMER_REDO_TERM_TRUNC, 3034 NULL, 0); 3035 } 3036 3037 /* 3038 * Clear the truncation flag on the backend after we have 3039 * completed the deletions. Backend data is now good again 3040 * (including new records we are about to sync, below). 3041 * 3042 * Leave sync_trunc_off intact. As we write additional 3043 * records the backend will update sync_trunc_off. This 3044 * tells the backend whether it can skip the overwrite 3045 * test. This should work properly even when the backend 3046 * writes full blocks where the truncation point straddles 3047 * the block because the comparison is against the base 3048 * offset of the record. 
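		 *
		 * For reference, aligned_trunc_off above rounds the
		 * truncation point up to the next block boundary before
		 * the range deletion; e.g. assuming a 16K block at that
		 * offset (blkmask 0x3fff), a trunc_off of 100000 becomes
		 * 114688.  The partial block below the truncation point
		 * was already handled by the frontend's pending buffer.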
                 */
                ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
        } else {
                error = 0;
        }

        /*
         * Now sync related records.  These will typically be directory
         * entries, records tracking direct-writes, or delete-on-disk records.
         */
        if (error == 0) {
                tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                    hammer_sync_record_callback, &cursor);
                if (tmp_error < 0)
                        tmp_error = -tmp_error; /* convert to positive errno */
                if (tmp_error)
                        error = tmp_error;
        }
        hammer_cache_node(&ip->cache[1], cursor.node);

        /*
         * Re-seek for inode update, assuming our cache hasn't been ripped
         * out from under us.
         */
        if (error == 0) {
                tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
                if (tmp_node) {
                        hammer_cursor_downgrade(&cursor);
                        hammer_lock_sh(&tmp_node->lock);
                        if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
                                hammer_cursor_seek(&cursor, tmp_node, 0);
                        hammer_unlock(&tmp_node->lock);
                        hammer_rel_node(tmp_node);
                }
                error = 0;
        }

        /*
         * If we are deleting the inode the frontend had better not have
         * any active references on elements making up the inode.
         *
         * The call to hammer_ip_delete_clean() cleans up auxiliary records
         * but not DB or DATA records.  Those must have already been deleted
         * by the normal truncation mechanic.
         */
        if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
            RB_EMPTY(&ip->rec_tree) &&
            (ip->sync_flags & HAMMER_INODE_DELETING) &&
            (ip->flags & HAMMER_INODE_DELETED) == 0) {
                int count1 = 0;

                error = hammer_ip_delete_clean(&cursor, ip, &count1);
                if (error == 0) {
                        ip->flags |= HAMMER_INODE_DELETED;
                        ip->sync_flags &= ~HAMMER_INODE_DELETING;
                        ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                        KKASSERT(RB_EMPTY(&ip->rec_tree));

                        /*
                         * Set delete_tid in both the frontend and backend
                         * copy of the inode record.  The DELETED flag handles
                         * this, do not set DDIRTY.
                         */
                        ip->ino_leaf.base.delete_tid = trans->tid;
                        ip->sync_ino_leaf.base.delete_tid = trans->tid;
                        ip->ino_leaf.delete_ts = trans->time32;
                        ip->sync_ino_leaf.delete_ts = trans->time32;

                        /*
                         * Adjust the inode count in the volume header
                         */
                        hammer_sync_lock_sh(trans);
                        if (ip->flags & HAMMER_INODE_ONDISK) {
                                hammer_modify_volume_field(trans,
                                                trans->rootvol,
                                                vol0_stat_inodes);
                                --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        if (error)
                goto done;
        ip->sync_flags &= ~HAMMER_INODE_BUFS;

defer_buffer_flush:
        /*
         * Now update the inode's on-disk inode-data and/or on-disk record.
         * DELETED and ONDISK are managed only in ip->flags.
         *
         * In the case of a deferred buffer flush we still update the on-disk
         * inode to satisfy visibility requirements if there happen to be
         * directory dependancies.
         */
        switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
        case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
                /*
                 * If deleted and on-disk, don't set any additional flags.
                 * The delete flag takes care of things.
                 *
                 * Clear flags which may have been set by the frontend.
3154 */ 3155 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3156 HAMMER_INODE_SDIRTY | 3157 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3158 HAMMER_INODE_DELETING); 3159 break; 3160 case HAMMER_INODE_DELETED: 3161 /* 3162 * Take care of the case where a deleted inode was never 3163 * flushed to the disk in the first place. 3164 * 3165 * Clear flags which may have been set by the frontend. 3166 */ 3167 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3168 HAMMER_INODE_SDIRTY | 3169 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3170 HAMMER_INODE_DELETING); 3171 while (RB_ROOT(&ip->rec_tree)) { 3172 hammer_record_t record = RB_ROOT(&ip->rec_tree); 3173 hammer_ref(&record->lock); 3174 KKASSERT(hammer_oneref(&record->lock)); 3175 record->flags |= HAMMER_RECF_DELETED_BE; 3176 ++record->ip->rec_generation; 3177 hammer_rel_mem_record(record); 3178 } 3179 break; 3180 case HAMMER_INODE_ONDISK: 3181 /* 3182 * If already on-disk, do not set any additional flags. 3183 */ 3184 break; 3185 default: 3186 /* 3187 * If not on-disk and not deleted, set DDIRTY to force 3188 * an initial record to be written. 3189 * 3190 * Also set the create_tid in both the frontend and backend 3191 * copy of the inode record. 3192 */ 3193 ip->ino_leaf.base.create_tid = trans->tid; 3194 ip->ino_leaf.create_ts = trans->time32; 3195 ip->sync_ino_leaf.base.create_tid = trans->tid; 3196 ip->sync_ino_leaf.create_ts = trans->time32; 3197 ip->sync_flags |= HAMMER_INODE_DDIRTY; 3198 break; 3199 } 3200 3201 /* 3202 * If DDIRTY or SDIRTY is set, write out a new record. 3203 * If the inode is already on-disk the old record is marked as 3204 * deleted. 3205 * 3206 * If DELETED is set hammer_update_inode() will delete the existing 3207 * record without writing out a new one. 3208 * 3209 * If *ONLY* the ITIMES flag is set we can update the record in-place. 3210 */ 3211 if (ip->flags & HAMMER_INODE_DELETED) { 3212 error = hammer_update_inode(&cursor, ip); 3213 } else 3214 if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) && 3215 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) { 3216 error = hammer_update_itimes(&cursor, ip); 3217 } else 3218 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY | 3219 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) { 3220 error = hammer_update_inode(&cursor, ip); 3221 } 3222 done: 3223 if (ip->flags & HAMMER_INODE_MODMASK) 3224 hammer_inode_dirty(ip); 3225 if (error) { 3226 hammer_critical_error(ip->hmp, ip, error, 3227 "while syncing inode"); 3228 } 3229 hammer_done_cursor(&cursor); 3230 return(error); 3231 } 3232 3233 /* 3234 * This routine is called when the OS is no longer actively referencing 3235 * the inode (but might still be keeping it cached), or when releasing 3236 * the last reference to an inode. 3237 * 3238 * At this point if the inode's nlinks count is zero we want to destroy 3239 * it, which may mean destroying it on-media too. 3240 */ 3241 void 3242 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp) 3243 { 3244 struct vnode *vp; 3245 3246 /* 3247 * Set the DELETING flag when the link count drops to 0 and the 3248 * OS no longer has any opens on the inode. 3249 * 3250 * The backend will clear DELETING (a mod flag) and set DELETED 3251 * (a state flag) when it is actually able to perform the 3252 * operation. 3253 * 3254 * Don't reflag the deletion if the flusher is currently syncing 3255 * one that was already flagged. 
A previously set DELETING flag 3256 * may bounce around flags and sync_flags until the operation is 3257 * completely done. 3258 * 3259 * Do not attempt to modify a snapshot inode (one set to read-only). 3260 */ 3261 if (ip->ino_data.nlinks == 0 && 3262 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) { 3263 ip->flags |= HAMMER_INODE_DELETING; 3264 ip->flags |= HAMMER_INODE_TRUNCATED; 3265 ip->trunc_off = 0; 3266 vp = NULL; 3267 if (getvp) { 3268 if (hammer_get_vnode(ip, &vp) != 0) 3269 return; 3270 } 3271 3272 /* 3273 * Final cleanup 3274 */ 3275 if (ip->vp) 3276 nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0, 0); 3277 if (ip->flags & HAMMER_INODE_MODMASK) 3278 hammer_inode_dirty(ip); 3279 if (getvp) 3280 vput(vp); 3281 } 3282 } 3283 3284 /* 3285 * After potentially resolving a dependancy the inode is tested 3286 * to determine whether it needs to be reflushed. 3287 */ 3288 void 3289 hammer_test_inode(hammer_inode_t ip) 3290 { 3291 if (ip->flags & HAMMER_INODE_REFLUSH) { 3292 ip->flags &= ~HAMMER_INODE_REFLUSH; 3293 hammer_ref(&ip->lock); 3294 if (ip->flags & HAMMER_INODE_RESIGNAL) { 3295 ip->flags &= ~HAMMER_INODE_RESIGNAL; 3296 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 3297 } else { 3298 hammer_flush_inode(ip, 0); 3299 } 3300 hammer_rel_inode(ip, 0); 3301 } 3302 } 3303 3304 /* 3305 * Clear the RECLAIM flag on an inode. This occurs when the inode is 3306 * reassociated with a vp or just before it gets freed. 3307 * 3308 * Pipeline wakeups to threads blocked due to an excessive number of 3309 * detached inodes. This typically occurs when atime updates accumulate 3310 * while scanning a directory tree. 3311 */ 3312 static void 3313 hammer_inode_wakereclaims(hammer_inode_t ip) 3314 { 3315 struct hammer_reclaim *reclaim; 3316 hammer_mount_t hmp = ip->hmp; 3317 3318 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) 3319 return; 3320 3321 --hammer_count_reclaims; 3322 --hmp->count_reclaims; 3323 ip->flags &= ~HAMMER_INODE_RECLAIM; 3324 3325 if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) { 3326 KKASSERT(reclaim->count > 0); 3327 if (--reclaim->count == 0) { 3328 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry); 3329 wakeup(reclaim); 3330 } 3331 } 3332 } 3333 3334 /* 3335 * Setup our reclaim pipeline. We only let so many detached (and dirty) 3336 * inodes build up before we start blocking. This routine is called 3337 * if a new inode is created or an inode is loaded from media. 3338 * 3339 * When we block we don't care *which* inode has finished reclaiming, 3340 * as long as one does. 3341 * 3342 * The reclaim pipeline is primarily governed by the auto-flush which is 3343 * 1/4 hammer_limit_reclaims. We don't want to block if the count is 3344 * less than 1/2 hammer_limit_reclaims. From 1/2 to full count is 3345 * dynamically governed. 3346 */ 3347 void 3348 hammer_inode_waitreclaims(hammer_transaction_t trans) 3349 { 3350 hammer_mount_t hmp = trans->hmp; 3351 struct hammer_reclaim reclaim; 3352 int lower_limit; 3353 3354 /* 3355 * Track inode load, delay if the number of reclaiming inodes is 3356 * between 2/4 and 4/4 hammer_limit_reclaims, depending. 
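	 *
	 * An illustrative example of the threshold computed below,
	 * assuming hammer_limit_reclaims is 4000: a process whose decayed
	 * stats->count is 500 begins blocking once count_reclaims reaches
	 * 3500, a very active process (count capped at 2000) blocks at
	 * 2000, and a thread with no process context blocks at 3000
	 * (3/4 of the limit).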
3357 */ 3358 if (curthread->td_proc) { 3359 struct hammer_inostats *stats; 3360 3361 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid); 3362 ++stats->count; 3363 3364 if (stats->count > hammer_limit_reclaims / 2) 3365 stats->count = hammer_limit_reclaims / 2; 3366 lower_limit = hammer_limit_reclaims - stats->count; 3367 if (hammer_debug_general & 0x10000) { 3368 kprintf("pid %5d limit %d\n", 3369 (int)curthread->td_proc->p_pid, lower_limit); 3370 } 3371 } else { 3372 lower_limit = hammer_limit_reclaims * 3 / 4; 3373 } 3374 if (hmp->count_reclaims >= lower_limit) { 3375 reclaim.count = 1; 3376 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry); 3377 tsleep(&reclaim, 0, "hmrrcm", hz); 3378 if (reclaim.count > 0) 3379 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry); 3380 } 3381 } 3382 3383 /* 3384 * Keep track of reclaim statistics on a per-pid basis using a loose 3385 * 4-way set associative hash table. Collisions inherit the count of 3386 * the previous entry. 3387 * 3388 * NOTE: We want to be careful here to limit the chain size. If the chain 3389 * size is too large a pid will spread its stats out over too many 3390 * entries under certain types of heavy filesystem activity and 3391 * wind up not delaying long enough. 3392 */ 3393 static 3394 struct hammer_inostats * 3395 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid) 3396 { 3397 struct hammer_inostats *stats; 3398 int delta; 3399 int chain; 3400 static volatile int iterator; /* we don't care about MP races */ 3401 3402 /* 3403 * Chain up to 4 times to find our entry. 3404 */ 3405 for (chain = 0; chain < 4; ++chain) { 3406 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK]; 3407 if (stats->pid == pid) 3408 break; 3409 } 3410 3411 /* 3412 * Replace one of the four chaining entries with our new entry. 3413 */ 3414 if (chain == 4) { 3415 stats = &hmp->inostats[(pid + (iterator++ & 3)) & 3416 HAMMER_INOSTATS_HMASK]; 3417 stats->pid = pid; 3418 } 3419 3420 /* 3421 * Decay the entry 3422 */ 3423 if (stats->count && stats->ltick != ticks) { 3424 delta = ticks - stats->ltick; 3425 stats->ltick = ticks; 3426 if (delta <= 0 || delta > hz * 60) 3427 stats->count = 0; 3428 else 3429 stats->count = stats->count * hz / (hz + delta); 3430 } 3431 if (hammer_debug_general & 0x10000) 3432 kprintf("pid %5d stats %d\n", (int)pid, stats->count); 3433 return (stats); 3434 } 3435 3436 #if 0 3437 3438 /* 3439 * XXX not used, doesn't work very well due to the large batching nature 3440 * of flushes. 3441 * 3442 * A larger then normal backlog of inodes is sitting in the flusher, 3443 * enforce a general slowdown to let it catch up. This routine is only 3444 * called on completion of a non-flusher-related transaction which 3445 * performed B-Tree node I/O. 3446 * 3447 * It is possible for the flusher to stall in a continuous load. 3448 * blogbench -i1000 -o seems to do a good job generating this sort of load. 3449 * If the flusher is unable to catch up the inode count can bloat until 3450 * we run out of kvm. 3451 * 3452 * This is a bit of a hack. 3453 */ 3454 void 3455 hammer_inode_waithard(hammer_mount_t hmp) 3456 { 3457 /* 3458 * Hysteresis. 
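	 *
	 * We enter the FLUSH_RECOVERY slowdown only when the reclaim
	 * count has reached the full hammer_limit_reclaims and the
	 * queued-inode count has reached 1/10 of all inodes, and we do
	 * not leave it until the reclaim count drops below half the
	 * limit and the queue drops below 1/20, so the mode does not
	 * flap around a single threshold.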
3459 */ 3460 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) { 3461 if (hmp->count_reclaims < hammer_limit_reclaims / 2 && 3462 hmp->count_iqueued < hmp->count_inodes / 20) { 3463 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY; 3464 return; 3465 } 3466 } else { 3467 if (hmp->count_reclaims < hammer_limit_reclaims || 3468 hmp->count_iqueued < hmp->count_inodes / 10) { 3469 return; 3470 } 3471 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY; 3472 } 3473 3474 /* 3475 * Block for one flush cycle. 3476 */ 3477 hammer_flusher_wait_next(hmp); 3478 } 3479 3480 #endif 3481