/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_free_inode(hammer_inode_t ip);
static void hammer_flush_inode_core(hammer_inode_t ip,
                                    hammer_flush_group_t flg, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                                      hammer_flush_group_t flg);
static int hammer_setup_parent_inodes_helper(hammer_record_t record,
                                      int depth, hammer_flush_group_t flg);
static void hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
                                      pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->redo_fifo_start < ip2->redo_fifo_start)
                return(-1);
        if (ip1->redo_fifo_start > ip2->redo_fifo_start)
                return(1);
        return(0);
}
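/*
 * Note (summary, not from the original source): the comparator above gives
 * the inode RB-Tree a compound sort key of (obj_localization, obj_id,
 * obj_asof), in decreasing priority.  All snapshots of a given object
 * therefore sort adjacently, which is what allows
 * hammer_inode_info_cmp_all_history() below to ignore the asof field
 * entirely.
 */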
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        hammer_mount_t hmp;

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                lwkt_reltoken(&hmp->fs_token);
                vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaims;
                        ++hmp->count_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_unlock(&ip->lock);
                hammer_rel_inode(ip, 1);
                lwkt_reltoken(&hmp->fs_token);
        }
        return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vsetflags(vp, VROOT);
                                else
                                        vsetflags(vp, VPFSROOT);
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG) {
                                vinitvmio(vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }

                /*
                 * Interlock vnode clearing.  This does not prevent the
                 * vnode from going into a reclaimed state but it does
                 * prevent it from being destroyed or reused so the vget()
                 * will properly fail.
                 */
                hammer_lock_ex(&ip->lock);
                if ((vp = ip->vp) == NULL) {
                        hammer_unlock(&ip->lock);
                        continue;
                }
                vhold_interlocked(vp);
                hammer_unlock(&ip->lock);

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp) {
                                vdrop(vp);
                                break;
                        }
                        vput(vp);
                }
                vdrop(vp);
        }
        *vpp = vp;
        return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference them, and issue the related call-back.  This routine is
 * used for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These
 * functions do not attach or detach the related vnode (use
 * hammer_get_vnode() for that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;


        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);

        /*
         * NEWINODE is only set if the inode becomes dirty later,
         * setting it here just leads to unnecessary stalls.
         *
         * trans->flags |= HAMMER_TRANSF_NEWINODE;
         */
        return (ip);
}
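/*
 * Note (summary of the node caches used above, per the comments in
 * hammer_get_inode(), for readability): ip->cache[0] remembers the B-Tree
 * node holding the object's inode record and ip->cache[1] a node near its
 * data.  A directory additionally uses dip->cache[2] as a shortcut for
 * looking up the inodes it references, and dip->cache[3] (populated by the
 * strategy code in hammer_vnops.c) is used here to seed a new child's
 * data cache.
 */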
/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        *errorp = 0;
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater, directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }
        if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIRHASH_ALG1;
                }
        }

        /*
         * Set up the ".." pointer.  This only needs to be done for
         * directories but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(hammer_oneref(&ip->lock));
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}
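/*
 * Note (summary of the lookup above, for readability): each PFS is
 * described by a HAMMER_RECTYPE_PFS record stored under the real root
 * inode at HAMMER_DEF_LOCALIZATION, with the record key set to the PFS's
 * localization value, so hammer_load_pseudofs() can find it with a single
 * as-of B-Tree lookup keyed on that value.
 */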
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred,
                                            NULL, NULL, 0,
                                            pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (hammer_isactive(&ip->lock) == 2 && ip->vp)
                vclean_unlocked(ip->vp);
        if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
                res = 0;
        else
                res = -1;       /* stop, someone is using the inode */
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;

        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                           hammer_inode_pfs_cmp,
                                           hammer_unload_pseudofs_callback,
                                           &localization);
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_rel(&pfsm->lock);
        if (hammer_norefs(&pfsm->lock)) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, hmp->m_misc);
        }
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean up
                 * the state before releasing so we do not have to set up
                 * a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * Note:  The record was never on the inode's record tree
                 * so just wave our hands importantly and destroy it.
                 */
                record->flags |= HAMMER_RECF_COMMITTED;
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flush_state = HAMMER_FST_IDLE;
                ++ip->rec_generation;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_SDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_SDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}
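/*
 * Note (summary of hammer_update_inode() above, for readability): when the
 * backend syncs an inode that already exists on media it first deletes the
 * on-disk inode record (setting DELONDISK), then writes a replacement
 * record with create_tid = trans->tid via a throw-away in-memory record,
 * retrying the cursor on EDEADLK at each step.  Only once the new record
 * is out does it clear the DDIRTY/SDIRTY/ATIME/MTIME sync flags and, on
 * the first application of the inode, bump vol0_stat_inodes.
 */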
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                }
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        /*hammer_mount_t hmp = ip->hmp;*/

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (hammer_oneref(&ip->lock)) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                hammer_flush_inode(ip, 0);
                        } else if (hammer_oneref(&ip->lock)) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(hammer_isactive(&ip->lock) >= 1);
                        if (hammer_isactive(&ip->lock) > 1) {
                                hammer_rel(&ip->lock);
                                break;
                        }
                }
        }
}
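/*
 * Note (summary, for readability): hammer_rel_inode() handles last-ref
 * disposition in a retry loop.  A still-dirty inode (HAMMER_INODE_MODMASK
 * set) is handed to the flusher rather than unloaded, and
 * hammer_unload_inode() below runs only once the inode is completely
 * clean and down to one reference.
 */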
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(hammer_oneref(&ip->lock),
                ("hammer_unload_inode: %d refs\n", hammer_isactive(&ip->lock)));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(hammer_notlocked(&ip->lock));
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        if (ip->flags & HAMMER_INODE_RDIRTY) {
                RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
                ip->flags &= ~HAMMER_INODE_RDIRTY;
        }
        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_free_inode(ip);
        return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
        hammer_record_t rec;

        /*
         * Get rid of the inode's in-memory records, regardless of their
         * state, and clear the mod-mask.
         */
        while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
                TAILQ_REMOVE(&ip->target_list, rec, target_entry);
                rec->target_ip = NULL;
                if (rec->flush_state == HAMMER_FST_SETUP)
                        rec->flush_state = HAMMER_FST_IDLE;
        }
        while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
                if (rec->flush_state == HAMMER_FST_FLUSH)
                        --rec->flush_group->refs;
                else
                        hammer_ref(&rec->lock);
                KKASSERT(hammer_oneref(&rec->lock));
                rec->flush_state = HAMMER_FST_IDLE;
                rec->flush_group = NULL;
                rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
                rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
                ++ip->rec_generation;
                hammer_rel_mem_record(rec);
        }
        ip->flags &= ~HAMMER_INODE_MODMASK;
        ip->sync_flags &= ~HAMMER_INODE_MODMASK;
        KKASSERT(ip->vp == NULL);

        /*
         * Remove the inode from any flush group, force it idle.  FLUSH
         * and SETUP states have an inode ref.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_FLUSH:
                RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
                --ip->flush_group->refs;
                ip->flush_group = NULL;
                /* fall through */
        case HAMMER_FST_SETUP:
                hammer_rel(&ip->lock);
                ip->flush_state = HAMMER_FST_IDLE;
                /* fall through */
        case HAMMER_FST_IDLE:
                break;
        }

        /*
         * There shouldn't be any associated vnode.  The unload needs at
         * least one ref; if we do have a vp, steal its ip ref.
         */
        if (ip->vp) {
                kprintf("hammer_destroy_inode_callback: Unexpected "
                        "vnode association ip %p vp %p\n", ip, ip->vp);
                ip->vp->v_data = NULL;
                ip->vp = NULL;
        } else {
                hammer_ref(&ip->lock);
        }
        hammer_unload_inode(ip);
        return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
 *                      and not including size changes due to write-append
 *                      (but other size changes are included).
 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
 *                      write-append.
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
        /*
         * ronly of 0 or 2 does not trigger assertion.
         * 2 is a special error state
         */
        KKASSERT(ip->hmp->ronly != 1 ||
                  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                            HAMMER_INODE_SDIRTY |
                            HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
                            HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        /*
         * Set the NEWINODE flag in the transaction if the inode
         * transitions to a dirty state.  This is used to track
         * the load on the inode cache.
         */
        if (trans &&
            (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
            (flags & HAMMER_INODE_MODMASK)) {
                trans->flags |= HAMMER_TRANSF_NEWINODE;
        }

        ip->flags |= flags;
}

/*
 * Attempt to quickly update the atime for a hammer inode.  Return 0 on
 * success, -1 on failure.
 *
 * We attempt to update the atime with only the ip lock and not the
 * whole filesystem lock in order to improve concurrency.  We can only
 * do this safely if the ATIME flag is already pending on the inode.
 *
 * This function is called via a vnops path (ip pointer is stable) without
 * fs_token held.
 */
int
hammer_update_atime_quick(hammer_inode_t ip)
{
        struct timeval tv;
        int res = -1;

        if ((ip->flags & HAMMER_INODE_RO) ||
            (ip->hmp->mp->mnt_flag & MNT_NOATIME)) {
                /*
                 * Silently indicate success on read-only mount/snap
                 */
                res = 0;
        } else if (ip->flags & HAMMER_INODE_ATIME) {
                /*
                 * Double check with inode lock held against backend.  This
                 * is only safe if all we need to do is update
                 * ino_data.atime.
                 */
                getmicrotime(&tv);
                hammer_lock_ex(&ip->lock);
                if (ip->flags & HAMMER_INODE_ATIME) {
                        ip->ino_data.atime =
                            (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
                        res = 0;
                }
                hammer_unlock(&ip->lock);
        }
        return res;
}

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return, if the inode could not be flushed due to a setup
 * dependency, it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
        hammer_mount_t hmp;
        hammer_flush_group_t flg;
        int good;

        /*
         * fill_flush_group is the first flush group we may be able to
         * continue filling.  It may be open or closed but it will always
         * be past the currently flushing (running) flg.
         *
         * next_flush_group is the next open flush group.
         */
        hmp = ip->hmp;
        while ((flg = hmp->fill_flush_group) != NULL) {
                KKASSERT(flg->running == 0);
                if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit &&
                    flg->total_count <= hammer_autoflush) {
                        break;
                }
                hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
                hammer_flusher_async(ip->hmp, flg);
        }
        if (flg == NULL) {
                flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
                flg->seq = hmp->flusher.next++;
                if (hmp->next_flush_group == NULL)
                        hmp->next_flush_group = flg;
                if (hmp->fill_flush_group == NULL)
                        hmp->fill_flush_group = flg;
                RB_INIT(&flg->flush_tree);
                TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
        }

        /*
         * Trivial 'nothing to flush' case.  If the inode is in a SETUP
         * state we have to put it back into an IDLE state so we can
         * drop the extra ref.
         *
         * If we have a parent dependency we must still fall through
         * so we can run it.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
                if (ip->flush_state == HAMMER_FST_SETUP &&
                    TAILQ_EMPTY(&ip->target_list)) {
                        ip->flush_state = HAMMER_FST_IDLE;
                        hammer_rel_inode(ip, 0);
                }
                if (ip->flush_state == HAMMER_FST_IDLE)
                        return;
        }

        /*
         * Our flush action will depend on the current state.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * We have no dependencies and can flush immediately.  Some
                 * of our children may not be flushable so we have to
                 * re-test with that additional knowledge.
                 */
                hammer_flush_inode_core(ip, flg, flags);
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Recurse upwards through dependencies via target_list
                 * and start their flusher actions going if possible.
                 *
                 * 'good' is our connectivity.  -1 means we have none and
                 * can't flush, 0 means there weren't any dependencies, and
                 * 1 means we have good connectivity.
                 */
                good = hammer_setup_parent_inodes(ip, 0, flg);

                if (good >= 0) {
                        /*
                         * We can continue if good >= 0.  Determine how
                         * many records under our inode can be flushed (and
                         * mark them).
                         */
                        hammer_flush_inode_core(ip, flg, flags);
                } else {
                        /*
                         * Parent has no connectivity, tell it to flush
                         * us as soon as it does.
                         *
                         * The REFLUSH flag is also needed to trigger
                         * dependency wakeups.
                         */
                        ip->flags |= HAMMER_INODE_CONN_DOWN |
                                     HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp, flg);
                        }
                }
                break;
        case HAMMER_FST_FLUSH:
                /*
                 * We are already flushing, flag the inode to reflush
                 * if needed after it completes its current flush.
                 *
                 * The REFLUSH flag is also needed to trigger
                 * dependency wakeups.
                 */
                if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
                        ip->flags |= HAMMER_INODE_REFLUSH;
                if (flags & HAMMER_FLUSH_SIGNAL) {
                        ip->flags |= HAMMER_INODE_RESIGNAL;
                        hammer_flusher_async(ip->hmp, flg);
                }
                break;
        }
}
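/*
 * Note (summary, for readability): an inode is in one of three flush
 * states.  HAMMER_FST_IDLE means it is not queued, HAMMER_FST_SETUP means
 * it has dependency records on its target_list but no flush group yet,
 * and HAMMER_FST_FLUSH means it has been assigned to a specific flush
 * group (ip->flush_group).  hammer_flush_inode() above either promotes
 * the inode to FLUSH via hammer_flush_inode_core(), or flags it with
 * REFLUSH/RESIGNAL so it is picked up again once its dependencies clear.
 */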
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                           hammer_flush_group_t flg)
{
        hammer_record_t depend;
        int good;
        int r;

        /*
         * If we hit our recursion limit and we have parent dependencies
         * we cannot continue.  Returning < 0 will cause us to be flagged
         * for reflush.  Returning -2 cuts off additional dependency checks
         * because they are likely to also hit the depth limit.
         *
         * We cannot return < 0 if there are no dependencies or there might
         * not be anything to wake up (ip).
         */
        if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
                kprintf("HAMMER Warning: depth limit reached on "
                        "setup recursion, inode %p %016llx\n",
                        ip, (long long)ip->obj_id);
                return(-2);
        }

        /*
         * Scan dependencies
         */
        good = 0;
        TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
                r = hammer_setup_parent_inodes_helper(depend, depth, flg);
                KKASSERT(depend->target_ip == ip);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;

                /*
                 * If we failed due to the recursion depth limit then stop
                 * now.
                 */
                if (r == -2)
                        break;
        }
        return(good);
}

/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip           = parent inode
 * record->target_ip    = child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
                                  hammer_flush_group_t flg)
{
        hammer_mount_t hmp;
        hammer_inode_t pip;
        int good;

        KKASSERT(record->flush_state != HAMMER_FST_IDLE);
        pip = record->ip;
        hmp = pip->hmp;

        /*
         * If the record is already flushing, is it in our flush group?
         *
         * If it is in our flush group but it is a general record or a
         * delete-on-disk, it does not improve our connectivity (return 0),
         * and if the target inode is not trying to destroy itself we can't
         * allow the operation yet anyway (the second return -1).
         */
        if (record->flush_state == HAMMER_FST_FLUSH) {
                /*
                 * If not in our flush group ask the parent to reflush
                 * us as soon as possible.
                 */
                if (record->flush_group != flg) {
                        pip->flags |= HAMMER_INODE_REFLUSH;
                        record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                        return(-1);
                }

                /*
                 * If in our flush group everything is already set up,
                 * just return whether the record will improve our
                 * visibility or not.
1913 */ 1914 if (record->type == HAMMER_MEM_RECORD_ADD) 1915 return(1); 1916 return(0); 1917 } 1918 1919 /* 1920 * It must be a setup record. Try to resolve the setup dependancies 1921 * by recursing upwards so we can place ip on the flush list. 1922 * 1923 * Limit ourselves to 20 levels of recursion to avoid blowing out 1924 * the kernel stack. If we hit the recursion limit we can't flush 1925 * until the parent flushes. The parent will flush independantly 1926 * on its own and ultimately a deep recursion will be resolved. 1927 */ 1928 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1929 1930 good = hammer_setup_parent_inodes(pip, depth + 1, flg); 1931 1932 /* 1933 * If good < 0 the parent has no connectivity and we cannot safely 1934 * flush the directory entry, which also means we can't flush our 1935 * ip. Flag us for downward recursion once the parent's 1936 * connectivity is resolved. Flag the parent for [re]flush or it 1937 * may not check for downward recursions. 1938 */ 1939 if (good < 0) { 1940 pip->flags |= HAMMER_INODE_REFLUSH; 1941 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1942 return(good); 1943 } 1944 1945 /* 1946 * We are go, place the parent inode in a flushing state so we can 1947 * place its record in a flushing state. Note that the parent 1948 * may already be flushing. The record must be in the same flush 1949 * group as the parent. 1950 */ 1951 if (pip->flush_state != HAMMER_FST_FLUSH) 1952 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION); 1953 KKASSERT(pip->flush_state == HAMMER_FST_FLUSH); 1954 1955 /* 1956 * It is possible for a rename to create a loop in the recursion 1957 * and revisit a record. This will result in the record being 1958 * placed in a flush state unexpectedly. This check deals with 1959 * the case. 1960 */ 1961 if (record->flush_state == HAMMER_FST_FLUSH) { 1962 if (record->type == HAMMER_MEM_RECORD_ADD) 1963 return(1); 1964 return(0); 1965 } 1966 1967 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1968 1969 #if 0 1970 if (record->type == HAMMER_MEM_RECORD_DEL && 1971 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) { 1972 /* 1973 * Regardless of flushing state we cannot sync this path if the 1974 * record represents a delete-on-disk but the target inode 1975 * is not ready to sync its own deletion. 1976 * 1977 * XXX need to count effective nlinks to determine whether 1978 * the flush is ok, otherwise removing a hardlink will 1979 * just leave the DEL record to rot. 1980 */ 1981 record->target_ip->flags |= HAMMER_INODE_REFLUSH; 1982 return(-1); 1983 } else 1984 #endif 1985 if (pip->flush_group == flg) { 1986 /* 1987 * Because we have not calculated nlinks yet we can just 1988 * set records to the flush state if the parent is in 1989 * the same flush group as we are. 1990 */ 1991 record->flush_state = HAMMER_FST_FLUSH; 1992 record->flush_group = flg; 1993 ++record->flush_group->refs; 1994 hammer_ref(&record->lock); 1995 1996 /* 1997 * A general directory-add contributes to our visibility. 1998 * 1999 * Otherwise it is probably a directory-delete or 2000 * delete-on-disk record and does not contribute to our 2001 * visbility (but we can still flush it). 2002 */ 2003 if (record->type == HAMMER_MEM_RECORD_ADD) 2004 return(1); 2005 return(0); 2006 } else { 2007 /* 2008 * If the parent is not in our flush group we cannot 2009 * flush this record yet, there is no visibility. 2010 * We tell the parent to reflush and mark ourselves 2011 * so the parent knows it should flush us too. 
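		 *
		 * (This is the same REFLUSH + CONN_DOWN combination used for
		 *  the not-in-our-flush-group case near the top of this
		 *  function.)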
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(-1);
	}
}

/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
	hammer_mount_t hmp = ip->hmp;
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = flg;
	++hmp->flusher.group_lock;
	++hmp->count_iqueued;
	++hammer_count_iqueued;
	++flg->total_count;
	hammer_redo_fifo_start_flush(ip);

#if 0
	/*
	 * We need to be able to vfsync/truncate from the backend.
	 *
	 * XXX Any truncation from the backend will acquire the vnode
	 * independently.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}
#endif

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
	if (flags & HAMMER_FLUSH_RECURSION) {
		/*
		 * If this is an upwards recursion we do not want to
		 * recurse down again!
		 */
		go_count = 1;
#if 0
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * No new records are added if we must complete a flush
		 * from a previous cycle, but we do have to move the records
		 * from the previous cycle to the current one.
		 */
#if 0
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
#endif
		go_count = 1;
#endif
	} else {
		/*
		 * Normal flush, scan records and bring them into the flush.
		 * Directory adds and deletes are usually skipped (they are
		 * grouped with the related inode rather than with the
		 * directory).
		 *
		 * go_count can be negative, which means the scan aborted
		 * due to the flush group being over-full and we should
		 * flush what we have.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			--hmp->count_iqueued;
			--hammer_count_iqueued;

			--flg->total_count;
			ip->flush_state = HAMMER_FST_SETUP;
			ip->flush_group = NULL;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_REFLUSH |
					     HAMMER_INODE_RESIGNAL;
			} else {
				ip->flags |= HAMMER_INODE_REFLUSH;
			}
#if 0
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}
#endif

			/*
			 * REFLUSH is needed to trigger dependency wakeups
			 * when an inode is in SETUP.
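			 *
			 * (hammer_test_inode() is the consumer: it checks
			 *  REFLUSH after a dependency is resolved and
			 *  restarts the flush.)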
2127 */ 2128 ip->flags |= HAMMER_INODE_REFLUSH; 2129 if (--hmp->flusher.group_lock == 0) 2130 wakeup(&hmp->flusher.group_lock); 2131 return; 2132 } 2133 } 2134 2135 /* 2136 * Snapshot the state of the inode for the backend flusher. 2137 * 2138 * We continue to retain save_trunc_off even when all truncations 2139 * have been resolved as an optimization to determine if we can 2140 * skip the B-Tree lookup for overwrite deletions. 2141 * 2142 * NOTE: The DELETING flag is a mod flag, but it is also sticky, 2143 * and stays in ip->flags. Once set, it stays set until the 2144 * inode is destroyed. 2145 */ 2146 if (ip->flags & HAMMER_INODE_TRUNCATED) { 2147 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0); 2148 ip->sync_trunc_off = ip->trunc_off; 2149 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL; 2150 ip->flags &= ~HAMMER_INODE_TRUNCATED; 2151 ip->sync_flags |= HAMMER_INODE_TRUNCATED; 2152 2153 /* 2154 * The save_trunc_off used to cache whether the B-Tree 2155 * holds any records past that point is not used until 2156 * after the truncation has succeeded, so we can safely 2157 * set it now. 2158 */ 2159 if (ip->save_trunc_off > ip->sync_trunc_off) 2160 ip->save_trunc_off = ip->sync_trunc_off; 2161 } 2162 ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK & 2163 ~HAMMER_INODE_TRUNCATED); 2164 ip->sync_ino_leaf = ip->ino_leaf; 2165 ip->sync_ino_data = ip->ino_data; 2166 ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED; 2167 #ifdef DEBUG_TRUNCATE 2168 if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp) 2169 kprintf("truncateS %016llx\n", ip->sync_trunc_off); 2170 #endif 2171 2172 /* 2173 * The flusher list inherits our inode and reference. 2174 */ 2175 KKASSERT(flg->running == 0); 2176 RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip); 2177 if (--hmp->flusher.group_lock == 0) 2178 wakeup(&hmp->flusher.group_lock); 2179 2180 /* 2181 * Auto-flush the group if it grows too large. Make sure the 2182 * inode reclaim wait pipeline continues to work. 2183 */ 2184 if (flg->total_count >= hammer_autoflush || 2185 flg->total_count >= hammer_limit_reclaims / 4) { 2186 if (hmp->fill_flush_group == flg) 2187 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry); 2188 hammer_flusher_async(hmp, flg); 2189 } 2190 } 2191 2192 /* 2193 * Callback for scan of ip->rec_tree. Try to include each record in our 2194 * flush. ip->flush_group has been set but the inode has not yet been 2195 * moved into a flushing state. 2196 * 2197 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on 2198 * both inodes. 2199 * 2200 * We return 1 for any record placed or found in FST_FLUSH, which prevents 2201 * the caller from shortcutting the flush. 2202 */ 2203 static int 2204 hammer_setup_child_callback(hammer_record_t rec, void *data) 2205 { 2206 hammer_flush_group_t flg; 2207 hammer_inode_t target_ip; 2208 hammer_inode_t ip; 2209 int r; 2210 2211 /* 2212 * Records deleted or committed by the backend are ignored. 2213 * Note that the flush detects deleted frontend records at 2214 * multiple points to deal with races. This is just the first 2215 * line of defense. The only time HAMMER_RECF_DELETED_FE cannot 2216 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it 2217 * messes up link-count calculations. 2218 * 2219 * NOTE: Don't get confused between record deletion and, say, 2220 * directory entry deletion. The deletion of a directory entry 2221 * which is on-media has nothing to do with the record deletion 2222 * flags. 
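	 *
	 * (The test below treats HAMMER_RECF_DELETED_FE, DELETED_BE and
	 *  COMMITTED the same way: the record is not added to the flush
	 *  group, we only report whether it already belongs to one.)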
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			  HAMMER_RECF_COMMITTED)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->flush_group == rec->ip->flush_group);
			r = 1;
		} else {
			r = 0;
		}
		return(r);
	}

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	flg = ip->flush_group;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * The record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = flg;
		++flg->refs;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * The record has a setup dependency.  These are typically
		 * directory entry adds and deletes.  Such entries will be
		 * flushed when their inodes are flushed so we do not
		 * usually have to add them to the flush here.  However,
		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
		 * it is asking us to flush this record (and it).
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

		/*
		 * If the target IP is already flushing in our group
		 * we could associate the record, but target_ip has
		 * already synced ino_data to sync_ino_data and we
		 * would also have to adjust nlinks.  Plus there are
		 * ordering issues for adds and deletes.
		 *
		 * Reflush downward if this is an ADD, and upward if
		 * this is a DEL.
		 */
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			if (rec->type == HAMMER_MEM_RECORD_ADD)
				ip->flags |= HAMMER_INODE_REFLUSH;
			else
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			break;
		}

		/*
		 * Target IP is not yet flushing.  This can get complex
		 * because we have to be careful about the recursion.
		 *
		 * Directories create an issue for us in that if a flush
		 * of a directory is requested the expectation is to flush
		 * any pending directory entries, but this will cause the
		 * related inodes to recursively flush as well.  We can't
		 * really defer the operation so we just grab as many as
		 * we can and rely on a reflush of the directory to pick
		 * up the rest.
		 */
#if 0
		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
			/*
			 * We aren't reclaiming and the target ip was not
			 * previously prevented from flushing due to this
			 * record dependency.  Do not flush this record.
			 */
			/*r = 0;*/
		} else
#endif
		if (flg->total_count + flg->refs >
		    ip->hmp->undo_rec_limit) {
			/*
			 * Our flush group is over-full and we risk blowing
			 * out the UNDO FIFO.  Stop the scan, flush what we
			 * have, then reflush the directory.
			 *
			 * The directory may be forced through multiple
			 * flush groups before it can be completely
			 * flushed.
			 */
			ip->flags |= HAMMER_INODE_RESIGNAL |
				     HAMMER_INODE_REFLUSH;
			r = -1;
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records; we have at least one in
			 * hand that we CAN deal with.
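			 *
			 * (The HAMMER_FLUSH_RECURSION flag passed to
			 *  hammer_flush_inode_core() below keeps this forced
			 *  flush from recursing back down into target_ip's
			 *  own record tree.)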
2328 */ 2329 rec->flush_state = HAMMER_FST_FLUSH; 2330 rec->flush_group = flg; 2331 ++flg->refs; 2332 hammer_ref(&rec->lock); 2333 hammer_flush_inode_core(target_ip, flg, 2334 HAMMER_FLUSH_RECURSION); 2335 r = 1; 2336 } else { 2337 /* 2338 * General or delete-on-disk record. 2339 * 2340 * XXX this needs help. If a delete-on-disk we could 2341 * disconnect the target. If the target has its own 2342 * dependancies they really need to be flushed. 2343 * 2344 * XXX 2345 */ 2346 rec->flush_state = HAMMER_FST_FLUSH; 2347 rec->flush_group = flg; 2348 ++flg->refs; 2349 hammer_ref(&rec->lock); 2350 hammer_flush_inode_core(target_ip, flg, 2351 HAMMER_FLUSH_RECURSION); 2352 r = 1; 2353 } 2354 break; 2355 case HAMMER_FST_FLUSH: 2356 /* 2357 * The record could be part of a previous flush group if the 2358 * inode is a directory (the record being a directory entry). 2359 * Once the flush group was closed a hammer_test_inode() 2360 * function can cause a new flush group to be setup, placing 2361 * the directory inode itself in a new flush group. 2362 * 2363 * When associated with a previous flush group we count it 2364 * as if it were in our current flush group, since it will 2365 * effectively be flushed by the time we flush our current 2366 * flush group. 2367 */ 2368 KKASSERT( 2369 rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY || 2370 rec->flush_group == flg); 2371 r = 1; 2372 break; 2373 } 2374 return(r); 2375 } 2376 2377 #if 0 2378 /* 2379 * This version just moves records already in a flush state to the new 2380 * flush group and that is it. 2381 */ 2382 static int 2383 hammer_syncgrp_child_callback(hammer_record_t rec, void *data) 2384 { 2385 hammer_inode_t ip = rec->ip; 2386 2387 switch(rec->flush_state) { 2388 case HAMMER_FST_FLUSH: 2389 KKASSERT(rec->flush_group == ip->flush_group); 2390 break; 2391 default: 2392 break; 2393 } 2394 return(0); 2395 } 2396 #endif 2397 2398 /* 2399 * Wait for a previously queued flush to complete. 2400 * 2401 * If a critical error occured we don't try to wait. 2402 */ 2403 void 2404 hammer_wait_inode(hammer_inode_t ip) 2405 { 2406 hammer_flush_group_t flg; 2407 2408 flg = NULL; 2409 2410 /* 2411 * The inode can be in a SETUP state in which case RESIGNAL 2412 * should be set. If RESIGNAL is not set then the previous 2413 * flush completed and a later operation placed the inode 2414 * in a passive setup state again, so we're done. 2415 * 2416 * The inode can be in a FLUSH state in which case we 2417 * can just wait for completion. 2418 */ 2419 while (ip->flush_state == HAMMER_FST_FLUSH || 2420 (ip->flush_state == HAMMER_FST_SETUP && 2421 (ip->flags & HAMMER_INODE_RESIGNAL))) { 2422 /* 2423 * Don't try to flush on a critical error 2424 */ 2425 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) 2426 break; 2427 2428 /* 2429 * If the inode was already being flushed its flg 2430 * may not have been queued to the backend. We 2431 * have to make sure it gets queued or we can wind 2432 * up blocked or deadlocked (particularly if we are 2433 * the vnlru thread). 2434 */ 2435 if (ip->flush_state == HAMMER_FST_FLUSH) { 2436 KKASSERT(ip->flush_group); 2437 if (ip->flush_group->closed == 0) { 2438 kprintf("hammer: debug: forcing async " 2439 "flush ip %016jx\n", 2440 (intmax_t)ip->obj_id); 2441 hammer_flusher_async(ip->hmp, 2442 ip->flush_group); 2443 continue; /* retest */ 2444 } 2445 } 2446 2447 /* 2448 * In a flush state with the flg queued to the backend 2449 * or in a setup state with RESIGNAL set, we can safely 2450 * wait. 
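		 *
		 * (The matching wakeup(&ip->flags) is issued by
		 *  hammer_flush_inode_done() when it clears
		 *  HAMMER_INODE_FLUSHW.)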
2451 */ 2452 ip->flags |= HAMMER_INODE_FLUSHW; 2453 tsleep(&ip->flags, 0, "hmrwin", 0); 2454 } 2455 2456 #if 0 2457 /* 2458 * The inode may have been in a passive setup state, 2459 * call flush to make sure we get signaled. 2460 */ 2461 if (ip->flush_state == HAMMER_FST_SETUP) 2462 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2463 #endif 2464 2465 } 2466 2467 /* 2468 * Called by the backend code when a flush has been completed. 2469 * The inode has already been removed from the flush list. 2470 * 2471 * A pipelined flush can occur, in which case we must re-enter the 2472 * inode on the list and re-copy its fields. 2473 */ 2474 void 2475 hammer_flush_inode_done(hammer_inode_t ip, int error) 2476 { 2477 hammer_mount_t hmp; 2478 int dorel; 2479 2480 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH); 2481 2482 hmp = ip->hmp; 2483 2484 /* 2485 * Auto-reflush if the backend could not completely flush 2486 * the inode. This fixes a case where a deferred buffer flush 2487 * could cause fsync to return early. 2488 */ 2489 if (ip->sync_flags & HAMMER_INODE_MODMASK) 2490 ip->flags |= HAMMER_INODE_REFLUSH; 2491 2492 /* 2493 * Merge left-over flags back into the frontend and fix the state. 2494 * Incomplete truncations are retained by the backend. 2495 */ 2496 ip->error = error; 2497 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED; 2498 ip->sync_flags &= HAMMER_INODE_TRUNCATED; 2499 2500 /* 2501 * The backend may have adjusted nlinks, so if the adjusted nlinks 2502 * does not match the fronttend set the frontend's DDIRTY flag again. 2503 */ 2504 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks) 2505 ip->flags |= HAMMER_INODE_DDIRTY; 2506 2507 /* 2508 * Fix up the dirty buffer status. 2509 */ 2510 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) { 2511 ip->flags |= HAMMER_INODE_BUFS; 2512 } 2513 hammer_redo_fifo_end_flush(ip); 2514 2515 /* 2516 * Re-set the XDIRTY flag if some of the inode's in-memory records 2517 * could not be flushed. 2518 */ 2519 KKASSERT((RB_EMPTY(&ip->rec_tree) && 2520 (ip->flags & HAMMER_INODE_XDIRTY) == 0) || 2521 (!RB_EMPTY(&ip->rec_tree) && 2522 (ip->flags & HAMMER_INODE_XDIRTY) != 0)); 2523 2524 /* 2525 * Do not lose track of inodes which no longer have vnode 2526 * assocations, otherwise they may never get flushed again. 2527 * 2528 * The reflush flag can be set superfluously, causing extra pain 2529 * for no reason. If the inode is no longer modified it no longer 2530 * needs to be flushed. 2531 */ 2532 if (ip->flags & HAMMER_INODE_MODMASK) { 2533 if (ip->vp == NULL) 2534 ip->flags |= HAMMER_INODE_REFLUSH; 2535 } else { 2536 ip->flags &= ~HAMMER_INODE_REFLUSH; 2537 } 2538 2539 /* 2540 * Adjust the flush state. 2541 */ 2542 if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 2543 /* 2544 * We were unable to flush out all our records, leave the 2545 * inode in a flush state and in the current flush group. 2546 * The flush group will be re-run. 2547 * 2548 * This occurs if the UNDO block gets too full or there is 2549 * too much dirty meta-data and allows the flusher to 2550 * finalize the UNDO block and then re-flush. 2551 */ 2552 ip->flags &= ~HAMMER_INODE_WOULDBLOCK; 2553 dorel = 0; 2554 } else { 2555 /* 2556 * Remove from the flush_group 2557 */ 2558 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip); 2559 ip->flush_group = NULL; 2560 2561 #if 0 2562 /* 2563 * Clean up the vnode ref and tracking counts. 
2564 */ 2565 if (ip->flags & HAMMER_INODE_VHELD) { 2566 ip->flags &= ~HAMMER_INODE_VHELD; 2567 vrele(ip->vp); 2568 } 2569 #endif 2570 --hmp->count_iqueued; 2571 --hammer_count_iqueued; 2572 2573 /* 2574 * And adjust the state. 2575 */ 2576 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) { 2577 ip->flush_state = HAMMER_FST_IDLE; 2578 dorel = 1; 2579 } else { 2580 ip->flush_state = HAMMER_FST_SETUP; 2581 dorel = 0; 2582 } 2583 2584 /* 2585 * If the frontend is waiting for a flush to complete, 2586 * wake it up. 2587 */ 2588 if (ip->flags & HAMMER_INODE_FLUSHW) { 2589 ip->flags &= ~HAMMER_INODE_FLUSHW; 2590 wakeup(&ip->flags); 2591 } 2592 2593 /* 2594 * If the frontend made more changes and requested another 2595 * flush, then try to get it running. 2596 * 2597 * Reflushes are aborted when the inode is errored out. 2598 */ 2599 if (ip->flags & HAMMER_INODE_REFLUSH) { 2600 ip->flags &= ~HAMMER_INODE_REFLUSH; 2601 if (ip->flags & HAMMER_INODE_RESIGNAL) { 2602 ip->flags &= ~HAMMER_INODE_RESIGNAL; 2603 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2604 } else { 2605 hammer_flush_inode(ip, 0); 2606 } 2607 } 2608 } 2609 2610 /* 2611 * If we have no parent dependancies we can clear CONN_DOWN 2612 */ 2613 if (TAILQ_EMPTY(&ip->target_list)) 2614 ip->flags &= ~HAMMER_INODE_CONN_DOWN; 2615 2616 /* 2617 * If the inode is now clean drop the space reservation. 2618 */ 2619 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 && 2620 (ip->flags & HAMMER_INODE_RSV_INODES)) { 2621 ip->flags &= ~HAMMER_INODE_RSV_INODES; 2622 --hmp->rsv_inodes; 2623 } 2624 2625 ip->flags &= ~HAMMER_INODE_SLAVEFLUSH; 2626 2627 if (dorel) 2628 hammer_rel_inode(ip, 0); 2629 } 2630 2631 /* 2632 * Called from hammer_sync_inode() to synchronize in-memory records 2633 * to the media. 2634 */ 2635 static int 2636 hammer_sync_record_callback(hammer_record_t record, void *data) 2637 { 2638 hammer_cursor_t cursor = data; 2639 hammer_transaction_t trans = cursor->trans; 2640 hammer_mount_t hmp = trans->hmp; 2641 int error; 2642 2643 /* 2644 * Skip records that do not belong to the current flush. 2645 */ 2646 ++hammer_stats_record_iterations; 2647 if (record->flush_state != HAMMER_FST_FLUSH) 2648 return(0); 2649 2650 #if 1 2651 if (record->flush_group != record->ip->flush_group) { 2652 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group ,record->ip->flush_group); 2653 if (hammer_debug_critical) 2654 Debugger("blah2"); 2655 return(0); 2656 } 2657 #endif 2658 KKASSERT(record->flush_group == record->ip->flush_group); 2659 2660 /* 2661 * Interlock the record using the BE flag. Once BE is set the 2662 * frontend cannot change the state of FE. 2663 * 2664 * NOTE: If FE is set prior to us setting BE we still sync the 2665 * record out, but the flush completion code converts it to 2666 * a delete-on-disk record instead of destroying it. 2667 */ 2668 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0); 2669 record->flags |= HAMMER_RECF_INTERLOCK_BE; 2670 2671 /* 2672 * The backend has already disposed of the record. 2673 */ 2674 if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) { 2675 error = 0; 2676 goto done; 2677 } 2678 2679 /* 2680 * If the whole inode is being deleted and all on-disk records will 2681 * be deleted very soon, we can't sync any new records to disk 2682 * because they will be deleted in the same transaction they were 2683 * created in (delete_tid == create_tid), which will assert. 
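	 *
	 * (That is, an on-media record is expected to have delete_tid == 0
	 *  or delete_tid > create_tid, so the equal-TID case trips an
	 *  assertion.)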
2684 * 2685 * XXX There may be a case with RECORD_ADD with DELETED_FE set 2686 * that we currently panic on. 2687 */ 2688 if (record->ip->sync_flags & HAMMER_INODE_DELETING) { 2689 switch(record->type) { 2690 case HAMMER_MEM_RECORD_DATA: 2691 /* 2692 * We don't have to do anything, if the record was 2693 * committed the space will have been accounted for 2694 * in the blockmap. 2695 */ 2696 /* fall through */ 2697 case HAMMER_MEM_RECORD_GENERAL: 2698 /* 2699 * Set deleted-by-backend flag. Do not set the 2700 * backend committed flag, because we are throwing 2701 * the record away. 2702 */ 2703 record->flags |= HAMMER_RECF_DELETED_BE; 2704 ++record->ip->rec_generation; 2705 error = 0; 2706 goto done; 2707 case HAMMER_MEM_RECORD_ADD: 2708 panic("hammer_sync_record_callback: illegal add " 2709 "during inode deletion record %p", record); 2710 break; /* NOT REACHED */ 2711 case HAMMER_MEM_RECORD_INODE: 2712 panic("hammer_sync_record_callback: attempt to " 2713 "sync inode record %p?", record); 2714 break; /* NOT REACHED */ 2715 case HAMMER_MEM_RECORD_DEL: 2716 /* 2717 * Follow through and issue the on-disk deletion 2718 */ 2719 break; 2720 } 2721 } 2722 2723 /* 2724 * If DELETED_FE is set special handling is needed for directory 2725 * entries. Dependant pieces related to the directory entry may 2726 * have already been synced to disk. If this occurs we have to 2727 * sync the directory entry and then change the in-memory record 2728 * from an ADD to a DELETE to cover the fact that it's been 2729 * deleted by the frontend. 2730 * 2731 * A directory delete covering record (MEM_RECORD_DEL) can never 2732 * be deleted by the frontend. 2733 * 2734 * Any other record type (aka DATA) can be deleted by the frontend. 2735 * XXX At the moment the flusher must skip it because there may 2736 * be another data record in the flush group for the same block, 2737 * meaning that some frontend data changes can leak into the backend's 2738 * synchronization point. 2739 */ 2740 if (record->flags & HAMMER_RECF_DELETED_FE) { 2741 if (record->type == HAMMER_MEM_RECORD_ADD) { 2742 /* 2743 * Convert a front-end deleted directory-add to 2744 * a directory-delete entry later. 2745 */ 2746 record->flags |= HAMMER_RECF_CONVERT_DELETE; 2747 } else { 2748 /* 2749 * Dispose of the record (race case). Mark as 2750 * deleted by backend (and not committed). 2751 */ 2752 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL); 2753 record->flags |= HAMMER_RECF_DELETED_BE; 2754 ++record->ip->rec_generation; 2755 error = 0; 2756 goto done; 2757 } 2758 } 2759 2760 /* 2761 * Assign the create_tid for new records. Deletions already 2762 * have the record's entire key properly set up. 2763 */ 2764 if (record->type != HAMMER_MEM_RECORD_DEL) { 2765 record->leaf.base.create_tid = trans->tid; 2766 record->leaf.create_ts = trans->time32; 2767 } 2768 2769 /* 2770 * This actually moves the record to the on-media B-Tree. We 2771 * must also generate REDO_TERM entries in the UNDO/REDO FIFO 2772 * indicating that the related REDO_WRITE(s) have been committed. 2773 * 2774 * During recovery any REDO_TERM's within the nominal recovery span 2775 * are ignored since the related meta-data is being undone, causing 2776 * any matching REDO_WRITEs to execute. The REDO_TERMs outside 2777 * the nominal recovery span will match against REDO_WRITEs and 2778 * prevent them from being executed (because the meta-data has 2779 * already been synchronized). 
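	 *
	 * (The retry loop below re-runs hammer_ip_sync_record_cursor()
	 *  after re-initializing the cursor whenever the sync returns
	 *  EDEADLK.)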
2780 */ 2781 if (record->flags & HAMMER_RECF_REDO) { 2782 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA); 2783 hammer_generate_redo(trans, record->ip, 2784 record->leaf.base.key - 2785 record->leaf.data_len, 2786 HAMMER_REDO_TERM_WRITE, 2787 NULL, 2788 record->leaf.data_len); 2789 } 2790 for (;;) { 2791 error = hammer_ip_sync_record_cursor(cursor, record); 2792 if (error != EDEADLK) 2793 break; 2794 hammer_done_cursor(cursor); 2795 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0], 2796 record->ip); 2797 if (error) 2798 break; 2799 } 2800 record->flags &= ~HAMMER_RECF_CONVERT_DELETE; 2801 2802 if (error) 2803 error = -error; 2804 done: 2805 hammer_flush_record_done(record, error); 2806 2807 /* 2808 * Do partial finalization if we have built up too many dirty 2809 * buffers. Otherwise a buffer cache deadlock can occur when 2810 * doing things like creating tens of thousands of tiny files. 2811 * 2812 * We must release our cursor lock to avoid a 3-way deadlock 2813 * due to the exclusive sync lock the finalizer must get. 2814 * 2815 * WARNING: See warnings in hammer_unlock_cursor() function. 2816 */ 2817 if (hammer_flusher_meta_limit(hmp)) { 2818 hammer_unlock_cursor(cursor); 2819 hammer_flusher_finalize(trans, 0); 2820 hammer_lock_cursor(cursor); 2821 } 2822 2823 return(error); 2824 } 2825 2826 /* 2827 * Backend function called by the flusher to sync an inode to media. 2828 */ 2829 int 2830 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip) 2831 { 2832 struct hammer_cursor cursor; 2833 hammer_node_t tmp_node; 2834 hammer_record_t depend; 2835 hammer_record_t next; 2836 int error, tmp_error; 2837 u_int64_t nlinks; 2838 2839 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0) 2840 return(0); 2841 2842 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip); 2843 if (error) 2844 goto done; 2845 2846 /* 2847 * Any directory records referencing this inode which are not in 2848 * our current flush group must adjust our nlink count for the 2849 * purposes of synchronizating to disk. 2850 * 2851 * Records which are in our flush group can be unlinked from our 2852 * inode now, potentially allowing the inode to be physically 2853 * deleted. 2854 * 2855 * This cannot block. 2856 */ 2857 nlinks = ip->ino_data.nlinks; 2858 next = TAILQ_FIRST(&ip->target_list); 2859 while ((depend = next) != NULL) { 2860 next = TAILQ_NEXT(depend, target_entry); 2861 if (depend->flush_state == HAMMER_FST_FLUSH && 2862 depend->flush_group == ip->flush_group) { 2863 /* 2864 * If this is an ADD that was deleted by the frontend 2865 * the frontend nlinks count will have already been 2866 * decremented, but the backend is going to sync its 2867 * directory entry and must account for it. The 2868 * record will be converted to a delete-on-disk when 2869 * it gets synced. 2870 * 2871 * If the ADD was not deleted by the frontend we 2872 * can remove the dependancy from our target_list. 2873 */ 2874 if (depend->flags & HAMMER_RECF_DELETED_FE) { 2875 ++nlinks; 2876 } else { 2877 TAILQ_REMOVE(&ip->target_list, depend, 2878 target_entry); 2879 depend->target_ip = NULL; 2880 } 2881 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) { 2882 /* 2883 * Not part of our flush group and not deleted by 2884 * the front-end, adjust the link count synced to 2885 * the media (undo what the frontend did when it 2886 * queued the record). 
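			 *
			 * (Net effect of this loop on the nlinks value that
			 *  gets synced, for reference:
			 *	in our group and DELETED_FE	-> ++nlinks
			 *	other group, !DELETED_FE, ADD	-> --nlinks
			 *	other group, !DELETED_FE, DEL	-> ++nlinks
			 *  anything else leaves nlinks alone.)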
2887 */ 2888 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0); 2889 switch(depend->type) { 2890 case HAMMER_MEM_RECORD_ADD: 2891 --nlinks; 2892 break; 2893 case HAMMER_MEM_RECORD_DEL: 2894 ++nlinks; 2895 break; 2896 default: 2897 break; 2898 } 2899 } 2900 } 2901 2902 /* 2903 * Set dirty if we had to modify the link count. 2904 */ 2905 if (ip->sync_ino_data.nlinks != nlinks) { 2906 KKASSERT((int64_t)nlinks >= 0); 2907 ip->sync_ino_data.nlinks = nlinks; 2908 ip->sync_flags |= HAMMER_INODE_DDIRTY; 2909 } 2910 2911 /* 2912 * If there is a trunction queued destroy any data past the (aligned) 2913 * truncation point. Userland will have dealt with the buffer 2914 * containing the truncation point for us. 2915 * 2916 * We don't flush pending frontend data buffers until after we've 2917 * dealt with the truncation. 2918 */ 2919 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) { 2920 /* 2921 * Interlock trunc_off. The VOP front-end may continue to 2922 * make adjustments to it while we are blocked. 2923 */ 2924 off_t trunc_off; 2925 off_t aligned_trunc_off; 2926 int blkmask; 2927 2928 trunc_off = ip->sync_trunc_off; 2929 blkmask = hammer_blocksize(trunc_off) - 1; 2930 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask; 2931 2932 /* 2933 * Delete any whole blocks on-media. The front-end has 2934 * already cleaned out any partial block and made it 2935 * pending. The front-end may have updated trunc_off 2936 * while we were blocked so we only use sync_trunc_off. 2937 * 2938 * This operation can blow out the buffer cache, EWOULDBLOCK 2939 * means we were unable to complete the deletion. The 2940 * deletion will update sync_trunc_off in that case. 2941 */ 2942 error = hammer_ip_delete_range(&cursor, ip, 2943 aligned_trunc_off, 2944 0x7FFFFFFFFFFFFFFFLL, 2); 2945 if (error == EWOULDBLOCK) { 2946 ip->flags |= HAMMER_INODE_WOULDBLOCK; 2947 error = 0; 2948 goto defer_buffer_flush; 2949 } 2950 2951 if (error) 2952 goto done; 2953 2954 /* 2955 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO. 2956 * 2957 * XXX we do this even if we did not previously generate 2958 * a REDO_TRUNC record. This operation may enclosed the 2959 * range for multiple prior truncation entries in the REDO 2960 * log. 2961 */ 2962 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR && 2963 (ip->flags & HAMMER_INODE_RDIRTY)) { 2964 hammer_generate_redo(trans, ip, aligned_trunc_off, 2965 HAMMER_REDO_TERM_TRUNC, 2966 NULL, 0); 2967 } 2968 2969 /* 2970 * Clear the truncation flag on the backend after we have 2971 * completed the deletions. Backend data is now good again 2972 * (including new records we are about to sync, below). 2973 * 2974 * Leave sync_trunc_off intact. As we write additional 2975 * records the backend will update sync_trunc_off. This 2976 * tells the backend whether it can skip the overwrite 2977 * test. This should work properly even when the backend 2978 * writes full blocks where the truncation point straddles 2979 * the block because the comparison is against the base 2980 * offset of the record. 2981 */ 2982 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED; 2983 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */ 2984 } else { 2985 error = 0; 2986 } 2987 2988 /* 2989 * Now sync related records. These will typically be directory 2990 * entries, records tracking direct-writes, or delete-on-disk records. 
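	 *
	 * (hammer_sync_record_callback() ignores any record whose
	 *  flush_state is not HAMMER_FST_FLUSH or whose flush group does
	 *  not match, so only records staged by the setup pass for this
	 *  group are written here.)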
2991 */ 2992 if (error == 0) { 2993 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2994 hammer_sync_record_callback, &cursor); 2995 if (tmp_error < 0) 2996 tmp_error = -error; 2997 if (tmp_error) 2998 error = tmp_error; 2999 } 3000 hammer_cache_node(&ip->cache[1], cursor.node); 3001 3002 /* 3003 * Re-seek for inode update, assuming our cache hasn't been ripped 3004 * out from under us. 3005 */ 3006 if (error == 0) { 3007 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error); 3008 if (tmp_node) { 3009 hammer_cursor_downgrade(&cursor); 3010 hammer_lock_sh(&tmp_node->lock); 3011 if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0) 3012 hammer_cursor_seek(&cursor, tmp_node, 0); 3013 hammer_unlock(&tmp_node->lock); 3014 hammer_rel_node(tmp_node); 3015 } 3016 error = 0; 3017 } 3018 3019 /* 3020 * If we are deleting the inode the frontend had better not have 3021 * any active references on elements making up the inode. 3022 * 3023 * The call to hammer_ip_delete_clean() cleans up auxillary records 3024 * but not DB or DATA records. Those must have already been deleted 3025 * by the normal truncation mechanic. 3026 */ 3027 if (error == 0 && ip->sync_ino_data.nlinks == 0 && 3028 RB_EMPTY(&ip->rec_tree) && 3029 (ip->sync_flags & HAMMER_INODE_DELETING) && 3030 (ip->flags & HAMMER_INODE_DELETED) == 0) { 3031 int count1 = 0; 3032 3033 error = hammer_ip_delete_clean(&cursor, ip, &count1); 3034 if (error == 0) { 3035 ip->flags |= HAMMER_INODE_DELETED; 3036 ip->sync_flags &= ~HAMMER_INODE_DELETING; 3037 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED; 3038 KKASSERT(RB_EMPTY(&ip->rec_tree)); 3039 3040 /* 3041 * Set delete_tid in both the frontend and backend 3042 * copy of the inode record. The DELETED flag handles 3043 * this, do not set DDIRTY. 3044 */ 3045 ip->ino_leaf.base.delete_tid = trans->tid; 3046 ip->sync_ino_leaf.base.delete_tid = trans->tid; 3047 ip->ino_leaf.delete_ts = trans->time32; 3048 ip->sync_ino_leaf.delete_ts = trans->time32; 3049 3050 3051 /* 3052 * Adjust the inode count in the volume header 3053 */ 3054 hammer_sync_lock_sh(trans); 3055 if (ip->flags & HAMMER_INODE_ONDISK) { 3056 hammer_modify_volume_field(trans, 3057 trans->rootvol, 3058 vol0_stat_inodes); 3059 --ip->hmp->rootvol->ondisk->vol0_stat_inodes; 3060 hammer_modify_volume_done(trans->rootvol); 3061 } 3062 hammer_sync_unlock(trans); 3063 } 3064 } 3065 3066 if (error) 3067 goto done; 3068 ip->sync_flags &= ~HAMMER_INODE_BUFS; 3069 3070 defer_buffer_flush: 3071 /* 3072 * Now update the inode's on-disk inode-data and/or on-disk record. 3073 * DELETED and ONDISK are managed only in ip->flags. 3074 * 3075 * In the case of a defered buffer flush we still update the on-disk 3076 * inode to satisfy visibility requirements if there happen to be 3077 * directory dependancies. 3078 */ 3079 switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) { 3080 case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK: 3081 /* 3082 * If deleted and on-disk, don't set any additional flags. 3083 * the delete flag takes care of things. 3084 * 3085 * Clear flags which may have been set by the frontend. 3086 */ 3087 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3088 HAMMER_INODE_SDIRTY | 3089 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3090 HAMMER_INODE_DELETING); 3091 break; 3092 case HAMMER_INODE_DELETED: 3093 /* 3094 * Take care of the case where a deleted inode was never 3095 * flushed to the disk in the first place. 3096 * 3097 * Clear flags which may have been set by the frontend. 
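		 *
		 * (The loop below just marks each remaining in-memory record
		 *  as deleted by the backend and releases it; nothing has to
		 *  reach the media for an inode that was never written out.)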
3098 */ 3099 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 3100 HAMMER_INODE_SDIRTY | 3101 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 3102 HAMMER_INODE_DELETING); 3103 while (RB_ROOT(&ip->rec_tree)) { 3104 hammer_record_t record = RB_ROOT(&ip->rec_tree); 3105 hammer_ref(&record->lock); 3106 KKASSERT(hammer_oneref(&record->lock)); 3107 record->flags |= HAMMER_RECF_DELETED_BE; 3108 ++record->ip->rec_generation; 3109 hammer_rel_mem_record(record); 3110 } 3111 break; 3112 case HAMMER_INODE_ONDISK: 3113 /* 3114 * If already on-disk, do not set any additional flags. 3115 */ 3116 break; 3117 default: 3118 /* 3119 * If not on-disk and not deleted, set DDIRTY to force 3120 * an initial record to be written. 3121 * 3122 * Also set the create_tid in both the frontend and backend 3123 * copy of the inode record. 3124 */ 3125 ip->ino_leaf.base.create_tid = trans->tid; 3126 ip->ino_leaf.create_ts = trans->time32; 3127 ip->sync_ino_leaf.base.create_tid = trans->tid; 3128 ip->sync_ino_leaf.create_ts = trans->time32; 3129 ip->sync_flags |= HAMMER_INODE_DDIRTY; 3130 break; 3131 } 3132 3133 /* 3134 * If DDIRTY or SDIRTY is set, write out a new record. 3135 * If the inode is already on-disk the old record is marked as 3136 * deleted. 3137 * 3138 * If DELETED is set hammer_update_inode() will delete the existing 3139 * record without writing out a new one. 3140 * 3141 * If *ONLY* the ITIMES flag is set we can update the record in-place. 3142 */ 3143 if (ip->flags & HAMMER_INODE_DELETED) { 3144 error = hammer_update_inode(&cursor, ip); 3145 } else 3146 if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) && 3147 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) { 3148 error = hammer_update_itimes(&cursor, ip); 3149 } else 3150 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY | 3151 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) { 3152 error = hammer_update_inode(&cursor, ip); 3153 } 3154 done: 3155 if (error) { 3156 hammer_critical_error(ip->hmp, ip, error, 3157 "while syncing inode"); 3158 } 3159 hammer_done_cursor(&cursor); 3160 return(error); 3161 } 3162 3163 /* 3164 * This routine is called when the OS is no longer actively referencing 3165 * the inode (but might still be keeping it cached), or when releasing 3166 * the last reference to an inode. 3167 * 3168 * At this point if the inode's nlinks count is zero we want to destroy 3169 * it, which may mean destroying it on-media too. 3170 */ 3171 void 3172 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp) 3173 { 3174 struct vnode *vp; 3175 3176 /* 3177 * Set the DELETING flag when the link count drops to 0 and the 3178 * OS no longer has any opens on the inode. 3179 * 3180 * The backend will clear DELETING (a mod flag) and set DELETED 3181 * (a state flag) when it is actually able to perform the 3182 * operation. 3183 * 3184 * Don't reflag the deletion if the flusher is currently syncing 3185 * one that was already flagged. A previously set DELETING flag 3186 * may bounce around flags and sync_flags until the operation is 3187 * completely done. 3188 * 3189 * Do not attempt to modify a snapshot inode (one set to read-only). 
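	 *
	 * (The check below ORs ip->flags with ip->sync_flags so a DELETING
	 *  flag that has already migrated to the backend copy still
	 *  suppresses re-flagging.)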
3190 */ 3191 if (ip->ino_data.nlinks == 0 && 3192 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) { 3193 ip->flags |= HAMMER_INODE_DELETING; 3194 ip->flags |= HAMMER_INODE_TRUNCATED; 3195 ip->trunc_off = 0; 3196 vp = NULL; 3197 if (getvp) { 3198 if (hammer_get_vnode(ip, &vp) != 0) 3199 return; 3200 } 3201 3202 /* 3203 * Final cleanup 3204 */ 3205 if (ip->vp) 3206 nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0); 3207 if (getvp) 3208 vput(vp); 3209 } 3210 } 3211 3212 /* 3213 * After potentially resolving a dependancy the inode is tested 3214 * to determine whether it needs to be reflushed. 3215 */ 3216 void 3217 hammer_test_inode(hammer_inode_t ip) 3218 { 3219 if (ip->flags & HAMMER_INODE_REFLUSH) { 3220 ip->flags &= ~HAMMER_INODE_REFLUSH; 3221 hammer_ref(&ip->lock); 3222 if (ip->flags & HAMMER_INODE_RESIGNAL) { 3223 ip->flags &= ~HAMMER_INODE_RESIGNAL; 3224 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 3225 } else { 3226 hammer_flush_inode(ip, 0); 3227 } 3228 hammer_rel_inode(ip, 0); 3229 } 3230 } 3231 3232 /* 3233 * Clear the RECLAIM flag on an inode. This occurs when the inode is 3234 * reassociated with a vp or just before it gets freed. 3235 * 3236 * Pipeline wakeups to threads blocked due to an excessive number of 3237 * detached inodes. This typically occurs when atime updates accumulate 3238 * while scanning a directory tree. 3239 */ 3240 static void 3241 hammer_inode_wakereclaims(hammer_inode_t ip) 3242 { 3243 struct hammer_reclaim *reclaim; 3244 hammer_mount_t hmp = ip->hmp; 3245 3246 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) 3247 return; 3248 3249 --hammer_count_reclaims; 3250 --hmp->count_reclaims; 3251 ip->flags &= ~HAMMER_INODE_RECLAIM; 3252 3253 if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) { 3254 KKASSERT(reclaim->count > 0); 3255 if (--reclaim->count == 0) { 3256 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry); 3257 wakeup(reclaim); 3258 } 3259 } 3260 } 3261 3262 /* 3263 * Setup our reclaim pipeline. We only let so many detached (and dirty) 3264 * inodes build up before we start blocking. This routine is called 3265 * if a new inode is created or an inode is loaded from media. 3266 * 3267 * When we block we don't care *which* inode has finished reclaiming, 3268 * as long as one does. 3269 * 3270 * The reclaim pipeline is primarily governed by the auto-flush which is 3271 * 1/4 hammer_limit_reclaims. We don't want to block if the count is 3272 * less than 1/2 hammer_limit_reclaims. From 1/2 to full count is 3273 * dynamically governed. 3274 */ 3275 void 3276 hammer_inode_waitreclaims(hammer_transaction_t trans) 3277 { 3278 hammer_mount_t hmp = trans->hmp; 3279 struct hammer_reclaim reclaim; 3280 int lower_limit; 3281 3282 /* 3283 * Track inode load, delay if the number of reclaiming inodes is 3284 * between 2/4 and 4/4 hammer_limit_reclaims, depending. 
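	 *
	 * (Illustrative numbers only, assuming hammer_limit_reclaims were
	 *  4000: a process whose decayed per-pid count has reached the 2000
	 *  cap starts blocking once count_reclaims hits 2000, an idle
	 *  process only at 4000, and threads without a process at the fixed
	 *  3/4 mark of 3000.)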
3285 */ 3286 if (curthread->td_proc) { 3287 struct hammer_inostats *stats; 3288 3289 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid); 3290 ++stats->count; 3291 3292 if (stats->count > hammer_limit_reclaims / 2) 3293 stats->count = hammer_limit_reclaims / 2; 3294 lower_limit = hammer_limit_reclaims - stats->count; 3295 if (hammer_debug_general & 0x10000) { 3296 kprintf("pid %5d limit %d\n", 3297 (int)curthread->td_proc->p_pid, lower_limit); 3298 } 3299 } else { 3300 lower_limit = hammer_limit_reclaims * 3 / 4; 3301 } 3302 if (hmp->count_reclaims >= lower_limit) { 3303 reclaim.count = 1; 3304 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry); 3305 tsleep(&reclaim, 0, "hmrrcm", hz); 3306 if (reclaim.count > 0) 3307 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry); 3308 } 3309 } 3310 3311 /* 3312 * Keep track of reclaim statistics on a per-pid basis using a loose 3313 * 4-way set associative hash table. Collisions inherit the count of 3314 * the previous entry. 3315 * 3316 * NOTE: We want to be careful here to limit the chain size. If the chain 3317 * size is too large a pid will spread its stats out over too many 3318 * entries under certain types of heavy filesystem activity and 3319 * wind up not delaying long enough. 3320 */ 3321 static 3322 struct hammer_inostats * 3323 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid) 3324 { 3325 struct hammer_inostats *stats; 3326 int delta; 3327 int chain; 3328 static volatile int iterator; /* we don't care about MP races */ 3329 3330 /* 3331 * Chain up to 4 times to find our entry. 3332 */ 3333 for (chain = 0; chain < 4; ++chain) { 3334 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK]; 3335 if (stats->pid == pid) 3336 break; 3337 } 3338 3339 /* 3340 * Replace one of the four chaining entries with our new entry. 3341 */ 3342 if (chain == 4) { 3343 stats = &hmp->inostats[(pid + (iterator++ & 3)) & 3344 HAMMER_INOSTATS_HMASK]; 3345 stats->pid = pid; 3346 } 3347 3348 /* 3349 * Decay the entry 3350 */ 3351 if (stats->count && stats->ltick != ticks) { 3352 delta = ticks - stats->ltick; 3353 stats->ltick = ticks; 3354 if (delta <= 0 || delta > hz * 60) 3355 stats->count = 0; 3356 else 3357 stats->count = stats->count * hz / (hz + delta); 3358 } 3359 if (hammer_debug_general & 0x10000) 3360 kprintf("pid %5d stats %d\n", (int)pid, stats->count); 3361 return (stats); 3362 } 3363 3364 #if 0 3365 3366 /* 3367 * XXX not used, doesn't work very well due to the large batching nature 3368 * of flushes. 3369 * 3370 * A larger then normal backlog of inodes is sitting in the flusher, 3371 * enforce a general slowdown to let it catch up. This routine is only 3372 * called on completion of a non-flusher-related transaction which 3373 * performed B-Tree node I/O. 3374 * 3375 * It is possible for the flusher to stall in a continuous load. 3376 * blogbench -i1000 -o seems to do a good job generating this sort of load. 3377 * If the flusher is unable to catch up the inode count can bloat until 3378 * we run out of kvm. 3379 * 3380 * This is a bit of a hack. 3381 */ 3382 void 3383 hammer_inode_waithard(hammer_mount_t hmp) 3384 { 3385 /* 3386 * Hysteresis. 
3387 */ 3388 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) { 3389 if (hmp->count_reclaims < hammer_limit_reclaims / 2 && 3390 hmp->count_iqueued < hmp->count_inodes / 20) { 3391 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY; 3392 return; 3393 } 3394 } else { 3395 if (hmp->count_reclaims < hammer_limit_reclaims || 3396 hmp->count_iqueued < hmp->count_inodes / 10) { 3397 return; 3398 } 3399 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY; 3400 } 3401 3402 /* 3403 * Block for one flush cycle. 3404 */ 3405 hammer_flusher_wait_next(hmp); 3406 } 3407 3408 #endif 3409