1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $ 35 */ 36 37 #include "hammer.h" 38 #include <vm/vm_extern.h> 39 #include <sys/buf.h> 40 #include <sys/buf2.h> 41 42 static int hammer_unload_inode(struct hammer_inode *ip); 43 static void hammer_free_inode(hammer_inode_t ip); 44 static void hammer_flush_inode_core(hammer_inode_t ip, 45 hammer_flush_group_t flg, int flags); 46 static int hammer_setup_child_callback(hammer_record_t rec, void *data); 47 #if 0 48 static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data); 49 #endif 50 static int hammer_setup_parent_inodes(hammer_inode_t ip, 51 hammer_flush_group_t flg); 52 static int hammer_setup_parent_inodes_helper(hammer_record_t record, 53 hammer_flush_group_t flg); 54 static void hammer_inode_wakereclaims(hammer_inode_t ip, int dowake); 55 56 #ifdef DEBUG_TRUNCATE 57 extern struct hammer_inode *HammerTruncIp; 58 #endif 59 60 /* 61 * RB-Tree support for inode structures 62 */ 63 int 64 hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2) 65 { 66 if (ip1->obj_localization < ip2->obj_localization) 67 return(-1); 68 if (ip1->obj_localization > ip2->obj_localization) 69 return(1); 70 if (ip1->obj_id < ip2->obj_id) 71 return(-1); 72 if (ip1->obj_id > ip2->obj_id) 73 return(1); 74 if (ip1->obj_asof < ip2->obj_asof) 75 return(-1); 76 if (ip1->obj_asof > ip2->obj_asof) 77 return(1); 78 return(0); 79 } 80 81 /* 82 * RB-Tree support for inode structures / special LOOKUP_INFO 83 */ 84 static int 85 hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip) 86 { 87 if (info->obj_localization < ip->obj_localization) 88 return(-1); 89 if (info->obj_localization > ip->obj_localization) 90 return(1); 91 if (info->obj_id < ip->obj_id) 92 return(-1); 93 if (info->obj_id > ip->obj_id) 94 return(1); 95 if (info->obj_asof < ip->obj_asof) 96 return(-1); 97 if (info->obj_asof > ip->obj_asof) 98 return(1); 99 return(0); 100 } 101 102 /* 103 * Used by hammer_scan_inode_snapshots() to locate all of an object's 104 * snapshots. Note that the asof field is not tested, which we can get 105 * away with because it is the lowest-priority field. 106 */ 107 static int 108 hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data) 109 { 110 hammer_inode_info_t info = data; 111 112 if (ip->obj_localization > info->obj_localization) 113 return(1); 114 if (ip->obj_localization < info->obj_localization) 115 return(-1); 116 if (ip->obj_id > info->obj_id) 117 return(1); 118 if (ip->obj_id < info->obj_id) 119 return(-1); 120 return(0); 121 } 122 123 /* 124 * Used by hammer_unload_pseudofs() to locate all inodes associated with 125 * a particular PFS. 126 */ 127 static int 128 hammer_inode_pfs_cmp(hammer_inode_t ip, void *data) 129 { 130 u_int32_t localization = *(u_int32_t *)data; 131 if (ip->obj_localization > localization) 132 return(1); 133 if (ip->obj_localization < localization) 134 return(-1); 135 return(0); 136 } 137 138 /* 139 * RB-Tree support for pseudofs structures 140 */ 141 static int 142 hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2) 143 { 144 if (p1->localization < p2->localization) 145 return(-1); 146 if (p1->localization > p2->localization) 147 return(1); 148 return(0); 149 } 150 151 152 RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare); 153 RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node, 154 hammer_inode_info_cmp, hammer_inode_info_t); 155 RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node, 156 hammer_pfs_rb_compare, u_int32_t, localization); 157 158 /* 159 * The kernel is not actively referencing this vnode but is still holding 160 * it cached. 161 * 162 * This is called from the frontend. 163 */ 164 int 165 hammer_vop_inactive(struct vop_inactive_args *ap) 166 { 167 struct hammer_inode *ip = VTOI(ap->a_vp); 168 169 /* 170 * Degenerate case 171 */ 172 if (ip == NULL) { 173 vrecycle(ap->a_vp); 174 return(0); 175 } 176 177 /* 178 * If the inode no longer has visibility in the filesystem try to 179 * recycle it immediately, even if the inode is dirty. Recycling 180 * it quickly allows the system to reclaim buffer cache and VM 181 * resources which can matter a lot in a heavily loaded system. 182 * 183 * This can deadlock in vfsync() if we aren't careful. 184 * 185 * Do not queue the inode to the flusher if we still have visibility, 186 * otherwise namespace calls such as chmod will unnecessarily generate 187 * multiple inode updates. 188 */ 189 hammer_inode_unloadable_check(ip, 0); 190 if (ip->ino_data.nlinks == 0) { 191 if (ip->flags & HAMMER_INODE_MODMASK) 192 hammer_flush_inode(ip, 0); 193 vrecycle(ap->a_vp); 194 } 195 return(0); 196 } 197 198 /* 199 * Release the vnode association. This is typically (but not always) 200 * the last reference on the inode. 201 * 202 * Once the association is lost we are on our own with regards to 203 * flushing the inode. 204 */ 205 int 206 hammer_vop_reclaim(struct vop_reclaim_args *ap) 207 { 208 struct hammer_inode *ip; 209 hammer_mount_t hmp; 210 struct vnode *vp; 211 212 vp = ap->a_vp; 213 214 if ((ip = vp->v_data) != NULL) { 215 hmp = ip->hmp; 216 vp->v_data = NULL; 217 ip->vp = NULL; 218 219 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) { 220 ++hammer_count_reclaiming; 221 ++hmp->inode_reclaims; 222 ip->flags |= HAMMER_INODE_RECLAIM; 223 } 224 hammer_rel_inode(ip, 1); 225 } 226 return(0); 227 } 228 229 /* 230 * Return a locked vnode for the specified inode. The inode must be 231 * referenced but NOT LOCKED on entry and will remain referenced on 232 * return. 233 * 234 * Called from the frontend. 235 */ 236 int 237 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp) 238 { 239 hammer_mount_t hmp; 240 struct vnode *vp; 241 int error = 0; 242 u_int8_t obj_type; 243 244 hmp = ip->hmp; 245 246 for (;;) { 247 if ((vp = ip->vp) == NULL) { 248 error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0); 249 if (error) 250 break; 251 hammer_lock_ex(&ip->lock); 252 if (ip->vp != NULL) { 253 hammer_unlock(&ip->lock); 254 vp->v_type = VBAD; 255 vx_put(vp); 256 continue; 257 } 258 hammer_ref(&ip->lock); 259 vp = *vpp; 260 ip->vp = vp; 261 262 obj_type = ip->ino_data.obj_type; 263 vp->v_type = hammer_get_vnode_type(obj_type); 264 265 hammer_inode_wakereclaims(ip, 0); 266 267 switch(ip->ino_data.obj_type) { 268 case HAMMER_OBJTYPE_CDEV: 269 case HAMMER_OBJTYPE_BDEV: 270 vp->v_ops = &hmp->mp->mnt_vn_spec_ops; 271 addaliasu(vp, ip->ino_data.rmajor, 272 ip->ino_data.rminor); 273 break; 274 case HAMMER_OBJTYPE_FIFO: 275 vp->v_ops = &hmp->mp->mnt_vn_fifo_ops; 276 break; 277 default: 278 break; 279 } 280 281 /* 282 * Only mark as the root vnode if the ip is not 283 * historical, otherwise the VFS cache will get 284 * confused. The other half of the special handling 285 * is in hammer_vop_nlookupdotdot(). 286 * 287 * Pseudo-filesystem roots can be accessed via 288 * non-root filesystem paths and setting VROOT may 289 * confuse the namecache. Set VPFSROOT instead. 290 */ 291 if (ip->obj_id == HAMMER_OBJID_ROOT && 292 ip->obj_asof == hmp->asof) { 293 if (ip->obj_localization == 0) 294 vp->v_flag |= VROOT; 295 else 296 vp->v_flag |= VPFSROOT; 297 } 298 299 vp->v_data = (void *)ip; 300 /* vnode locked by getnewvnode() */ 301 /* make related vnode dirty if inode dirty? */ 302 hammer_unlock(&ip->lock); 303 if (vp->v_type == VREG) 304 vinitvmio(vp, ip->ino_data.size); 305 break; 306 } 307 308 /* 309 * loop if the vget fails (aka races), or if the vp 310 * no longer matches ip->vp. 311 */ 312 if (vget(vp, LK_EXCLUSIVE) == 0) { 313 if (vp == ip->vp) 314 break; 315 vput(vp); 316 } 317 } 318 *vpp = vp; 319 return(error); 320 } 321 322 /* 323 * Locate all copies of the inode for obj_id compatible with the specified 324 * asof, reference, and issue the related call-back. This routine is used 325 * for direct-io invalidation and does not create any new inodes. 326 */ 327 void 328 hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo, 329 int (*callback)(hammer_inode_t ip, void *data), 330 void *data) 331 { 332 hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root, 333 hammer_inode_info_cmp_all_history, 334 callback, iinfo); 335 } 336 337 /* 338 * Acquire a HAMMER inode. The returned inode is not locked. These functions 339 * do not attach or detach the related vnode (use hammer_get_vnode() for 340 * that). 341 * 342 * The flags argument is only applied for newly created inodes, and only 343 * certain flags are inherited. 344 * 345 * Called from the frontend. 346 */ 347 struct hammer_inode * 348 hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip, 349 int64_t obj_id, hammer_tid_t asof, u_int32_t localization, 350 int flags, int *errorp) 351 { 352 hammer_mount_t hmp = trans->hmp; 353 struct hammer_inode_info iinfo; 354 struct hammer_cursor cursor; 355 struct hammer_inode *ip; 356 357 358 /* 359 * Determine if we already have an inode cached. If we do then 360 * we are golden. 361 * 362 * If we find an inode with no vnode we have to mark the 363 * transaction such that hammer_inode_waitreclaims() is 364 * called later on to avoid building up an infinite number 365 * of inodes. Otherwise we can continue to * add new inodes 366 * faster then they can be disposed of, even with the tsleep 367 * delay. 368 * 369 * If we find a dummy inode we return a failure so dounlink 370 * (which does another lookup) doesn't try to mess with the 371 * link count. hammer_vop_nresolve() uses hammer_get_dummy_inode() 372 * to ref dummy inodes. 373 */ 374 iinfo.obj_id = obj_id; 375 iinfo.obj_asof = asof; 376 iinfo.obj_localization = localization; 377 loop: 378 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 379 if (ip) { 380 if (ip->flags & HAMMER_INODE_DUMMY) { 381 *errorp = ENOENT; 382 return(NULL); 383 } 384 hammer_ref(&ip->lock); 385 *errorp = 0; 386 return(ip); 387 } 388 389 /* 390 * Allocate a new inode structure and deal with races later. 391 */ 392 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 393 ++hammer_count_inodes; 394 ++hmp->count_inodes; 395 ip->obj_id = obj_id; 396 ip->obj_asof = iinfo.obj_asof; 397 ip->obj_localization = localization; 398 ip->hmp = hmp; 399 ip->flags = flags & HAMMER_INODE_RO; 400 ip->cache[0].ip = ip; 401 ip->cache[1].ip = ip; 402 if (hmp->ronly) 403 ip->flags |= HAMMER_INODE_RO; 404 ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off = 405 0x7FFFFFFFFFFFFFFFLL; 406 RB_INIT(&ip->rec_tree); 407 TAILQ_INIT(&ip->target_list); 408 hammer_ref(&ip->lock); 409 410 /* 411 * Locate the on-disk inode. If this is a PFS root we always 412 * access the current version of the root inode and (if it is not 413 * a master) always access information under it with a snapshot 414 * TID. 415 */ 416 retry: 417 hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL); 418 cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE; 419 cursor.key_beg.obj_id = ip->obj_id; 420 cursor.key_beg.key = 0; 421 cursor.key_beg.create_tid = 0; 422 cursor.key_beg.delete_tid = 0; 423 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE; 424 cursor.key_beg.obj_type = 0; 425 426 cursor.asof = iinfo.obj_asof; 427 cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA | 428 HAMMER_CURSOR_ASOF; 429 430 *errorp = hammer_btree_lookup(&cursor); 431 if (*errorp == EDEADLK) { 432 hammer_done_cursor(&cursor); 433 goto retry; 434 } 435 436 /* 437 * On success the B-Tree lookup will hold the appropriate 438 * buffer cache buffers and provide a pointer to the requested 439 * information. Copy the information to the in-memory inode 440 * and cache the B-Tree node to improve future operations. 441 */ 442 if (*errorp == 0) { 443 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf; 444 ip->ino_data = cursor.data->inode; 445 446 /* 447 * cache[0] tries to cache the location of the object inode. 448 * The assumption is that it is near the directory inode. 449 * 450 * cache[1] tries to cache the location of the object data. 451 * The assumption is that it is near the directory data. 452 */ 453 hammer_cache_node(&ip->cache[0], cursor.node); 454 if (dip && dip->cache[1].node) 455 hammer_cache_node(&ip->cache[1], dip->cache[1].node); 456 457 /* 458 * The file should not contain any data past the file size 459 * stored in the inode. Setting save_trunc_off to the 460 * file size instead of max reduces B-Tree lookup overheads 461 * on append by allowing the flusher to avoid checking for 462 * record overwrites. 463 */ 464 ip->save_trunc_off = ip->ino_data.size; 465 466 /* 467 * Locate and assign the pseudofs management structure to 468 * the inode. 469 */ 470 if (dip && dip->obj_localization == ip->obj_localization) { 471 ip->pfsm = dip->pfsm; 472 hammer_ref(&ip->pfsm->lock); 473 } else { 474 ip->pfsm = hammer_load_pseudofs(trans, 475 ip->obj_localization, 476 errorp); 477 *errorp = 0; /* ignore ENOENT */ 478 } 479 } 480 481 /* 482 * The inode is placed on the red-black tree and will be synced to 483 * the media when flushed or by the filesystem sync. If this races 484 * another instantiation/lookup the insertion will fail. 485 */ 486 if (*errorp == 0) { 487 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 488 hammer_free_inode(ip); 489 hammer_done_cursor(&cursor); 490 goto loop; 491 } 492 ip->flags |= HAMMER_INODE_ONDISK; 493 } else { 494 if (ip->flags & HAMMER_INODE_RSV_INODES) { 495 ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */ 496 --hmp->rsv_inodes; 497 } 498 499 hammer_free_inode(ip); 500 ip = NULL; 501 } 502 hammer_done_cursor(&cursor); 503 trans->flags |= HAMMER_TRANSF_NEWINODE; 504 return (ip); 505 } 506 507 /* 508 * Get a dummy inode to placemark a broken directory entry. 509 */ 510 struct hammer_inode * 511 hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip, 512 int64_t obj_id, hammer_tid_t asof, u_int32_t localization, 513 int flags, int *errorp) 514 { 515 hammer_mount_t hmp = trans->hmp; 516 struct hammer_inode_info iinfo; 517 struct hammer_inode *ip; 518 519 /* 520 * Determine if we already have an inode cached. If we do then 521 * we are golden. 522 * 523 * If we find an inode with no vnode we have to mark the 524 * transaction such that hammer_inode_waitreclaims() is 525 * called later on to avoid building up an infinite number 526 * of inodes. Otherwise we can continue to * add new inodes 527 * faster then they can be disposed of, even with the tsleep 528 * delay. 529 * 530 * If we find a non-fake inode we return an error. Only fake 531 * inodes can be returned by this routine. 532 */ 533 iinfo.obj_id = obj_id; 534 iinfo.obj_asof = asof; 535 iinfo.obj_localization = localization; 536 loop: 537 *errorp = 0; 538 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 539 if (ip) { 540 if ((ip->flags & HAMMER_INODE_DUMMY) == 0) { 541 *errorp = ENOENT; 542 return(NULL); 543 } 544 hammer_ref(&ip->lock); 545 return(ip); 546 } 547 548 /* 549 * Allocate a new inode structure and deal with races later. 550 */ 551 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 552 ++hammer_count_inodes; 553 ++hmp->count_inodes; 554 ip->obj_id = obj_id; 555 ip->obj_asof = iinfo.obj_asof; 556 ip->obj_localization = localization; 557 ip->hmp = hmp; 558 ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY; 559 ip->cache[0].ip = ip; 560 ip->cache[1].ip = ip; 561 ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off = 562 0x7FFFFFFFFFFFFFFFLL; 563 RB_INIT(&ip->rec_tree); 564 TAILQ_INIT(&ip->target_list); 565 hammer_ref(&ip->lock); 566 567 /* 568 * Populate the dummy inode. Leave everything zero'd out. 569 * 570 * (ip->ino_leaf and ip->ino_data) 571 * 572 * Make the dummy inode a FIFO object which most copy programs 573 * will properly ignore. 574 */ 575 ip->save_trunc_off = ip->ino_data.size; 576 ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO; 577 578 /* 579 * Locate and assign the pseudofs management structure to 580 * the inode. 581 */ 582 if (dip && dip->obj_localization == ip->obj_localization) { 583 ip->pfsm = dip->pfsm; 584 hammer_ref(&ip->pfsm->lock); 585 } else { 586 ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization, 587 errorp); 588 *errorp = 0; /* ignore ENOENT */ 589 } 590 591 /* 592 * The inode is placed on the red-black tree and will be synced to 593 * the media when flushed or by the filesystem sync. If this races 594 * another instantiation/lookup the insertion will fail. 595 * 596 * NOTE: Do not set HAMMER_INODE_ONDISK. The inode is a fake. 597 */ 598 if (*errorp == 0) { 599 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 600 hammer_free_inode(ip); 601 goto loop; 602 } 603 } else { 604 if (ip->flags & HAMMER_INODE_RSV_INODES) { 605 ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */ 606 --hmp->rsv_inodes; 607 } 608 hammer_free_inode(ip); 609 ip = NULL; 610 } 611 trans->flags |= HAMMER_TRANSF_NEWINODE; 612 return (ip); 613 } 614 615 /* 616 * Create a new filesystem object, returning the inode in *ipp. The 617 * returned inode will be referenced. The inode is created in-memory. 618 * 619 * If pfsm is non-NULL the caller wishes to create the root inode for 620 * a master PFS. 621 */ 622 int 623 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap, 624 struct ucred *cred, hammer_inode_t dip, 625 hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp) 626 { 627 hammer_mount_t hmp; 628 hammer_inode_t ip; 629 uid_t xuid; 630 int error; 631 632 hmp = trans->hmp; 633 634 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 635 ++hammer_count_inodes; 636 ++hmp->count_inodes; 637 trans->flags |= HAMMER_TRANSF_NEWINODE; 638 639 if (pfsm) { 640 KKASSERT(pfsm->localization != 0); 641 ip->obj_id = HAMMER_OBJID_ROOT; 642 ip->obj_localization = pfsm->localization; 643 } else { 644 KKASSERT(dip != NULL); 645 ip->obj_id = hammer_alloc_objid(hmp, dip); 646 ip->obj_localization = dip->obj_localization; 647 } 648 649 KKASSERT(ip->obj_id != 0); 650 ip->obj_asof = hmp->asof; 651 ip->hmp = hmp; 652 ip->flush_state = HAMMER_FST_IDLE; 653 ip->flags = HAMMER_INODE_DDIRTY | 654 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME; 655 ip->cache[0].ip = ip; 656 ip->cache[1].ip = ip; 657 658 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL; 659 /* ip->save_trunc_off = 0; (already zero) */ 660 RB_INIT(&ip->rec_tree); 661 TAILQ_INIT(&ip->target_list); 662 663 ip->ino_data.atime = trans->time; 664 ip->ino_data.mtime = trans->time; 665 ip->ino_data.size = 0; 666 ip->ino_data.nlinks = 0; 667 668 /* 669 * A nohistory designator on the parent directory is inherited by 670 * the child. We will do this even for pseudo-fs creation... the 671 * sysad can turn it off. 672 */ 673 if (dip) { 674 ip->ino_data.uflags = dip->ino_data.uflags & 675 (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP); 676 } 677 678 ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; 679 ip->ino_leaf.base.localization = ip->obj_localization + 680 HAMMER_LOCALIZE_INODE; 681 ip->ino_leaf.base.obj_id = ip->obj_id; 682 ip->ino_leaf.base.key = 0; 683 ip->ino_leaf.base.create_tid = 0; 684 ip->ino_leaf.base.delete_tid = 0; 685 ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE; 686 ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type); 687 688 ip->ino_data.obj_type = ip->ino_leaf.base.obj_type; 689 ip->ino_data.version = HAMMER_INODE_DATA_VERSION; 690 ip->ino_data.mode = vap->va_mode; 691 ip->ino_data.ctime = trans->time; 692 693 /* 694 * If we are running version 2 or greater we use dirhash algorithm #1 695 * which is semi-sorted. Algorithm #0 was just a pure crc. 696 */ 697 if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) { 698 if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) { 699 ip->ino_data.cap_flags |= HAMMER_INODE_CAP_DIRHASH_ALG1; 700 } 701 } 702 703 /* 704 * Setup the ".." pointer. This only needs to be done for directories 705 * but we do it for all objects as a recovery aid. 706 */ 707 if (dip) 708 ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id; 709 #if 0 710 /* 711 * The parent_obj_localization field only applies to pseudo-fs roots. 712 * XXX this is no longer applicable, PFSs are no longer directly 713 * tied into the parent's directory structure. 714 */ 715 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY && 716 ip->obj_id == HAMMER_OBJID_ROOT) { 717 ip->ino_data.ext.obj.parent_obj_localization = 718 dip->obj_localization; 719 } 720 #endif 721 722 switch(ip->ino_leaf.base.obj_type) { 723 case HAMMER_OBJTYPE_CDEV: 724 case HAMMER_OBJTYPE_BDEV: 725 ip->ino_data.rmajor = vap->va_rmajor; 726 ip->ino_data.rminor = vap->va_rminor; 727 break; 728 default: 729 break; 730 } 731 732 /* 733 * Calculate default uid/gid and overwrite with information from 734 * the vap. 735 */ 736 if (dip) { 737 xuid = hammer_to_unix_xid(&dip->ino_data.uid); 738 xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, 739 xuid, cred, &vap->va_mode); 740 } else { 741 xuid = 0; 742 } 743 ip->ino_data.mode = vap->va_mode; 744 745 if (vap->va_vaflags & VA_UID_UUID_VALID) 746 ip->ino_data.uid = vap->va_uid_uuid; 747 else if (vap->va_uid != (uid_t)VNOVAL) 748 hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid); 749 else 750 hammer_guid_to_uuid(&ip->ino_data.uid, xuid); 751 752 if (vap->va_vaflags & VA_GID_UUID_VALID) 753 ip->ino_data.gid = vap->va_gid_uuid; 754 else if (vap->va_gid != (gid_t)VNOVAL) 755 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid); 756 else if (dip) 757 ip->ino_data.gid = dip->ino_data.gid; 758 759 hammer_ref(&ip->lock); 760 761 if (pfsm) { 762 ip->pfsm = pfsm; 763 hammer_ref(&pfsm->lock); 764 error = 0; 765 } else if (dip->obj_localization == ip->obj_localization) { 766 ip->pfsm = dip->pfsm; 767 hammer_ref(&ip->pfsm->lock); 768 error = 0; 769 } else { 770 ip->pfsm = hammer_load_pseudofs(trans, 771 ip->obj_localization, 772 &error); 773 error = 0; /* ignore ENOENT */ 774 } 775 776 if (error) { 777 hammer_free_inode(ip); 778 ip = NULL; 779 } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 780 panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id); 781 /* not reached */ 782 hammer_free_inode(ip); 783 } 784 *ipp = ip; 785 return(error); 786 } 787 788 /* 789 * Final cleanup / freeing of an inode structure 790 */ 791 static void 792 hammer_free_inode(hammer_inode_t ip) 793 { 794 struct hammer_mount *hmp; 795 796 hmp = ip->hmp; 797 KKASSERT(ip->lock.refs == 1); 798 hammer_uncache_node(&ip->cache[0]); 799 hammer_uncache_node(&ip->cache[1]); 800 hammer_inode_wakereclaims(ip, 1); 801 if (ip->objid_cache) 802 hammer_clear_objid(ip); 803 --hammer_count_inodes; 804 --hmp->count_inodes; 805 if (ip->pfsm) { 806 hammer_rel_pseudofs(hmp, ip->pfsm); 807 ip->pfsm = NULL; 808 } 809 kfree(ip, hmp->m_inodes); 810 ip = NULL; 811 } 812 813 /* 814 * Retrieve pseudo-fs data. NULL will never be returned. 815 * 816 * If an error occurs *errorp will be set and a default template is returned, 817 * otherwise *errorp is set to 0. Typically when an error occurs it will 818 * be ENOENT. 819 */ 820 hammer_pseudofs_inmem_t 821 hammer_load_pseudofs(hammer_transaction_t trans, 822 u_int32_t localization, int *errorp) 823 { 824 hammer_mount_t hmp = trans->hmp; 825 hammer_inode_t ip; 826 hammer_pseudofs_inmem_t pfsm; 827 struct hammer_cursor cursor; 828 int bytes; 829 830 retry: 831 pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization); 832 if (pfsm) { 833 hammer_ref(&pfsm->lock); 834 *errorp = 0; 835 return(pfsm); 836 } 837 838 /* 839 * PFS records are stored in the root inode (not the PFS root inode, 840 * but the real root). Avoid an infinite recursion if loading 841 * the PFS for the real root. 842 */ 843 if (localization) { 844 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, 845 HAMMER_MAX_TID, 846 HAMMER_DEF_LOCALIZATION, 0, errorp); 847 } else { 848 ip = NULL; 849 } 850 851 pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO); 852 pfsm->localization = localization; 853 pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid; 854 pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid; 855 856 hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip); 857 cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION + 858 HAMMER_LOCALIZE_MISC; 859 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 860 cursor.key_beg.create_tid = 0; 861 cursor.key_beg.delete_tid = 0; 862 cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS; 863 cursor.key_beg.obj_type = 0; 864 cursor.key_beg.key = localization; 865 cursor.asof = HAMMER_MAX_TID; 866 cursor.flags |= HAMMER_CURSOR_ASOF; 867 868 if (ip) 869 *errorp = hammer_ip_lookup(&cursor); 870 else 871 *errorp = hammer_btree_lookup(&cursor); 872 if (*errorp == 0) { 873 *errorp = hammer_ip_resolve_data(&cursor); 874 if (*errorp == 0) { 875 if (cursor.data->pfsd.mirror_flags & 876 HAMMER_PFSD_DELETED) { 877 *errorp = ENOENT; 878 } else { 879 bytes = cursor.leaf->data_len; 880 if (bytes > sizeof(pfsm->pfsd)) 881 bytes = sizeof(pfsm->pfsd); 882 bcopy(cursor.data, &pfsm->pfsd, bytes); 883 } 884 } 885 } 886 hammer_done_cursor(&cursor); 887 888 pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid); 889 hammer_ref(&pfsm->lock); 890 if (ip) 891 hammer_rel_inode(ip, 0); 892 if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) { 893 kfree(pfsm, hmp->m_misc); 894 goto retry; 895 } 896 return(pfsm); 897 } 898 899 /* 900 * Store pseudo-fs data. The backend will automatically delete any prior 901 * on-disk pseudo-fs data but we have to delete in-memory versions. 902 */ 903 int 904 hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm) 905 { 906 struct hammer_cursor cursor; 907 hammer_record_t record; 908 hammer_inode_t ip; 909 int error; 910 911 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID, 912 HAMMER_DEF_LOCALIZATION, 0, &error); 913 retry: 914 pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid); 915 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip); 916 cursor.key_beg.localization = ip->obj_localization + 917 HAMMER_LOCALIZE_MISC; 918 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 919 cursor.key_beg.create_tid = 0; 920 cursor.key_beg.delete_tid = 0; 921 cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS; 922 cursor.key_beg.obj_type = 0; 923 cursor.key_beg.key = pfsm->localization; 924 cursor.asof = HAMMER_MAX_TID; 925 cursor.flags |= HAMMER_CURSOR_ASOF; 926 927 error = hammer_ip_lookup(&cursor); 928 if (error == 0 && hammer_cursor_inmem(&cursor)) { 929 record = cursor.iprec; 930 if (record->flags & HAMMER_RECF_INTERLOCK_BE) { 931 KKASSERT(cursor.deadlk_rec == NULL); 932 hammer_ref(&record->lock); 933 cursor.deadlk_rec = record; 934 error = EDEADLK; 935 } else { 936 record->flags |= HAMMER_RECF_DELETED_FE; 937 error = 0; 938 } 939 } 940 if (error == 0 || error == ENOENT) { 941 record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd)); 942 record->type = HAMMER_MEM_RECORD_GENERAL; 943 944 record->leaf.base.localization = ip->obj_localization + 945 HAMMER_LOCALIZE_MISC; 946 record->leaf.base.rec_type = HAMMER_RECTYPE_PFS; 947 record->leaf.base.key = pfsm->localization; 948 record->leaf.data_len = sizeof(pfsm->pfsd); 949 bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd)); 950 error = hammer_ip_add_record(trans, record); 951 } 952 hammer_done_cursor(&cursor); 953 if (error == EDEADLK) 954 goto retry; 955 hammer_rel_inode(ip, 0); 956 return(error); 957 } 958 959 /* 960 * Create a root directory for a PFS if one does not alredy exist. 961 * 962 * The PFS root stands alone so we must also bump the nlinks count 963 * to prevent it from being destroyed on release. 964 */ 965 int 966 hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred, 967 hammer_pseudofs_inmem_t pfsm) 968 { 969 hammer_inode_t ip; 970 struct vattr vap; 971 int error; 972 973 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID, 974 pfsm->localization, 0, &error); 975 if (ip == NULL) { 976 vattr_null(&vap); 977 vap.va_mode = 0755; 978 vap.va_type = VDIR; 979 error = hammer_create_inode(trans, &vap, cred, NULL, pfsm, &ip); 980 if (error == 0) { 981 ++ip->ino_data.nlinks; 982 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY); 983 } 984 } 985 if (ip) 986 hammer_rel_inode(ip, 0); 987 return(error); 988 } 989 990 /* 991 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY 992 * if we are unable to disassociate all the inodes. 993 */ 994 static 995 int 996 hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data) 997 { 998 int res; 999 1000 hammer_ref(&ip->lock); 1001 if (ip->lock.refs == 2 && ip->vp) 1002 vclean_unlocked(ip->vp); 1003 if (ip->lock.refs == 1 && ip->vp == NULL) 1004 res = 0; 1005 else 1006 res = -1; /* stop, someone is using the inode */ 1007 hammer_rel_inode(ip, 0); 1008 return(res); 1009 } 1010 1011 int 1012 hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization) 1013 { 1014 int res; 1015 int try; 1016 1017 for (try = res = 0; try < 4; ++try) { 1018 res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root, 1019 hammer_inode_pfs_cmp, 1020 hammer_unload_pseudofs_callback, 1021 &localization); 1022 if (res == 0 && try > 1) 1023 break; 1024 hammer_flusher_sync(trans->hmp); 1025 } 1026 if (res != 0) 1027 res = ENOTEMPTY; 1028 return(res); 1029 } 1030 1031 1032 /* 1033 * Release a reference on a PFS 1034 */ 1035 void 1036 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm) 1037 { 1038 hammer_unref(&pfsm->lock); 1039 if (pfsm->lock.refs == 0) { 1040 RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm); 1041 kfree(pfsm, hmp->m_misc); 1042 } 1043 } 1044 1045 /* 1046 * Called by hammer_sync_inode(). 1047 */ 1048 static int 1049 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip) 1050 { 1051 hammer_transaction_t trans = cursor->trans; 1052 hammer_record_t record; 1053 int error; 1054 int redirty; 1055 1056 retry: 1057 error = 0; 1058 1059 /* 1060 * If the inode has a presence on-disk then locate it and mark 1061 * it deleted, setting DELONDISK. 1062 * 1063 * The record may or may not be physically deleted, depending on 1064 * the retention policy. 1065 */ 1066 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) == 1067 HAMMER_INODE_ONDISK) { 1068 hammer_normalize_cursor(cursor); 1069 cursor->key_beg.localization = ip->obj_localization + 1070 HAMMER_LOCALIZE_INODE; 1071 cursor->key_beg.obj_id = ip->obj_id; 1072 cursor->key_beg.key = 0; 1073 cursor->key_beg.create_tid = 0; 1074 cursor->key_beg.delete_tid = 0; 1075 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE; 1076 cursor->key_beg.obj_type = 0; 1077 cursor->asof = ip->obj_asof; 1078 cursor->flags &= ~HAMMER_CURSOR_INITMASK; 1079 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF; 1080 cursor->flags |= HAMMER_CURSOR_BACKEND; 1081 1082 error = hammer_btree_lookup(cursor); 1083 if (hammer_debug_inode) 1084 kprintf("IPDEL %p %08x %d", ip, ip->flags, error); 1085 1086 if (error == 0) { 1087 error = hammer_ip_delete_record(cursor, ip, trans->tid); 1088 if (hammer_debug_inode) 1089 kprintf(" error %d\n", error); 1090 if (error == 0) { 1091 ip->flags |= HAMMER_INODE_DELONDISK; 1092 } 1093 if (cursor->node) 1094 hammer_cache_node(&ip->cache[0], cursor->node); 1095 } 1096 if (error == EDEADLK) { 1097 hammer_done_cursor(cursor); 1098 error = hammer_init_cursor(trans, cursor, 1099 &ip->cache[0], ip); 1100 if (hammer_debug_inode) 1101 kprintf("IPDED %p %d\n", ip, error); 1102 if (error == 0) 1103 goto retry; 1104 } 1105 } 1106 1107 /* 1108 * Ok, write out the initial record or a new record (after deleting 1109 * the old one), unless the DELETED flag is set. This routine will 1110 * clear DELONDISK if it writes out a record. 1111 * 1112 * Update our inode statistics if this is the first application of 1113 * the inode on-disk. 1114 */ 1115 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) { 1116 /* 1117 * Generate a record and write it to the media. We clean-up 1118 * the state before releasing so we do not have to set-up 1119 * a flush_group. 1120 */ 1121 record = hammer_alloc_mem_record(ip, 0); 1122 record->type = HAMMER_MEM_RECORD_INODE; 1123 record->flush_state = HAMMER_FST_FLUSH; 1124 record->leaf = ip->sync_ino_leaf; 1125 record->leaf.base.create_tid = trans->tid; 1126 record->leaf.data_len = sizeof(ip->sync_ino_data); 1127 record->leaf.create_ts = trans->time32; 1128 record->data = (void *)&ip->sync_ino_data; 1129 record->flags |= HAMMER_RECF_INTERLOCK_BE; 1130 1131 /* 1132 * If this flag is set we cannot sync the new file size 1133 * because we haven't finished related truncations. The 1134 * inode will be flushed in another flush group to finish 1135 * the job. 1136 */ 1137 if ((ip->flags & HAMMER_INODE_WOULDBLOCK) && 1138 ip->sync_ino_data.size != ip->ino_data.size) { 1139 redirty = 1; 1140 ip->sync_ino_data.size = ip->ino_data.size; 1141 } else { 1142 redirty = 0; 1143 } 1144 1145 for (;;) { 1146 error = hammer_ip_sync_record_cursor(cursor, record); 1147 if (hammer_debug_inode) 1148 kprintf("GENREC %p rec %08x %d\n", 1149 ip, record->flags, error); 1150 if (error != EDEADLK) 1151 break; 1152 hammer_done_cursor(cursor); 1153 error = hammer_init_cursor(trans, cursor, 1154 &ip->cache[0], ip); 1155 if (hammer_debug_inode) 1156 kprintf("GENREC reinit %d\n", error); 1157 if (error) 1158 break; 1159 } 1160 1161 /* 1162 * The record isn't managed by the inode's record tree, 1163 * destroy it whether we succeed or fail. 1164 */ 1165 record->flags &= ~HAMMER_RECF_INTERLOCK_BE; 1166 record->flags |= HAMMER_RECF_DELETED_FE | HAMMER_RECF_COMMITTED; 1167 record->flush_state = HAMMER_FST_IDLE; 1168 hammer_rel_mem_record(record); 1169 1170 /* 1171 * Finish up. 1172 */ 1173 if (error == 0) { 1174 if (hammer_debug_inode) 1175 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags); 1176 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | 1177 HAMMER_INODE_ATIME | 1178 HAMMER_INODE_MTIME); 1179 ip->flags &= ~HAMMER_INODE_DELONDISK; 1180 if (redirty) 1181 ip->sync_flags |= HAMMER_INODE_DDIRTY; 1182 1183 /* 1184 * Root volume count of inodes 1185 */ 1186 hammer_sync_lock_sh(trans); 1187 if ((ip->flags & HAMMER_INODE_ONDISK) == 0) { 1188 hammer_modify_volume_field(trans, 1189 trans->rootvol, 1190 vol0_stat_inodes); 1191 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes; 1192 hammer_modify_volume_done(trans->rootvol); 1193 ip->flags |= HAMMER_INODE_ONDISK; 1194 if (hammer_debug_inode) 1195 kprintf("NOWONDISK %p\n", ip); 1196 } 1197 hammer_sync_unlock(trans); 1198 } 1199 } 1200 1201 /* 1202 * If the inode has been destroyed, clean out any left-over flags 1203 * that may have been set by the frontend. 1204 */ 1205 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 1206 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | 1207 HAMMER_INODE_ATIME | 1208 HAMMER_INODE_MTIME); 1209 } 1210 return(error); 1211 } 1212 1213 /* 1214 * Update only the itimes fields. 1215 * 1216 * ATIME can be updated without generating any UNDO. MTIME is updated 1217 * with UNDO so it is guaranteed to be synchronized properly in case of 1218 * a crash. 1219 * 1220 * Neither field is included in the B-Tree leaf element's CRC, which is how 1221 * we can get away with updating ATIME the way we do. 1222 */ 1223 static int 1224 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip) 1225 { 1226 hammer_transaction_t trans = cursor->trans; 1227 int error; 1228 1229 retry: 1230 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) != 1231 HAMMER_INODE_ONDISK) { 1232 return(0); 1233 } 1234 1235 hammer_normalize_cursor(cursor); 1236 cursor->key_beg.localization = ip->obj_localization + 1237 HAMMER_LOCALIZE_INODE; 1238 cursor->key_beg.obj_id = ip->obj_id; 1239 cursor->key_beg.key = 0; 1240 cursor->key_beg.create_tid = 0; 1241 cursor->key_beg.delete_tid = 0; 1242 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE; 1243 cursor->key_beg.obj_type = 0; 1244 cursor->asof = ip->obj_asof; 1245 cursor->flags &= ~HAMMER_CURSOR_INITMASK; 1246 cursor->flags |= HAMMER_CURSOR_ASOF; 1247 cursor->flags |= HAMMER_CURSOR_GET_LEAF; 1248 cursor->flags |= HAMMER_CURSOR_GET_DATA; 1249 cursor->flags |= HAMMER_CURSOR_BACKEND; 1250 1251 error = hammer_btree_lookup(cursor); 1252 if (error == 0) { 1253 hammer_cache_node(&ip->cache[0], cursor->node); 1254 if (ip->sync_flags & HAMMER_INODE_MTIME) { 1255 /* 1256 * Updating MTIME requires an UNDO. Just cover 1257 * both atime and mtime. 1258 */ 1259 hammer_sync_lock_sh(trans); 1260 hammer_modify_buffer(trans, cursor->data_buffer, 1261 HAMMER_ITIMES_BASE(&cursor->data->inode), 1262 HAMMER_ITIMES_BYTES); 1263 cursor->data->inode.atime = ip->sync_ino_data.atime; 1264 cursor->data->inode.mtime = ip->sync_ino_data.mtime; 1265 hammer_modify_buffer_done(cursor->data_buffer); 1266 hammer_sync_unlock(trans); 1267 } else if (ip->sync_flags & HAMMER_INODE_ATIME) { 1268 /* 1269 * Updating atime only can be done in-place with 1270 * no UNDO. 1271 */ 1272 hammer_sync_lock_sh(trans); 1273 hammer_modify_buffer(trans, cursor->data_buffer, 1274 NULL, 0); 1275 cursor->data->inode.atime = ip->sync_ino_data.atime; 1276 hammer_modify_buffer_done(cursor->data_buffer); 1277 hammer_sync_unlock(trans); 1278 } 1279 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME); 1280 } 1281 if (error == EDEADLK) { 1282 hammer_done_cursor(cursor); 1283 error = hammer_init_cursor(trans, cursor, 1284 &ip->cache[0], ip); 1285 if (error == 0) 1286 goto retry; 1287 } 1288 return(error); 1289 } 1290 1291 /* 1292 * Release a reference on an inode, flush as requested. 1293 * 1294 * On the last reference we queue the inode to the flusher for its final 1295 * disposition. 1296 */ 1297 void 1298 hammer_rel_inode(struct hammer_inode *ip, int flush) 1299 { 1300 /*hammer_mount_t hmp = ip->hmp;*/ 1301 1302 /* 1303 * Handle disposition when dropping the last ref. 1304 */ 1305 for (;;) { 1306 if (ip->lock.refs == 1) { 1307 /* 1308 * Determine whether on-disk action is needed for 1309 * the inode's final disposition. 1310 */ 1311 KKASSERT(ip->vp == NULL); 1312 hammer_inode_unloadable_check(ip, 0); 1313 if (ip->flags & HAMMER_INODE_MODMASK) { 1314 hammer_flush_inode(ip, 0); 1315 } else if (ip->lock.refs == 1) { 1316 hammer_unload_inode(ip); 1317 break; 1318 } 1319 } else { 1320 if (flush) 1321 hammer_flush_inode(ip, 0); 1322 1323 /* 1324 * The inode still has multiple refs, try to drop 1325 * one ref. 1326 */ 1327 KKASSERT(ip->lock.refs >= 1); 1328 if (ip->lock.refs > 1) { 1329 hammer_unref(&ip->lock); 1330 break; 1331 } 1332 } 1333 } 1334 } 1335 1336 /* 1337 * Unload and destroy the specified inode. Must be called with one remaining 1338 * reference. The reference is disposed of. 1339 * 1340 * The inode must be completely clean. 1341 */ 1342 static int 1343 hammer_unload_inode(struct hammer_inode *ip) 1344 { 1345 hammer_mount_t hmp = ip->hmp; 1346 1347 KASSERT(ip->lock.refs == 1, 1348 ("hammer_unload_inode: %d refs\n", ip->lock.refs)); 1349 KKASSERT(ip->vp == NULL); 1350 KKASSERT(ip->flush_state == HAMMER_FST_IDLE); 1351 KKASSERT(ip->cursor_ip_refs == 0); 1352 KKASSERT(ip->lock.lockcount == 0); 1353 KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0); 1354 1355 KKASSERT(RB_EMPTY(&ip->rec_tree)); 1356 KKASSERT(TAILQ_EMPTY(&ip->target_list)); 1357 1358 RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip); 1359 1360 hammer_free_inode(ip); 1361 return(0); 1362 } 1363 1364 /* 1365 * Called during unmounting if a critical error occured. The in-memory 1366 * inode and all related structures are destroyed. 1367 * 1368 * If a critical error did not occur the unmount code calls the standard 1369 * release and asserts that the inode is gone. 1370 */ 1371 int 1372 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused) 1373 { 1374 hammer_record_t rec; 1375 1376 /* 1377 * Get rid of the inodes in-memory records, regardless of their 1378 * state, and clear the mod-mask. 1379 */ 1380 while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) { 1381 TAILQ_REMOVE(&ip->target_list, rec, target_entry); 1382 rec->target_ip = NULL; 1383 if (rec->flush_state == HAMMER_FST_SETUP) 1384 rec->flush_state = HAMMER_FST_IDLE; 1385 } 1386 while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) { 1387 if (rec->flush_state == HAMMER_FST_FLUSH) 1388 --rec->flush_group->refs; 1389 else 1390 hammer_ref(&rec->lock); 1391 KKASSERT(rec->lock.refs == 1); 1392 rec->flush_state = HAMMER_FST_IDLE; 1393 rec->flush_group = NULL; 1394 rec->flags |= HAMMER_RECF_DELETED_FE; 1395 rec->flags |= HAMMER_RECF_DELETED_BE; 1396 hammer_rel_mem_record(rec); 1397 } 1398 ip->flags &= ~HAMMER_INODE_MODMASK; 1399 ip->sync_flags &= ~HAMMER_INODE_MODMASK; 1400 KKASSERT(ip->vp == NULL); 1401 1402 /* 1403 * Remove the inode from any flush group, force it idle. FLUSH 1404 * and SETUP states have an inode ref. 1405 */ 1406 switch(ip->flush_state) { 1407 case HAMMER_FST_FLUSH: 1408 TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry); 1409 --ip->flush_group->refs; 1410 ip->flush_group = NULL; 1411 /* fall through */ 1412 case HAMMER_FST_SETUP: 1413 hammer_unref(&ip->lock); 1414 ip->flush_state = HAMMER_FST_IDLE; 1415 /* fall through */ 1416 case HAMMER_FST_IDLE: 1417 break; 1418 } 1419 1420 /* 1421 * There shouldn't be any associated vnode. The unload needs at 1422 * least one ref, if we do have a vp steal its ip ref. 1423 */ 1424 if (ip->vp) { 1425 kprintf("hammer_destroy_inode_callback: Unexpected " 1426 "vnode association ip %p vp %p\n", ip, ip->vp); 1427 ip->vp->v_data = NULL; 1428 ip->vp = NULL; 1429 } else { 1430 hammer_ref(&ip->lock); 1431 } 1432 hammer_unload_inode(ip); 1433 return(0); 1434 } 1435 1436 /* 1437 * Called on mount -u when switching from RW to RO or vise-versa. Adjust 1438 * the read-only flag for cached inodes. 1439 * 1440 * This routine is called from a RB_SCAN(). 1441 */ 1442 int 1443 hammer_reload_inode(hammer_inode_t ip, void *arg __unused) 1444 { 1445 hammer_mount_t hmp = ip->hmp; 1446 1447 if (hmp->ronly || hmp->asof != HAMMER_MAX_TID) 1448 ip->flags |= HAMMER_INODE_RO; 1449 else 1450 ip->flags &= ~HAMMER_INODE_RO; 1451 return(0); 1452 } 1453 1454 /* 1455 * A transaction has modified an inode, requiring updates as specified by 1456 * the passed flags. 1457 * 1458 * HAMMER_INODE_DDIRTY: Inode data has been updated 1459 * HAMMER_INODE_XDIRTY: Dirty in-memory records 1460 * HAMMER_INODE_BUFS: Dirty buffer cache buffers 1461 * HAMMER_INODE_DELETED: Inode record/data must be deleted 1462 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated 1463 */ 1464 void 1465 hammer_modify_inode(hammer_inode_t ip, int flags) 1466 { 1467 /* 1468 * ronly of 0 or 2 does not trigger assertion. 1469 * 2 is a special error state 1470 */ 1471 KKASSERT(ip->hmp->ronly != 1 || 1472 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 1473 HAMMER_INODE_BUFS | HAMMER_INODE_DELETED | 1474 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0); 1475 if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) { 1476 ip->flags |= HAMMER_INODE_RSV_INODES; 1477 ++ip->hmp->rsv_inodes; 1478 } 1479 1480 ip->flags |= flags; 1481 } 1482 1483 /* 1484 * Request that an inode be flushed. This whole mess cannot block and may 1485 * recurse (if not synchronous). Once requested HAMMER will attempt to 1486 * actively flush the inode until the flush can be done. 1487 * 1488 * The inode may already be flushing, or may be in a setup state. We can 1489 * place the inode in a flushing state if it is currently idle and flag it 1490 * to reflush if it is currently flushing. 1491 * 1492 * Upon return if the inode could not be flushed due to a setup 1493 * dependancy, then it will be automatically flushed when the dependancy 1494 * is satisfied. 1495 */ 1496 void 1497 hammer_flush_inode(hammer_inode_t ip, int flags) 1498 { 1499 hammer_mount_t hmp; 1500 hammer_flush_group_t flg; 1501 int good; 1502 1503 /* 1504 * next_flush_group is the first flush group we can place the inode 1505 * in. It may be NULL. If it becomes full we append a new flush 1506 * group and make that the next_flush_group. 1507 */ 1508 hmp = ip->hmp; 1509 while ((flg = hmp->next_flush_group) != NULL) { 1510 KKASSERT(flg->running == 0); 1511 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit) 1512 break; 1513 hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry); 1514 hammer_flusher_async(ip->hmp, flg); 1515 } 1516 if (flg == NULL) { 1517 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO); 1518 hmp->next_flush_group = flg; 1519 TAILQ_INIT(&flg->flush_list); 1520 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry); 1521 } 1522 1523 /* 1524 * Trivial 'nothing to flush' case. If the inode is in a SETUP 1525 * state we have to put it back into an IDLE state so we can 1526 * drop the extra ref. 1527 * 1528 * If we have a parent dependancy we must still fall through 1529 * so we can run it. 1530 */ 1531 if ((ip->flags & HAMMER_INODE_MODMASK) == 0) { 1532 if (ip->flush_state == HAMMER_FST_SETUP && 1533 TAILQ_EMPTY(&ip->target_list)) { 1534 ip->flush_state = HAMMER_FST_IDLE; 1535 hammer_rel_inode(ip, 0); 1536 } 1537 if (ip->flush_state == HAMMER_FST_IDLE) 1538 return; 1539 } 1540 1541 /* 1542 * Our flush action will depend on the current state. 1543 */ 1544 switch(ip->flush_state) { 1545 case HAMMER_FST_IDLE: 1546 /* 1547 * We have no dependancies and can flush immediately. Some 1548 * our children may not be flushable so we have to re-test 1549 * with that additional knowledge. 1550 */ 1551 hammer_flush_inode_core(ip, flg, flags); 1552 break; 1553 case HAMMER_FST_SETUP: 1554 /* 1555 * Recurse upwards through dependancies via target_list 1556 * and start their flusher actions going if possible. 1557 * 1558 * 'good' is our connectivity. -1 means we have none and 1559 * can't flush, 0 means there weren't any dependancies, and 1560 * 1 means we have good connectivity. 1561 */ 1562 good = hammer_setup_parent_inodes(ip, flg); 1563 1564 if (good >= 0) { 1565 /* 1566 * We can continue if good >= 0. Determine how 1567 * many records under our inode can be flushed (and 1568 * mark them). 1569 */ 1570 hammer_flush_inode_core(ip, flg, flags); 1571 } else { 1572 /* 1573 * Parent has no connectivity, tell it to flush 1574 * us as soon as it does. 1575 * 1576 * The REFLUSH flag is also needed to trigger 1577 * dependancy wakeups. 1578 */ 1579 ip->flags |= HAMMER_INODE_CONN_DOWN | 1580 HAMMER_INODE_REFLUSH; 1581 if (flags & HAMMER_FLUSH_SIGNAL) { 1582 ip->flags |= HAMMER_INODE_RESIGNAL; 1583 hammer_flusher_async(ip->hmp, flg); 1584 } 1585 } 1586 break; 1587 case HAMMER_FST_FLUSH: 1588 /* 1589 * We are already flushing, flag the inode to reflush 1590 * if needed after it completes its current flush. 1591 * 1592 * The REFLUSH flag is also needed to trigger 1593 * dependancy wakeups. 1594 */ 1595 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0) 1596 ip->flags |= HAMMER_INODE_REFLUSH; 1597 if (flags & HAMMER_FLUSH_SIGNAL) { 1598 ip->flags |= HAMMER_INODE_RESIGNAL; 1599 hammer_flusher_async(ip->hmp, flg); 1600 } 1601 break; 1602 } 1603 } 1604 1605 /* 1606 * Scan ip->target_list, which is a list of records owned by PARENTS to our 1607 * ip which reference our ip. 1608 * 1609 * XXX This is a huge mess of recursive code, but not one bit of it blocks 1610 * so for now do not ref/deref the structures. Note that if we use the 1611 * ref/rel code later, the rel CAN block. 1612 */ 1613 static int 1614 hammer_setup_parent_inodes(hammer_inode_t ip, hammer_flush_group_t flg) 1615 { 1616 hammer_record_t depend; 1617 int good; 1618 int r; 1619 1620 good = 0; 1621 TAILQ_FOREACH(depend, &ip->target_list, target_entry) { 1622 r = hammer_setup_parent_inodes_helper(depend, flg); 1623 KKASSERT(depend->target_ip == ip); 1624 if (r < 0 && good == 0) 1625 good = -1; 1626 if (r > 0) 1627 good = 1; 1628 } 1629 return(good); 1630 } 1631 1632 /* 1633 * This helper function takes a record representing the dependancy between 1634 * the parent inode and child inode. 1635 * 1636 * record->ip = parent inode 1637 * record->target_ip = child inode 1638 * 1639 * We are asked to recurse upwards and convert the record from SETUP 1640 * to FLUSH if possible. 1641 * 1642 * Return 1 if the record gives us connectivity 1643 * 1644 * Return 0 if the record is not relevant 1645 * 1646 * Return -1 if we can't resolve the dependancy and there is no connectivity. 1647 */ 1648 static int 1649 hammer_setup_parent_inodes_helper(hammer_record_t record, 1650 hammer_flush_group_t flg) 1651 { 1652 hammer_mount_t hmp; 1653 hammer_inode_t pip; 1654 int good; 1655 1656 KKASSERT(record->flush_state != HAMMER_FST_IDLE); 1657 pip = record->ip; 1658 hmp = pip->hmp; 1659 1660 /* 1661 * If the record is already flushing, is it in our flush group? 1662 * 1663 * If it is in our flush group but it is a general record or a 1664 * delete-on-disk, it does not improve our connectivity (return 0), 1665 * and if the target inode is not trying to destroy itself we can't 1666 * allow the operation yet anyway (the second return -1). 1667 */ 1668 if (record->flush_state == HAMMER_FST_FLUSH) { 1669 /* 1670 * If not in our flush group ask the parent to reflush 1671 * us as soon as possible. 1672 */ 1673 if (record->flush_group != flg) { 1674 pip->flags |= HAMMER_INODE_REFLUSH; 1675 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1676 return(-1); 1677 } 1678 1679 /* 1680 * If in our flush group everything is already set up, 1681 * just return whether the record will improve our 1682 * visibility or not. 1683 */ 1684 if (record->type == HAMMER_MEM_RECORD_ADD) 1685 return(1); 1686 return(0); 1687 } 1688 1689 /* 1690 * It must be a setup record. Try to resolve the setup dependancies 1691 * by recursing upwards so we can place ip on the flush list. 1692 */ 1693 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1694 1695 good = hammer_setup_parent_inodes(pip, flg); 1696 1697 /* 1698 * If good < 0 the parent has no connectivity and we cannot safely 1699 * flush the directory entry, which also means we can't flush our 1700 * ip. Flag the parent and us for downward recursion once the 1701 * parent's connectivity is resolved. 1702 */ 1703 if (good < 0) { 1704 /* pip->flags |= HAMMER_INODE_CONN_DOWN; set by recursion */ 1705 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1706 return(good); 1707 } 1708 1709 /* 1710 * We are go, place the parent inode in a flushing state so we can 1711 * place its record in a flushing state. Note that the parent 1712 * may already be flushing. The record must be in the same flush 1713 * group as the parent. 1714 */ 1715 if (pip->flush_state != HAMMER_FST_FLUSH) 1716 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION); 1717 KKASSERT(pip->flush_state == HAMMER_FST_FLUSH); 1718 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1719 1720 #if 0 1721 if (record->type == HAMMER_MEM_RECORD_DEL && 1722 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) { 1723 /* 1724 * Regardless of flushing state we cannot sync this path if the 1725 * record represents a delete-on-disk but the target inode 1726 * is not ready to sync its own deletion. 1727 * 1728 * XXX need to count effective nlinks to determine whether 1729 * the flush is ok, otherwise removing a hardlink will 1730 * just leave the DEL record to rot. 1731 */ 1732 record->target_ip->flags |= HAMMER_INODE_REFLUSH; 1733 return(-1); 1734 } else 1735 #endif 1736 if (pip->flush_group == flg) { 1737 /* 1738 * Because we have not calculated nlinks yet we can just 1739 * set records to the flush state if the parent is in 1740 * the same flush group as we are. 1741 */ 1742 record->flush_state = HAMMER_FST_FLUSH; 1743 record->flush_group = flg; 1744 ++record->flush_group->refs; 1745 hammer_ref(&record->lock); 1746 1747 /* 1748 * A general directory-add contributes to our visibility. 1749 * 1750 * Otherwise it is probably a directory-delete or 1751 * delete-on-disk record and does not contribute to our 1752 * visbility (but we can still flush it). 1753 */ 1754 if (record->type == HAMMER_MEM_RECORD_ADD) 1755 return(1); 1756 return(0); 1757 } else { 1758 /* 1759 * If the parent is not in our flush group we cannot 1760 * flush this record yet, there is no visibility. 1761 * We tell the parent to reflush and mark ourselves 1762 * so the parent knows it should flush us too. 1763 */ 1764 pip->flags |= HAMMER_INODE_REFLUSH; 1765 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1766 return(-1); 1767 } 1768 } 1769 1770 /* 1771 * This is the core routine placing an inode into the FST_FLUSH state. 1772 */ 1773 static void 1774 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags) 1775 { 1776 int go_count; 1777 1778 /* 1779 * Set flush state and prevent the flusher from cycling into 1780 * the next flush group. Do not place the ip on the list yet. 1781 * Inodes not in the idle state get an extra reference. 1782 */ 1783 KKASSERT(ip->flush_state != HAMMER_FST_FLUSH); 1784 if (ip->flush_state == HAMMER_FST_IDLE) 1785 hammer_ref(&ip->lock); 1786 ip->flush_state = HAMMER_FST_FLUSH; 1787 ip->flush_group = flg; 1788 ++ip->hmp->flusher.group_lock; 1789 ++ip->hmp->count_iqueued; 1790 ++hammer_count_iqueued; 1791 ++flg->total_count; 1792 1793 /* 1794 * If the flush group reaches the autoflush limit we want to signal 1795 * the flusher. This is particularly important for remove()s. 1796 */ 1797 if (flg->total_count == hammer_autoflush) 1798 flags |= HAMMER_FLUSH_SIGNAL; 1799 1800 /* 1801 * We need to be able to vfsync/truncate from the backend. 1802 */ 1803 KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0); 1804 if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) { 1805 ip->flags |= HAMMER_INODE_VHELD; 1806 vref(ip->vp); 1807 } 1808 1809 /* 1810 * Figure out how many in-memory records we can actually flush 1811 * (not including inode meta-data, buffers, etc). 1812 */ 1813 KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0); 1814 if (flags & HAMMER_FLUSH_RECURSION) { 1815 /* 1816 * If this is a upwards recursion we do not want to 1817 * recurse down again! 1818 */ 1819 go_count = 1; 1820 #if 0 1821 } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 1822 /* 1823 * No new records are added if we must complete a flush 1824 * from a previous cycle, but we do have to move the records 1825 * from the previous cycle to the current one. 1826 */ 1827 #if 0 1828 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 1829 hammer_syncgrp_child_callback, NULL); 1830 #endif 1831 go_count = 1; 1832 #endif 1833 } else { 1834 /* 1835 * Normal flush, scan records and bring them into the flush. 1836 * Directory adds and deletes are usually skipped (they are 1837 * grouped with the related inode rather then with the 1838 * directory). 1839 * 1840 * go_count can be negative, which means the scan aborted 1841 * due to the flush group being over-full and we should 1842 * flush what we have. 1843 */ 1844 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 1845 hammer_setup_child_callback, NULL); 1846 } 1847 1848 /* 1849 * This is a more involved test that includes go_count. If we 1850 * can't flush, flag the inode and return. If go_count is 0 we 1851 * were are unable to flush any records in our rec_tree and 1852 * must ignore the XDIRTY flag. 1853 */ 1854 if (go_count == 0) { 1855 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) { 1856 --ip->hmp->count_iqueued; 1857 --hammer_count_iqueued; 1858 1859 --flg->total_count; 1860 ip->flush_state = HAMMER_FST_SETUP; 1861 ip->flush_group = NULL; 1862 if (ip->flags & HAMMER_INODE_VHELD) { 1863 ip->flags &= ~HAMMER_INODE_VHELD; 1864 vrele(ip->vp); 1865 } 1866 1867 /* 1868 * REFLUSH is needed to trigger dependancy wakeups 1869 * when an inode is in SETUP. 1870 */ 1871 ip->flags |= HAMMER_INODE_REFLUSH; 1872 if (flags & HAMMER_FLUSH_SIGNAL) { 1873 ip->flags |= HAMMER_INODE_RESIGNAL; 1874 hammer_flusher_async(ip->hmp, flg); 1875 } 1876 if (--ip->hmp->flusher.group_lock == 0) 1877 wakeup(&ip->hmp->flusher.group_lock); 1878 return; 1879 } 1880 } 1881 1882 /* 1883 * Snapshot the state of the inode for the backend flusher. 1884 * 1885 * We continue to retain save_trunc_off even when all truncations 1886 * have been resolved as an optimization to determine if we can 1887 * skip the B-Tree lookup for overwrite deletions. 1888 * 1889 * NOTE: The DELETING flag is a mod flag, but it is also sticky, 1890 * and stays in ip->flags. Once set, it stays set until the 1891 * inode is destroyed. 1892 */ 1893 if (ip->flags & HAMMER_INODE_TRUNCATED) { 1894 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0); 1895 ip->sync_trunc_off = ip->trunc_off; 1896 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL; 1897 ip->flags &= ~HAMMER_INODE_TRUNCATED; 1898 ip->sync_flags |= HAMMER_INODE_TRUNCATED; 1899 1900 /* 1901 * The save_trunc_off used to cache whether the B-Tree 1902 * holds any records past that point is not used until 1903 * after the truncation has succeeded, so we can safely 1904 * set it now. 1905 */ 1906 if (ip->save_trunc_off > ip->sync_trunc_off) 1907 ip->save_trunc_off = ip->sync_trunc_off; 1908 } 1909 ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK & 1910 ~HAMMER_INODE_TRUNCATED); 1911 ip->sync_ino_leaf = ip->ino_leaf; 1912 ip->sync_ino_data = ip->ino_data; 1913 ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED; 1914 #ifdef DEBUG_TRUNCATE 1915 if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp) 1916 kprintf("truncateS %016llx\n", ip->sync_trunc_off); 1917 #endif 1918 1919 /* 1920 * The flusher list inherits our inode and reference. 1921 */ 1922 KKASSERT(flg->running == 0); 1923 TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry); 1924 if (--ip->hmp->flusher.group_lock == 0) 1925 wakeup(&ip->hmp->flusher.group_lock); 1926 1927 if (flags & HAMMER_FLUSH_SIGNAL) { 1928 hammer_flusher_async(ip->hmp, flg); 1929 } 1930 } 1931 1932 /* 1933 * Callback for scan of ip->rec_tree. Try to include each record in our 1934 * flush. ip->flush_group has been set but the inode has not yet been 1935 * moved into a flushing state. 1936 * 1937 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on 1938 * both inodes. 1939 * 1940 * We return 1 for any record placed or found in FST_FLUSH, which prevents 1941 * the caller from shortcutting the flush. 1942 */ 1943 static int 1944 hammer_setup_child_callback(hammer_record_t rec, void *data) 1945 { 1946 hammer_flush_group_t flg; 1947 hammer_inode_t target_ip; 1948 hammer_inode_t ip; 1949 int r; 1950 1951 /* 1952 * Deleted records are ignored. Note that the flush detects deleted 1953 * front-end records at multiple points to deal with races. This is 1954 * just the first line of defense. The only time DELETED_FE cannot 1955 * be set is when HAMMER_RECF_INTERLOCK_BE is set. 1956 * 1957 * Don't get confused between record deletion and, say, directory 1958 * entry deletion. The deletion of a directory entry that is on 1959 * the media has nothing to do with the record deletion flags. 1960 */ 1961 if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) { 1962 if (rec->flush_state == HAMMER_FST_FLUSH) { 1963 KKASSERT(rec->flush_group == rec->ip->flush_group); 1964 r = 1; 1965 } else { 1966 r = 0; 1967 } 1968 return(r); 1969 } 1970 1971 /* 1972 * If the record is in an idle state it has no dependancies and 1973 * can be flushed. 1974 */ 1975 ip = rec->ip; 1976 flg = ip->flush_group; 1977 r = 0; 1978 1979 switch(rec->flush_state) { 1980 case HAMMER_FST_IDLE: 1981 /* 1982 * The record has no setup dependancy, we can flush it. 1983 */ 1984 KKASSERT(rec->target_ip == NULL); 1985 rec->flush_state = HAMMER_FST_FLUSH; 1986 rec->flush_group = flg; 1987 ++flg->refs; 1988 hammer_ref(&rec->lock); 1989 r = 1; 1990 break; 1991 case HAMMER_FST_SETUP: 1992 /* 1993 * The record has a setup dependancy. These are typically 1994 * directory entry adds and deletes. Such entries will be 1995 * flushed when their inodes are flushed so we do not 1996 * usually have to add them to the flush here. However, 1997 * if the target_ip has set HAMMER_INODE_CONN_DOWN then 1998 * it is asking us to flush this record (and it). 1999 */ 2000 target_ip = rec->target_ip; 2001 KKASSERT(target_ip != NULL); 2002 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE); 2003 2004 /* 2005 * If the target IP is already flushing in our group 2006 * we could associate the record, but target_ip has 2007 * already synced ino_data to sync_ino_data and we 2008 * would also have to adjust nlinks. Plus there are 2009 * ordering issues for adds and deletes. 2010 * 2011 * Reflush downward if this is an ADD, and upward if 2012 * this is a DEL. 2013 */ 2014 if (target_ip->flush_state == HAMMER_FST_FLUSH) { 2015 if (rec->flush_state == HAMMER_MEM_RECORD_ADD) 2016 ip->flags |= HAMMER_INODE_REFLUSH; 2017 else 2018 target_ip->flags |= HAMMER_INODE_REFLUSH; 2019 break; 2020 } 2021 2022 /* 2023 * Target IP is not yet flushing. This can get complex 2024 * because we have to be careful about the recursion. 2025 * 2026 * Directories create an issue for us in that if a flush 2027 * of a directory is requested the expectation is to flush 2028 * any pending directory entries, but this will cause the 2029 * related inodes to recursively flush as well. We can't 2030 * really defer the operation so just get as many as we 2031 * can and 2032 */ 2033 #if 0 2034 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 && 2035 (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) { 2036 /* 2037 * We aren't reclaiming and the target ip was not 2038 * previously prevented from flushing due to this 2039 * record dependancy. Do not flush this record. 2040 */ 2041 /*r = 0;*/ 2042 } else 2043 #endif 2044 if (flg->total_count + flg->refs > 2045 ip->hmp->undo_rec_limit) { 2046 /* 2047 * Our flush group is over-full and we risk blowing 2048 * out the UNDO FIFO. Stop the scan, flush what we 2049 * have, then reflush the directory. 2050 * 2051 * The directory may be forced through multiple 2052 * flush groups before it can be completely 2053 * flushed. 2054 */ 2055 ip->flags |= HAMMER_INODE_RESIGNAL | 2056 HAMMER_INODE_REFLUSH; 2057 r = -1; 2058 } else if (rec->type == HAMMER_MEM_RECORD_ADD) { 2059 /* 2060 * If the target IP is not flushing we can force 2061 * it to flush, even if it is unable to write out 2062 * any of its own records we have at least one in 2063 * hand that we CAN deal with. 2064 */ 2065 rec->flush_state = HAMMER_FST_FLUSH; 2066 rec->flush_group = flg; 2067 ++flg->refs; 2068 hammer_ref(&rec->lock); 2069 hammer_flush_inode_core(target_ip, flg, 2070 HAMMER_FLUSH_RECURSION); 2071 r = 1; 2072 } else { 2073 /* 2074 * General or delete-on-disk record. 2075 * 2076 * XXX this needs help. If a delete-on-disk we could 2077 * disconnect the target. If the target has its own 2078 * dependancies they really need to be flushed. 2079 * 2080 * XXX 2081 */ 2082 rec->flush_state = HAMMER_FST_FLUSH; 2083 rec->flush_group = flg; 2084 ++flg->refs; 2085 hammer_ref(&rec->lock); 2086 hammer_flush_inode_core(target_ip, flg, 2087 HAMMER_FLUSH_RECURSION); 2088 r = 1; 2089 } 2090 break; 2091 case HAMMER_FST_FLUSH: 2092 /* 2093 * The flush_group should already match. 2094 */ 2095 KKASSERT(rec->flush_group == flg); 2096 r = 1; 2097 break; 2098 } 2099 return(r); 2100 } 2101 2102 #if 0 2103 /* 2104 * This version just moves records already in a flush state to the new 2105 * flush group and that is it. 2106 */ 2107 static int 2108 hammer_syncgrp_child_callback(hammer_record_t rec, void *data) 2109 { 2110 hammer_inode_t ip = rec->ip; 2111 2112 switch(rec->flush_state) { 2113 case HAMMER_FST_FLUSH: 2114 KKASSERT(rec->flush_group == ip->flush_group); 2115 break; 2116 default: 2117 break; 2118 } 2119 return(0); 2120 } 2121 #endif 2122 2123 /* 2124 * Wait for a previously queued flush to complete. 2125 * 2126 * If a critical error occured we don't try to wait. 2127 */ 2128 void 2129 hammer_wait_inode(hammer_inode_t ip) 2130 { 2131 hammer_flush_group_t flg; 2132 2133 flg = NULL; 2134 if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) { 2135 while (ip->flush_state != HAMMER_FST_IDLE && 2136 (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) { 2137 if (ip->flush_state == HAMMER_FST_SETUP) 2138 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2139 if (ip->flush_state != HAMMER_FST_IDLE) { 2140 ip->flags |= HAMMER_INODE_FLUSHW; 2141 tsleep(&ip->flags, 0, "hmrwin", 0); 2142 } 2143 } 2144 } 2145 } 2146 2147 /* 2148 * Called by the backend code when a flush has been completed. 2149 * The inode has already been removed from the flush list. 2150 * 2151 * A pipelined flush can occur, in which case we must re-enter the 2152 * inode on the list and re-copy its fields. 2153 */ 2154 void 2155 hammer_flush_inode_done(hammer_inode_t ip, int error) 2156 { 2157 hammer_mount_t hmp; 2158 int dorel; 2159 2160 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH); 2161 2162 hmp = ip->hmp; 2163 2164 /* 2165 * Auto-reflush if the backend could not completely flush 2166 * the inode. This fixes a case where a deferred buffer flush 2167 * could cause fsync to return early. 2168 */ 2169 if (ip->sync_flags & HAMMER_INODE_MODMASK) 2170 ip->flags |= HAMMER_INODE_REFLUSH; 2171 2172 /* 2173 * Merge left-over flags back into the frontend and fix the state. 2174 * Incomplete truncations are retained by the backend. 2175 */ 2176 ip->error = error; 2177 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED; 2178 ip->sync_flags &= HAMMER_INODE_TRUNCATED; 2179 2180 /* 2181 * The backend may have adjusted nlinks, so if the adjusted nlinks 2182 * does not match the fronttend set the frontend's RDIRTY flag again. 2183 */ 2184 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks) 2185 ip->flags |= HAMMER_INODE_DDIRTY; 2186 2187 /* 2188 * Fix up the dirty buffer status. 2189 */ 2190 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) { 2191 ip->flags |= HAMMER_INODE_BUFS; 2192 } 2193 2194 /* 2195 * Re-set the XDIRTY flag if some of the inode's in-memory records 2196 * could not be flushed. 2197 */ 2198 KKASSERT((RB_EMPTY(&ip->rec_tree) && 2199 (ip->flags & HAMMER_INODE_XDIRTY) == 0) || 2200 (!RB_EMPTY(&ip->rec_tree) && 2201 (ip->flags & HAMMER_INODE_XDIRTY) != 0)); 2202 2203 /* 2204 * Do not lose track of inodes which no longer have vnode 2205 * assocations, otherwise they may never get flushed again. 2206 * 2207 * The reflush flag can be set superfluously, causing extra pain 2208 * for no reason. If the inode is no longer modified it no longer 2209 * needs to be flushed. 2210 */ 2211 if (ip->flags & HAMMER_INODE_MODMASK) { 2212 if (ip->vp == NULL) 2213 ip->flags |= HAMMER_INODE_REFLUSH; 2214 } else { 2215 ip->flags &= ~HAMMER_INODE_REFLUSH; 2216 } 2217 2218 /* 2219 * Adjust the flush state. 2220 */ 2221 if (ip->flags & HAMMER_INODE_WOULDBLOCK) { 2222 /* 2223 * We were unable to flush out all our records, leave the 2224 * inode in a flush state and in the current flush group. 2225 * The flush group will be re-run. 2226 * 2227 * This occurs if the UNDO block gets too full or there is 2228 * too much dirty meta-data and allows the flusher to 2229 * finalize the UNDO block and then re-flush. 2230 */ 2231 ip->flags &= ~HAMMER_INODE_WOULDBLOCK; 2232 dorel = 0; 2233 } else { 2234 /* 2235 * Remove from the flush_group 2236 */ 2237 TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry); 2238 ip->flush_group = NULL; 2239 2240 /* 2241 * Clean up the vnode ref and tracking counts. 2242 */ 2243 if (ip->flags & HAMMER_INODE_VHELD) { 2244 ip->flags &= ~HAMMER_INODE_VHELD; 2245 vrele(ip->vp); 2246 } 2247 --hmp->count_iqueued; 2248 --hammer_count_iqueued; 2249 2250 /* 2251 * And adjust the state. 2252 */ 2253 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) { 2254 ip->flush_state = HAMMER_FST_IDLE; 2255 dorel = 1; 2256 } else { 2257 ip->flush_state = HAMMER_FST_SETUP; 2258 dorel = 0; 2259 } 2260 2261 /* 2262 * If the frontend is waiting for a flush to complete, 2263 * wake it up. 2264 */ 2265 if (ip->flags & HAMMER_INODE_FLUSHW) { 2266 ip->flags &= ~HAMMER_INODE_FLUSHW; 2267 wakeup(&ip->flags); 2268 } 2269 2270 /* 2271 * If the frontend made more changes and requested another 2272 * flush, then try to get it running. 2273 * 2274 * Reflushes are aborted when the inode is errored out. 2275 */ 2276 if (ip->flags & HAMMER_INODE_REFLUSH) { 2277 ip->flags &= ~HAMMER_INODE_REFLUSH; 2278 if (ip->flags & HAMMER_INODE_RESIGNAL) { 2279 ip->flags &= ~HAMMER_INODE_RESIGNAL; 2280 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2281 } else { 2282 hammer_flush_inode(ip, 0); 2283 } 2284 } 2285 } 2286 2287 /* 2288 * If we have no parent dependancies we can clear CONN_DOWN 2289 */ 2290 if (TAILQ_EMPTY(&ip->target_list)) 2291 ip->flags &= ~HAMMER_INODE_CONN_DOWN; 2292 2293 /* 2294 * If the inode is now clean drop the space reservation. 2295 */ 2296 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 && 2297 (ip->flags & HAMMER_INODE_RSV_INODES)) { 2298 ip->flags &= ~HAMMER_INODE_RSV_INODES; 2299 --hmp->rsv_inodes; 2300 } 2301 2302 if (dorel) 2303 hammer_rel_inode(ip, 0); 2304 } 2305 2306 /* 2307 * Called from hammer_sync_inode() to synchronize in-memory records 2308 * to the media. 2309 */ 2310 static int 2311 hammer_sync_record_callback(hammer_record_t record, void *data) 2312 { 2313 hammer_cursor_t cursor = data; 2314 hammer_transaction_t trans = cursor->trans; 2315 hammer_mount_t hmp = trans->hmp; 2316 int error; 2317 2318 /* 2319 * Skip records that do not belong to the current flush. 2320 */ 2321 ++hammer_stats_record_iterations; 2322 if (record->flush_state != HAMMER_FST_FLUSH) 2323 return(0); 2324 2325 #if 1 2326 if (record->flush_group != record->ip->flush_group) { 2327 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group ,record->ip->flush_group); 2328 Debugger("blah2"); 2329 return(0); 2330 } 2331 #endif 2332 KKASSERT(record->flush_group == record->ip->flush_group); 2333 2334 /* 2335 * Interlock the record using the BE flag. Once BE is set the 2336 * frontend cannot change the state of FE. 2337 * 2338 * NOTE: If FE is set prior to us setting BE we still sync the 2339 * record out, but the flush completion code converts it to 2340 * a delete-on-disk record instead of destroying it. 2341 */ 2342 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0); 2343 record->flags |= HAMMER_RECF_INTERLOCK_BE; 2344 2345 /* 2346 * The backend may have already disposed of the record. 2347 */ 2348 if (record->flags & HAMMER_RECF_DELETED_BE) { 2349 error = 0; 2350 goto done; 2351 } 2352 2353 /* 2354 * If the whole inode is being deleting all on-disk records will 2355 * be deleted very soon, we can't sync any new records to disk 2356 * because they will be deleted in the same transaction they were 2357 * created in (delete_tid == create_tid), which will assert. 2358 * 2359 * XXX There may be a case with RECORD_ADD with DELETED_FE set 2360 * that we currently panic on. 2361 */ 2362 if (record->ip->sync_flags & HAMMER_INODE_DELETING) { 2363 switch(record->type) { 2364 case HAMMER_MEM_RECORD_DATA: 2365 /* 2366 * We don't have to do anything, if the record was 2367 * committed the space will have been accounted for 2368 * in the blockmap. 2369 */ 2370 /* fall through */ 2371 case HAMMER_MEM_RECORD_GENERAL: 2372 record->flags |= HAMMER_RECF_DELETED_FE; 2373 record->flags |= HAMMER_RECF_DELETED_BE; 2374 error = 0; 2375 goto done; 2376 case HAMMER_MEM_RECORD_ADD: 2377 panic("hammer_sync_record_callback: illegal add " 2378 "during inode deletion record %p", record); 2379 break; /* NOT REACHED */ 2380 case HAMMER_MEM_RECORD_INODE: 2381 panic("hammer_sync_record_callback: attempt to " 2382 "sync inode record %p?", record); 2383 break; /* NOT REACHED */ 2384 case HAMMER_MEM_RECORD_DEL: 2385 /* 2386 * Follow through and issue the on-disk deletion 2387 */ 2388 break; 2389 } 2390 } 2391 2392 /* 2393 * If DELETED_FE is set special handling is needed for directory 2394 * entries. Dependant pieces related to the directory entry may 2395 * have already been synced to disk. If this occurs we have to 2396 * sync the directory entry and then change the in-memory record 2397 * from an ADD to a DELETE to cover the fact that it's been 2398 * deleted by the frontend. 2399 * 2400 * A directory delete covering record (MEM_RECORD_DEL) can never 2401 * be deleted by the frontend. 2402 * 2403 * Any other record type (aka DATA) can be deleted by the frontend. 2404 * XXX At the moment the flusher must skip it because there may 2405 * be another data record in the flush group for the same block, 2406 * meaning that some frontend data changes can leak into the backend's 2407 * synchronization point. 2408 */ 2409 if (record->flags & HAMMER_RECF_DELETED_FE) { 2410 if (record->type == HAMMER_MEM_RECORD_ADD) { 2411 record->flags |= HAMMER_RECF_CONVERT_DELETE; 2412 } else { 2413 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL); 2414 record->flags |= HAMMER_RECF_DELETED_BE; 2415 error = 0; 2416 goto done; 2417 } 2418 } 2419 2420 /* 2421 * Assign the create_tid for new records. Deletions already 2422 * have the record's entire key properly set up. 2423 */ 2424 if (record->type != HAMMER_MEM_RECORD_DEL) 2425 record->leaf.base.create_tid = trans->tid; 2426 record->leaf.create_ts = trans->time32; 2427 for (;;) { 2428 error = hammer_ip_sync_record_cursor(cursor, record); 2429 if (error != EDEADLK) 2430 break; 2431 hammer_done_cursor(cursor); 2432 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0], 2433 record->ip); 2434 if (error) 2435 break; 2436 } 2437 record->flags &= ~HAMMER_RECF_CONVERT_DELETE; 2438 2439 if (error) 2440 error = -error; 2441 done: 2442 hammer_flush_record_done(record, error); 2443 2444 /* 2445 * Do partial finalization if we have built up too many dirty 2446 * buffers. Otherwise a buffer cache deadlock can occur when 2447 * doing things like creating tens of thousands of tiny files. 2448 * 2449 * We must release our cursor lock to avoid a 3-way deadlock 2450 * due to the exclusive sync lock the finalizer must get. 2451 */ 2452 if (hammer_flusher_meta_limit(hmp)) { 2453 hammer_unlock_cursor(cursor); 2454 hammer_flusher_finalize(trans, 0); 2455 hammer_lock_cursor(cursor); 2456 } 2457 2458 return(error); 2459 } 2460 2461 /* 2462 * Backend function called by the flusher to sync an inode to media. 2463 */ 2464 int 2465 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip) 2466 { 2467 struct hammer_cursor cursor; 2468 hammer_node_t tmp_node; 2469 hammer_record_t depend; 2470 hammer_record_t next; 2471 int error, tmp_error; 2472 u_int64_t nlinks; 2473 2474 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0) 2475 return(0); 2476 2477 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip); 2478 if (error) 2479 goto done; 2480 2481 /* 2482 * Any directory records referencing this inode which are not in 2483 * our current flush group must adjust our nlink count for the 2484 * purposes of synchronization to disk. 2485 * 2486 * Records which are in our flush group can be unlinked from our 2487 * inode now, potentially allowing the inode to be physically 2488 * deleted. 2489 * 2490 * This cannot block. 2491 */ 2492 nlinks = ip->ino_data.nlinks; 2493 next = TAILQ_FIRST(&ip->target_list); 2494 while ((depend = next) != NULL) { 2495 next = TAILQ_NEXT(depend, target_entry); 2496 if (depend->flush_state == HAMMER_FST_FLUSH && 2497 depend->flush_group == ip->flush_group) { 2498 /* 2499 * If this is an ADD that was deleted by the frontend 2500 * the frontend nlinks count will have already been 2501 * decremented, but the backend is going to sync its 2502 * directory entry and must account for it. The 2503 * record will be converted to a delete-on-disk when 2504 * it gets synced. 2505 * 2506 * If the ADD was not deleted by the frontend we 2507 * can remove the dependancy from our target_list. 2508 */ 2509 if (depend->flags & HAMMER_RECF_DELETED_FE) { 2510 ++nlinks; 2511 } else { 2512 TAILQ_REMOVE(&ip->target_list, depend, 2513 target_entry); 2514 depend->target_ip = NULL; 2515 } 2516 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) { 2517 /* 2518 * Not part of our flush group 2519 */ 2520 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0); 2521 switch(depend->type) { 2522 case HAMMER_MEM_RECORD_ADD: 2523 --nlinks; 2524 break; 2525 case HAMMER_MEM_RECORD_DEL: 2526 ++nlinks; 2527 break; 2528 default: 2529 break; 2530 } 2531 } 2532 } 2533 2534 /* 2535 * Set dirty if we had to modify the link count. 2536 */ 2537 if (ip->sync_ino_data.nlinks != nlinks) { 2538 KKASSERT((int64_t)nlinks >= 0); 2539 ip->sync_ino_data.nlinks = nlinks; 2540 ip->sync_flags |= HAMMER_INODE_DDIRTY; 2541 } 2542 2543 /* 2544 * If there is a trunction queued destroy any data past the (aligned) 2545 * truncation point. Userland will have dealt with the buffer 2546 * containing the truncation point for us. 2547 * 2548 * We don't flush pending frontend data buffers until after we've 2549 * dealt with the truncation. 2550 */ 2551 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) { 2552 /* 2553 * Interlock trunc_off. The VOP front-end may continue to 2554 * make adjustments to it while we are blocked. 2555 */ 2556 off_t trunc_off; 2557 off_t aligned_trunc_off; 2558 int blkmask; 2559 2560 trunc_off = ip->sync_trunc_off; 2561 blkmask = hammer_blocksize(trunc_off) - 1; 2562 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask; 2563 2564 /* 2565 * Delete any whole blocks on-media. The front-end has 2566 * already cleaned out any partial block and made it 2567 * pending. The front-end may have updated trunc_off 2568 * while we were blocked so we only use sync_trunc_off. 2569 * 2570 * This operation can blow out the buffer cache, EWOULDBLOCK 2571 * means we were unable to complete the deletion. The 2572 * deletion will update sync_trunc_off in that case. 2573 */ 2574 error = hammer_ip_delete_range(&cursor, ip, 2575 aligned_trunc_off, 2576 0x7FFFFFFFFFFFFFFFLL, 2); 2577 if (error == EWOULDBLOCK) { 2578 ip->flags |= HAMMER_INODE_WOULDBLOCK; 2579 error = 0; 2580 goto defer_buffer_flush; 2581 } 2582 2583 if (error) 2584 goto done; 2585 2586 /* 2587 * Clear the truncation flag on the backend after we have 2588 * complete the deletions. Backend data is now good again 2589 * (including new records we are about to sync, below). 2590 * 2591 * Leave sync_trunc_off intact. As we write additional 2592 * records the backend will update sync_trunc_off. This 2593 * tells the backend whether it can skip the overwrite 2594 * test. This should work properly even when the backend 2595 * writes full blocks where the truncation point straddles 2596 * the block because the comparison is against the base 2597 * offset of the record. 2598 */ 2599 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED; 2600 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */ 2601 } else { 2602 error = 0; 2603 } 2604 2605 /* 2606 * Now sync related records. These will typically be directory 2607 * entries, records tracking direct-writes, or delete-on-disk records. 2608 */ 2609 if (error == 0) { 2610 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL, 2611 hammer_sync_record_callback, &cursor); 2612 if (tmp_error < 0) 2613 tmp_error = -error; 2614 if (tmp_error) 2615 error = tmp_error; 2616 } 2617 hammer_cache_node(&ip->cache[1], cursor.node); 2618 2619 /* 2620 * Re-seek for inode update, assuming our cache hasn't been ripped 2621 * out from under us. 2622 */ 2623 if (error == 0) { 2624 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error); 2625 if (tmp_node) { 2626 hammer_cursor_downgrade(&cursor); 2627 hammer_lock_sh(&tmp_node->lock); 2628 if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0) 2629 hammer_cursor_seek(&cursor, tmp_node, 0); 2630 hammer_unlock(&tmp_node->lock); 2631 hammer_rel_node(tmp_node); 2632 } 2633 error = 0; 2634 } 2635 2636 /* 2637 * If we are deleting the inode the frontend had better not have 2638 * any active references on elements making up the inode. 2639 * 2640 * The call to hammer_ip_delete_clean() cleans up auxillary records 2641 * but not DB or DATA records. Those must have already been deleted 2642 * by the normal truncation mechanic. 2643 */ 2644 if (error == 0 && ip->sync_ino_data.nlinks == 0 && 2645 RB_EMPTY(&ip->rec_tree) && 2646 (ip->sync_flags & HAMMER_INODE_DELETING) && 2647 (ip->flags & HAMMER_INODE_DELETED) == 0) { 2648 int count1 = 0; 2649 2650 error = hammer_ip_delete_clean(&cursor, ip, &count1); 2651 if (error == 0) { 2652 ip->flags |= HAMMER_INODE_DELETED; 2653 ip->sync_flags &= ~HAMMER_INODE_DELETING; 2654 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED; 2655 KKASSERT(RB_EMPTY(&ip->rec_tree)); 2656 2657 /* 2658 * Set delete_tid in both the frontend and backend 2659 * copy of the inode record. The DELETED flag handles 2660 * this, do not set RDIRTY. 2661 */ 2662 ip->ino_leaf.base.delete_tid = trans->tid; 2663 ip->sync_ino_leaf.base.delete_tid = trans->tid; 2664 ip->ino_leaf.delete_ts = trans->time32; 2665 ip->sync_ino_leaf.delete_ts = trans->time32; 2666 2667 2668 /* 2669 * Adjust the inode count in the volume header 2670 */ 2671 hammer_sync_lock_sh(trans); 2672 if (ip->flags & HAMMER_INODE_ONDISK) { 2673 hammer_modify_volume_field(trans, 2674 trans->rootvol, 2675 vol0_stat_inodes); 2676 --ip->hmp->rootvol->ondisk->vol0_stat_inodes; 2677 hammer_modify_volume_done(trans->rootvol); 2678 } 2679 hammer_sync_unlock(trans); 2680 } 2681 } 2682 2683 if (error) 2684 goto done; 2685 ip->sync_flags &= ~HAMMER_INODE_BUFS; 2686 2687 defer_buffer_flush: 2688 /* 2689 * Now update the inode's on-disk inode-data and/or on-disk record. 2690 * DELETED and ONDISK are managed only in ip->flags. 2691 * 2692 * In the case of a defered buffer flush we still update the on-disk 2693 * inode to satisfy visibility requirements if there happen to be 2694 * directory dependancies. 2695 */ 2696 switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) { 2697 case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK: 2698 /* 2699 * If deleted and on-disk, don't set any additional flags. 2700 * the delete flag takes care of things. 2701 * 2702 * Clear flags which may have been set by the frontend. 2703 */ 2704 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 2705 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 2706 HAMMER_INODE_DELETING); 2707 break; 2708 case HAMMER_INODE_DELETED: 2709 /* 2710 * Take care of the case where a deleted inode was never 2711 * flushed to the disk in the first place. 2712 * 2713 * Clear flags which may have been set by the frontend. 2714 */ 2715 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 2716 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME | 2717 HAMMER_INODE_DELETING); 2718 while (RB_ROOT(&ip->rec_tree)) { 2719 hammer_record_t record = RB_ROOT(&ip->rec_tree); 2720 hammer_ref(&record->lock); 2721 KKASSERT(record->lock.refs == 1); 2722 record->flags |= HAMMER_RECF_DELETED_FE; 2723 record->flags |= HAMMER_RECF_DELETED_BE; 2724 hammer_rel_mem_record(record); 2725 } 2726 break; 2727 case HAMMER_INODE_ONDISK: 2728 /* 2729 * If already on-disk, do not set any additional flags. 2730 */ 2731 break; 2732 default: 2733 /* 2734 * If not on-disk and not deleted, set DDIRTY to force 2735 * an initial record to be written. 2736 * 2737 * Also set the create_tid in both the frontend and backend 2738 * copy of the inode record. 2739 */ 2740 ip->ino_leaf.base.create_tid = trans->tid; 2741 ip->ino_leaf.create_ts = trans->time32; 2742 ip->sync_ino_leaf.base.create_tid = trans->tid; 2743 ip->sync_ino_leaf.create_ts = trans->time32; 2744 ip->sync_flags |= HAMMER_INODE_DDIRTY; 2745 break; 2746 } 2747 2748 /* 2749 * If RDIRTY or DDIRTY is set, write out a new record. If the inode 2750 * is already on-disk the old record is marked as deleted. 2751 * 2752 * If DELETED is set hammer_update_inode() will delete the existing 2753 * record without writing out a new one. 2754 * 2755 * If *ONLY* the ITIMES flag is set we can update the record in-place. 2756 */ 2757 if (ip->flags & HAMMER_INODE_DELETED) { 2758 error = hammer_update_inode(&cursor, ip); 2759 } else 2760 if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 && 2761 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) { 2762 error = hammer_update_itimes(&cursor, ip); 2763 } else 2764 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) { 2765 error = hammer_update_inode(&cursor, ip); 2766 } 2767 done: 2768 if (error) { 2769 hammer_critical_error(ip->hmp, ip, error, 2770 "while syncing inode"); 2771 } 2772 hammer_done_cursor(&cursor); 2773 return(error); 2774 } 2775 2776 /* 2777 * This routine is called when the OS is no longer actively referencing 2778 * the inode (but might still be keeping it cached), or when releasing 2779 * the last reference to an inode. 2780 * 2781 * At this point if the inode's nlinks count is zero we want to destroy 2782 * it, which may mean destroying it on-media too. 2783 */ 2784 void 2785 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp) 2786 { 2787 struct vnode *vp; 2788 2789 /* 2790 * Set the DELETING flag when the link count drops to 0 and the 2791 * OS no longer has any opens on the inode. 2792 * 2793 * The backend will clear DELETING (a mod flag) and set DELETED 2794 * (a state flag) when it is actually able to perform the 2795 * operation. 2796 * 2797 * Don't reflag the deletion if the flusher is currently syncing 2798 * one that was already flagged. A previously set DELETING flag 2799 * may bounce around flags and sync_flags until the operation is 2800 * completely done. 2801 */ 2802 if (ip->ino_data.nlinks == 0 && 2803 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) { 2804 ip->flags |= HAMMER_INODE_DELETING; 2805 ip->flags |= HAMMER_INODE_TRUNCATED; 2806 ip->trunc_off = 0; 2807 vp = NULL; 2808 if (getvp) { 2809 if (hammer_get_vnode(ip, &vp) != 0) 2810 return; 2811 } 2812 2813 /* 2814 * Final cleanup 2815 */ 2816 if (ip->vp) { 2817 vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE); 2818 vnode_pager_setsize(ip->vp, 0); 2819 } 2820 if (getvp) { 2821 vput(vp); 2822 } 2823 } 2824 } 2825 2826 /* 2827 * After potentially resolving a dependancy the inode is tested 2828 * to determine whether it needs to be reflushed. 2829 */ 2830 void 2831 hammer_test_inode(hammer_inode_t ip) 2832 { 2833 if (ip->flags & HAMMER_INODE_REFLUSH) { 2834 ip->flags &= ~HAMMER_INODE_REFLUSH; 2835 hammer_ref(&ip->lock); 2836 if (ip->flags & HAMMER_INODE_RESIGNAL) { 2837 ip->flags &= ~HAMMER_INODE_RESIGNAL; 2838 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL); 2839 } else { 2840 hammer_flush_inode(ip, 0); 2841 } 2842 hammer_rel_inode(ip, 0); 2843 } 2844 } 2845 2846 /* 2847 * Clear the RECLAIM flag on an inode. This occurs when the inode is 2848 * reassociated with a vp or just before it gets freed. 2849 * 2850 * Pipeline wakeups to threads blocked due to an excessive number of 2851 * detached inodes. The reclaim count generates a bit of negative 2852 * feedback. 2853 */ 2854 static void 2855 hammer_inode_wakereclaims(hammer_inode_t ip, int dowake) 2856 { 2857 struct hammer_reclaim *reclaim; 2858 hammer_mount_t hmp = ip->hmp; 2859 2860 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) 2861 return; 2862 2863 --hammer_count_reclaiming; 2864 --hmp->inode_reclaims; 2865 ip->flags &= ~HAMMER_INODE_RECLAIM; 2866 2867 if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT || dowake) { 2868 reclaim = TAILQ_FIRST(&hmp->reclaim_list); 2869 if (reclaim && reclaim->count > 0 && --reclaim->count == 0) { 2870 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry); 2871 wakeup(reclaim); 2872 } 2873 } 2874 } 2875 2876 /* 2877 * Setup our reclaim pipeline. We only let so many detached (and dirty) 2878 * inodes build up before we start blocking. 2879 * 2880 * When we block we don't care *which* inode has finished reclaiming, 2881 * as lone as one does. This is somewhat heuristical... we also put a 2882 * cap on how long we are willing to wait. 2883 */ 2884 void 2885 hammer_inode_waitreclaims(hammer_mount_t hmp) 2886 { 2887 struct hammer_reclaim reclaim; 2888 int delay; 2889 2890 if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT) 2891 return; 2892 delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz / 2893 (HAMMER_RECLAIM_WAIT * 3) + 1; 2894 if (delay > 0) { 2895 reclaim.count = 2; 2896 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry); 2897 tsleep(&reclaim, 0, "hmrrcm", delay); 2898 if (reclaim.count > 0) 2899 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry); 2900 } 2901 } 2902 2903 /* 2904 * A larger then normal backlog of inodes is sitting in the flusher, 2905 * enforce a general slowdown to let it catch up. This routine is only 2906 * called on completion of a non-flusher-related transaction which 2907 * performed B-Tree node I/O. 2908 * 2909 * It is possible for the flusher to stall in a continuous load. 2910 * blogbench -i1000 -o seems to do a good job generating this sort of load. 2911 * If the flusher is unable to catch up the inode count can bloat until 2912 * we run out of kvm. 2913 * 2914 * This is a bit of a hack. 2915 */ 2916 void 2917 hammer_inode_waithard(hammer_mount_t hmp) 2918 { 2919 /* 2920 * Hysteresis. 2921 */ 2922 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) { 2923 if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT / 2 && 2924 hmp->count_iqueued < hmp->count_inodes / 20) { 2925 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY; 2926 return; 2927 } 2928 } else { 2929 if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT || 2930 hmp->count_iqueued < hmp->count_inodes / 10) { 2931 return; 2932 } 2933 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY; 2934 } 2935 2936 /* 2937 * Block for one flush cycle. 2938 */ 2939 hammer_flusher_wait_next(hmp); 2940 } 2941 2942