1 /* 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $ 35 */ 36 37 #include "hammer.h" 38 #include <vm/vm_extern.h> 39 #include <sys/buf.h> 40 #include <sys/buf2.h> 41 42 static int hammer_unload_inode(struct hammer_inode *ip); 43 static void hammer_free_inode(hammer_inode_t ip); 44 static void hammer_flush_inode_core(hammer_inode_t ip, 45 hammer_flush_group_t flg, int flags); 46 static int hammer_setup_child_callback(hammer_record_t rec, void *data); 47 #if 0 48 static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data); 49 #endif 50 static int hammer_setup_parent_inodes(hammer_inode_t ip, int depth, 51 hammer_flush_group_t flg); 52 static int hammer_setup_parent_inodes_helper(hammer_record_t record, 53 int depth, hammer_flush_group_t flg); 54 static void hammer_inode_wakereclaims(hammer_inode_t ip, int dowake); 55 56 #ifdef DEBUG_TRUNCATE 57 extern struct hammer_inode *HammerTruncIp; 58 #endif 59 60 /* 61 * RB-Tree support for inode structures 62 */ 63 int 64 hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2) 65 { 66 if (ip1->obj_localization < ip2->obj_localization) 67 return(-1); 68 if (ip1->obj_localization > ip2->obj_localization) 69 return(1); 70 if (ip1->obj_id < ip2->obj_id) 71 return(-1); 72 if (ip1->obj_id > ip2->obj_id) 73 return(1); 74 if (ip1->obj_asof < ip2->obj_asof) 75 return(-1); 76 if (ip1->obj_asof > ip2->obj_asof) 77 return(1); 78 return(0); 79 } 80 81 /* 82 * RB-Tree support for inode structures / special LOOKUP_INFO 83 */ 84 static int 85 hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip) 86 { 87 if (info->obj_localization < ip->obj_localization) 88 return(-1); 89 if (info->obj_localization > ip->obj_localization) 90 return(1); 91 if (info->obj_id < 
ip->obj_id) 92 return(-1); 93 if (info->obj_id > ip->obj_id) 94 return(1); 95 if (info->obj_asof < ip->obj_asof) 96 return(-1); 97 if (info->obj_asof > ip->obj_asof) 98 return(1); 99 return(0); 100 } 101 102 /* 103 * Used by hammer_scan_inode_snapshots() to locate all of an object's 104 * snapshots. Note that the asof field is not tested, which we can get 105 * away with because it is the lowest-priority field. 106 */ 107 static int 108 hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data) 109 { 110 hammer_inode_info_t info = data; 111 112 if (ip->obj_localization > info->obj_localization) 113 return(1); 114 if (ip->obj_localization < info->obj_localization) 115 return(-1); 116 if (ip->obj_id > info->obj_id) 117 return(1); 118 if (ip->obj_id < info->obj_id) 119 return(-1); 120 return(0); 121 } 122 123 /* 124 * Used by hammer_unload_pseudofs() to locate all inodes associated with 125 * a particular PFS. 126 */ 127 static int 128 hammer_inode_pfs_cmp(hammer_inode_t ip, void *data) 129 { 130 u_int32_t localization = *(u_int32_t *)data; 131 if (ip->obj_localization > localization) 132 return(1); 133 if (ip->obj_localization < localization) 134 return(-1); 135 return(0); 136 } 137 138 /* 139 * RB-Tree support for pseudofs structures 140 */ 141 static int 142 hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2) 143 { 144 if (p1->localization < p2->localization) 145 return(-1); 146 if (p1->localization > p2->localization) 147 return(1); 148 return(0); 149 } 150 151 152 RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare); 153 RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node, 154 hammer_inode_info_cmp, hammer_inode_info_t); 155 RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node, 156 hammer_pfs_rb_compare, u_int32_t, localization); 157 158 /* 159 * The kernel is not actively referencing this vnode but is still holding 160 * it cached. 161 * 162 * This is called from the frontend. 163 */ 164 int 165 hammer_vop_inactive(struct vop_inactive_args *ap) 166 { 167 struct hammer_inode *ip = VTOI(ap->a_vp); 168 169 /* 170 * Degenerate case 171 */ 172 if (ip == NULL) { 173 vrecycle(ap->a_vp); 174 return(0); 175 } 176 177 /* 178 * If the inode no longer has visibility in the filesystem try to 179 * recycle it immediately, even if the inode is dirty. Recycling 180 * it quickly allows the system to reclaim buffer cache and VM 181 * resources which can matter a lot in a heavily loaded system. 182 * 183 * This can deadlock in vfsync() if we aren't careful. 184 * 185 * Do not queue the inode to the flusher if we still have visibility, 186 * otherwise namespace calls such as chmod will unnecessarily generate 187 * multiple inode updates. 188 */ 189 hammer_inode_unloadable_check(ip, 0); 190 if (ip->ino_data.nlinks == 0) { 191 if (ip->flags & HAMMER_INODE_MODMASK) 192 hammer_flush_inode(ip, 0); 193 vrecycle(ap->a_vp); 194 } 195 return(0); 196 } 197 198 /* 199 * Release the vnode association. This is typically (but not always) 200 * the last reference on the inode. 201 * 202 * Once the association is lost we are on our own with regards to 203 * flushing the inode. 
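 *
 * As a rough sketch (summarizing the code below, not adding behavior),
 * reclaiming the vnode amounts to:
 *
 *	vp->v_data = NULL;
 *	ip->vp = NULL;
 *	ip->flags |= HAMMER_INODE_RECLAIM;	(counted in hmp->inode_reclaims)
 *	hammer_rel_inode(ip, 1);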
204 */ 205 int 206 hammer_vop_reclaim(struct vop_reclaim_args *ap) 207 { 208 struct hammer_inode *ip; 209 hammer_mount_t hmp; 210 struct vnode *vp; 211 212 vp = ap->a_vp; 213 214 if ((ip = vp->v_data) != NULL) { 215 hmp = ip->hmp; 216 vp->v_data = NULL; 217 ip->vp = NULL; 218 219 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) { 220 ++hammer_count_reclaiming; 221 ++hmp->inode_reclaims; 222 ip->flags |= HAMMER_INODE_RECLAIM; 223 } 224 hammer_rel_inode(ip, 1); 225 } 226 return(0); 227 } 228 229 /* 230 * Return a locked vnode for the specified inode. The inode must be 231 * referenced but NOT LOCKED on entry and will remain referenced on 232 * return. 233 * 234 * Called from the frontend. 235 */ 236 int 237 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp) 238 { 239 hammer_mount_t hmp; 240 struct vnode *vp; 241 int error = 0; 242 u_int8_t obj_type; 243 244 hmp = ip->hmp; 245 246 for (;;) { 247 if ((vp = ip->vp) == NULL) { 248 error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0); 249 if (error) 250 break; 251 hammer_lock_ex(&ip->lock); 252 if (ip->vp != NULL) { 253 hammer_unlock(&ip->lock); 254 vp = *vpp; 255 vp->v_type = VBAD; 256 vx_put(vp); 257 continue; 258 } 259 hammer_ref(&ip->lock); 260 vp = *vpp; 261 ip->vp = vp; 262 263 obj_type = ip->ino_data.obj_type; 264 vp->v_type = hammer_get_vnode_type(obj_type); 265 266 hammer_inode_wakereclaims(ip, 0); 267 268 switch(ip->ino_data.obj_type) { 269 case HAMMER_OBJTYPE_CDEV: 270 case HAMMER_OBJTYPE_BDEV: 271 vp->v_ops = &hmp->mp->mnt_vn_spec_ops; 272 addaliasu(vp, ip->ino_data.rmajor, 273 ip->ino_data.rminor); 274 break; 275 case HAMMER_OBJTYPE_FIFO: 276 vp->v_ops = &hmp->mp->mnt_vn_fifo_ops; 277 break; 278 default: 279 break; 280 } 281 282 /* 283 * Only mark as the root vnode if the ip is not 284 * historical, otherwise the VFS cache will get 285 * confused. The other half of the special handling 286 * is in hammer_vop_nlookupdotdot(). 287 * 288 * Pseudo-filesystem roots can be accessed via 289 * non-root filesystem paths and setting VROOT may 290 * confuse the namecache. Set VPFSROOT instead. 291 */ 292 if (ip->obj_id == HAMMER_OBJID_ROOT && 293 ip->obj_asof == hmp->asof) { 294 if (ip->obj_localization == 0) 295 vp->v_flag |= VROOT; 296 else 297 vp->v_flag |= VPFSROOT; 298 } 299 300 vp->v_data = (void *)ip; 301 /* vnode locked by getnewvnode() */ 302 /* make related vnode dirty if inode dirty? */ 303 hammer_unlock(&ip->lock); 304 if (vp->v_type == VREG) 305 vinitvmio(vp, ip->ino_data.size); 306 break; 307 } 308 309 /* 310 * loop if the vget fails (aka races), or if the vp 311 * no longer matches ip->vp. 312 */ 313 if (vget(vp, LK_EXCLUSIVE) == 0) { 314 if (vp == ip->vp) 315 break; 316 vput(vp); 317 } 318 } 319 *vpp = vp; 320 return(error); 321 } 322 323 /* 324 * Locate all copies of the inode for obj_id compatible with the specified 325 * asof, reference, and issue the related call-back. This routine is used 326 * for direct-io invalidation and does not create any new inodes. 327 */ 328 void 329 hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo, 330 int (*callback)(hammer_inode_t ip, void *data), 331 void *data) 332 { 333 hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root, 334 hammer_inode_info_cmp_all_history, 335 callback, iinfo); 336 } 337 338 /* 339 * Acquire a HAMMER inode. The returned inode is not locked. These functions 340 * do not attach or detach the related vnode (use hammer_get_vnode() for 341 * that). 
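 *
 * For illustration only (not a verbatim caller from this file, error
 * handling omitted), a frontend caller typically pairs this with
 * hammer_get_vnode() and hammer_rel_inode():
 *
 *	ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
 *			      flags, &error);
 *	if (ip) {
 *		error = hammer_get_vnode(ip, &vp);
 *		...
 *		hammer_rel_inode(ip, 0);
 *	}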
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;


	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
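	 *
	 * For reference, the four cache slots touched below serve roughly
	 * the following roles (paraphrasing the comments further down in
	 * this function):
	 *
	 *	ip->cache[0]	B-Tree node containing this inode's record
	 *	ip->cache[1]	B-Tree node near this inode's data
	 *	dip->cache[2]	directory shortcut to child inode records
	 *	dip->cache[3]	directory shortcut used to seed ip->cache[1]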
423 */ 424 retry: 425 cachep = NULL; 426 if (dip) { 427 if (dip->cache[2].node) 428 cachep = &dip->cache[2]; 429 else 430 cachep = &dip->cache[0]; 431 } 432 hammer_init_cursor(trans, &cursor, cachep, NULL); 433 cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE; 434 cursor.key_beg.obj_id = ip->obj_id; 435 cursor.key_beg.key = 0; 436 cursor.key_beg.create_tid = 0; 437 cursor.key_beg.delete_tid = 0; 438 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE; 439 cursor.key_beg.obj_type = 0; 440 441 cursor.asof = iinfo.obj_asof; 442 cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA | 443 HAMMER_CURSOR_ASOF; 444 445 *errorp = hammer_btree_lookup(&cursor); 446 if (*errorp == EDEADLK) { 447 hammer_done_cursor(&cursor); 448 goto retry; 449 } 450 451 /* 452 * On success the B-Tree lookup will hold the appropriate 453 * buffer cache buffers and provide a pointer to the requested 454 * information. Copy the information to the in-memory inode 455 * and cache the B-Tree node to improve future operations. 456 */ 457 if (*errorp == 0) { 458 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf; 459 ip->ino_data = cursor.data->inode; 460 461 /* 462 * cache[0] tries to cache the location of the object inode. 463 * The assumption is that it is near the directory inode. 464 * 465 * cache[1] tries to cache the location of the object data. 466 * We might have something in the governing directory from 467 * scan optimizations (see the strategy code in 468 * hammer_vnops.c). 469 * 470 * We update dip->cache[2], if possible, with the location 471 * of the object inode for future directory shortcuts. 472 */ 473 hammer_cache_node(&ip->cache[0], cursor.node); 474 if (dip) { 475 if (dip->cache[3].node) { 476 hammer_cache_node(&ip->cache[1], 477 dip->cache[3].node); 478 } 479 hammer_cache_node(&dip->cache[2], cursor.node); 480 } 481 482 /* 483 * The file should not contain any data past the file size 484 * stored in the inode. Setting save_trunc_off to the 485 * file size instead of max reduces B-Tree lookup overheads 486 * on append by allowing the flusher to avoid checking for 487 * record overwrites. 488 */ 489 ip->save_trunc_off = ip->ino_data.size; 490 491 /* 492 * Locate and assign the pseudofs management structure to 493 * the inode. 494 */ 495 if (dip && dip->obj_localization == ip->obj_localization) { 496 ip->pfsm = dip->pfsm; 497 hammer_ref(&ip->pfsm->lock); 498 } else { 499 ip->pfsm = hammer_load_pseudofs(trans, 500 ip->obj_localization, 501 errorp); 502 *errorp = 0; /* ignore ENOENT */ 503 } 504 } 505 506 /* 507 * The inode is placed on the red-black tree and will be synced to 508 * the media when flushed or by the filesystem sync. If this races 509 * another instantiation/lookup the insertion will fail. 510 */ 511 if (*errorp == 0) { 512 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 513 hammer_free_inode(ip); 514 hammer_done_cursor(&cursor); 515 goto loop; 516 } 517 ip->flags |= HAMMER_INODE_ONDISK; 518 } else { 519 if (ip->flags & HAMMER_INODE_RSV_INODES) { 520 ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */ 521 --hmp->rsv_inodes; 522 } 523 524 hammer_free_inode(ip); 525 ip = NULL; 526 } 527 hammer_done_cursor(&cursor); 528 trans->flags |= HAMMER_TRANSF_NEWINODE; 529 return (ip); 530 } 531 532 /* 533 * Get a dummy inode to placemark a broken directory entry. 
534 */ 535 struct hammer_inode * 536 hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip, 537 int64_t obj_id, hammer_tid_t asof, u_int32_t localization, 538 int flags, int *errorp) 539 { 540 hammer_mount_t hmp = trans->hmp; 541 struct hammer_inode_info iinfo; 542 struct hammer_inode *ip; 543 544 /* 545 * Determine if we already have an inode cached. If we do then 546 * we are golden. 547 * 548 * If we find an inode with no vnode we have to mark the 549 * transaction such that hammer_inode_waitreclaims() is 550 * called later on to avoid building up an infinite number 551 * of inodes. Otherwise we can continue to * add new inodes 552 * faster then they can be disposed of, even with the tsleep 553 * delay. 554 * 555 * If we find a non-fake inode we return an error. Only fake 556 * inodes can be returned by this routine. 557 */ 558 iinfo.obj_id = obj_id; 559 iinfo.obj_asof = asof; 560 iinfo.obj_localization = localization; 561 loop: 562 *errorp = 0; 563 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 564 if (ip) { 565 if ((ip->flags & HAMMER_INODE_DUMMY) == 0) { 566 *errorp = ENOENT; 567 return(NULL); 568 } 569 hammer_ref(&ip->lock); 570 return(ip); 571 } 572 573 /* 574 * Allocate a new inode structure and deal with races later. 575 */ 576 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 577 ++hammer_count_inodes; 578 ++hmp->count_inodes; 579 ip->obj_id = obj_id; 580 ip->obj_asof = iinfo.obj_asof; 581 ip->obj_localization = localization; 582 ip->hmp = hmp; 583 ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY; 584 ip->cache[0].ip = ip; 585 ip->cache[1].ip = ip; 586 ip->cache[2].ip = ip; 587 ip->cache[3].ip = ip; 588 ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off = 589 0x7FFFFFFFFFFFFFFFLL; 590 RB_INIT(&ip->rec_tree); 591 TAILQ_INIT(&ip->target_list); 592 hammer_ref(&ip->lock); 593 594 /* 595 * Populate the dummy inode. Leave everything zero'd out. 596 * 597 * (ip->ino_leaf and ip->ino_data) 598 * 599 * Make the dummy inode a FIFO object which most copy programs 600 * will properly ignore. 601 */ 602 ip->save_trunc_off = ip->ino_data.size; 603 ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO; 604 605 /* 606 * Locate and assign the pseudofs management structure to 607 * the inode. 608 */ 609 if (dip && dip->obj_localization == ip->obj_localization) { 610 ip->pfsm = dip->pfsm; 611 hammer_ref(&ip->pfsm->lock); 612 } else { 613 ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization, 614 errorp); 615 *errorp = 0; /* ignore ENOENT */ 616 } 617 618 /* 619 * The inode is placed on the red-black tree and will be synced to 620 * the media when flushed or by the filesystem sync. If this races 621 * another instantiation/lookup the insertion will fail. 622 * 623 * NOTE: Do not set HAMMER_INODE_ONDISK. The inode is a fake. 624 */ 625 if (*errorp == 0) { 626 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 627 hammer_free_inode(ip); 628 goto loop; 629 } 630 } else { 631 if (ip->flags & HAMMER_INODE_RSV_INODES) { 632 ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */ 633 --hmp->rsv_inodes; 634 } 635 hammer_free_inode(ip); 636 ip = NULL; 637 } 638 trans->flags |= HAMMER_TRANSF_NEWINODE; 639 return (ip); 640 } 641 642 /* 643 * Return a referenced inode only if it is in our inode cache. 644 * 645 * Dummy inodes do not count. 
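 *
 * Illustrative use only (not a verbatim caller): a lookup that must not
 * instantiate anything new can do
 *
 *	ip = hammer_find_inode(trans, obj_id, asof, localization);
 *	if (ip) {
 *		...
 *		hammer_rel_inode(ip, 0);
 *	}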
646 */ 647 struct hammer_inode * 648 hammer_find_inode(hammer_transaction_t trans, int64_t obj_id, 649 hammer_tid_t asof, u_int32_t localization) 650 { 651 hammer_mount_t hmp = trans->hmp; 652 struct hammer_inode_info iinfo; 653 struct hammer_inode *ip; 654 655 iinfo.obj_id = obj_id; 656 iinfo.obj_asof = asof; 657 iinfo.obj_localization = localization; 658 659 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo); 660 if (ip) { 661 if (ip->flags & HAMMER_INODE_DUMMY) 662 ip = NULL; 663 else 664 hammer_ref(&ip->lock); 665 } 666 return(ip); 667 } 668 669 /* 670 * Create a new filesystem object, returning the inode in *ipp. The 671 * returned inode will be referenced. The inode is created in-memory. 672 * 673 * If pfsm is non-NULL the caller wishes to create the root inode for 674 * a master PFS. 675 */ 676 int 677 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap, 678 struct ucred *cred, 679 hammer_inode_t dip, const char *name, int namelen, 680 hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp) 681 { 682 hammer_mount_t hmp; 683 hammer_inode_t ip; 684 uid_t xuid; 685 int error; 686 int64_t namekey; 687 u_int32_t dummy; 688 689 hmp = trans->hmp; 690 691 ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO); 692 ++hammer_count_inodes; 693 ++hmp->count_inodes; 694 trans->flags |= HAMMER_TRANSF_NEWINODE; 695 696 if (pfsm) { 697 KKASSERT(pfsm->localization != 0); 698 ip->obj_id = HAMMER_OBJID_ROOT; 699 ip->obj_localization = pfsm->localization; 700 } else { 701 KKASSERT(dip != NULL); 702 namekey = hammer_directory_namekey(dip, name, namelen, &dummy); 703 ip->obj_id = hammer_alloc_objid(hmp, dip, namekey); 704 ip->obj_localization = dip->obj_localization; 705 } 706 707 KKASSERT(ip->obj_id != 0); 708 ip->obj_asof = hmp->asof; 709 ip->hmp = hmp; 710 ip->flush_state = HAMMER_FST_IDLE; 711 ip->flags = HAMMER_INODE_DDIRTY | 712 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME; 713 ip->cache[0].ip = ip; 714 ip->cache[1].ip = ip; 715 ip->cache[2].ip = ip; 716 ip->cache[3].ip = ip; 717 718 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL; 719 /* ip->save_trunc_off = 0; (already zero) */ 720 RB_INIT(&ip->rec_tree); 721 TAILQ_INIT(&ip->target_list); 722 723 ip->ino_data.atime = trans->time; 724 ip->ino_data.mtime = trans->time; 725 ip->ino_data.size = 0; 726 ip->ino_data.nlinks = 0; 727 728 /* 729 * A nohistory designator on the parent directory is inherited by 730 * the child. We will do this even for pseudo-fs creation... the 731 * sysad can turn it off. 732 */ 733 if (dip) { 734 ip->ino_data.uflags = dip->ino_data.uflags & 735 (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP); 736 } 737 738 ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; 739 ip->ino_leaf.base.localization = ip->obj_localization + 740 HAMMER_LOCALIZE_INODE; 741 ip->ino_leaf.base.obj_id = ip->obj_id; 742 ip->ino_leaf.base.key = 0; 743 ip->ino_leaf.base.create_tid = 0; 744 ip->ino_leaf.base.delete_tid = 0; 745 ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE; 746 ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type); 747 748 ip->ino_data.obj_type = ip->ino_leaf.base.obj_type; 749 ip->ino_data.version = HAMMER_INODE_DATA_VERSION; 750 ip->ino_data.mode = vap->va_mode; 751 ip->ino_data.ctime = trans->time; 752 753 /* 754 * If we are running version 2 or greater we use dirhash algorithm #1 755 * which is semi-sorted. Algorithm #0 was just a pure crc. 
756 */ 757 if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) { 758 if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) { 759 ip->ino_data.cap_flags |= HAMMER_INODE_CAP_DIRHASH_ALG1; 760 } 761 } 762 763 /* 764 * Setup the ".." pointer. This only needs to be done for directories 765 * but we do it for all objects as a recovery aid. 766 */ 767 if (dip) 768 ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id; 769 #if 0 770 /* 771 * The parent_obj_localization field only applies to pseudo-fs roots. 772 * XXX this is no longer applicable, PFSs are no longer directly 773 * tied into the parent's directory structure. 774 */ 775 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY && 776 ip->obj_id == HAMMER_OBJID_ROOT) { 777 ip->ino_data.ext.obj.parent_obj_localization = 778 dip->obj_localization; 779 } 780 #endif 781 782 switch(ip->ino_leaf.base.obj_type) { 783 case HAMMER_OBJTYPE_CDEV: 784 case HAMMER_OBJTYPE_BDEV: 785 ip->ino_data.rmajor = vap->va_rmajor; 786 ip->ino_data.rminor = vap->va_rminor; 787 break; 788 default: 789 break; 790 } 791 792 /* 793 * Calculate default uid/gid and overwrite with information from 794 * the vap. 795 */ 796 if (dip) { 797 xuid = hammer_to_unix_xid(&dip->ino_data.uid); 798 xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, 799 xuid, cred, &vap->va_mode); 800 } else { 801 xuid = 0; 802 } 803 ip->ino_data.mode = vap->va_mode; 804 805 if (vap->va_vaflags & VA_UID_UUID_VALID) 806 ip->ino_data.uid = vap->va_uid_uuid; 807 else if (vap->va_uid != (uid_t)VNOVAL) 808 hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid); 809 else 810 hammer_guid_to_uuid(&ip->ino_data.uid, xuid); 811 812 if (vap->va_vaflags & VA_GID_UUID_VALID) 813 ip->ino_data.gid = vap->va_gid_uuid; 814 else if (vap->va_gid != (gid_t)VNOVAL) 815 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid); 816 else if (dip) 817 ip->ino_data.gid = dip->ino_data.gid; 818 819 hammer_ref(&ip->lock); 820 821 if (pfsm) { 822 ip->pfsm = pfsm; 823 hammer_ref(&pfsm->lock); 824 error = 0; 825 } else if (dip->obj_localization == ip->obj_localization) { 826 ip->pfsm = dip->pfsm; 827 hammer_ref(&ip->pfsm->lock); 828 error = 0; 829 } else { 830 ip->pfsm = hammer_load_pseudofs(trans, 831 ip->obj_localization, 832 &error); 833 error = 0; /* ignore ENOENT */ 834 } 835 836 if (error) { 837 hammer_free_inode(ip); 838 ip = NULL; 839 } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) { 840 panic("hammer_create_inode: duplicate obj_id %llx", 841 (long long)ip->obj_id); 842 /* not reached */ 843 hammer_free_inode(ip); 844 } 845 *ipp = ip; 846 return(error); 847 } 848 849 /* 850 * Final cleanup / freeing of an inode structure 851 */ 852 static void 853 hammer_free_inode(hammer_inode_t ip) 854 { 855 struct hammer_mount *hmp; 856 857 hmp = ip->hmp; 858 KKASSERT(ip->lock.refs == 1); 859 hammer_uncache_node(&ip->cache[0]); 860 hammer_uncache_node(&ip->cache[1]); 861 hammer_uncache_node(&ip->cache[2]); 862 hammer_uncache_node(&ip->cache[3]); 863 hammer_inode_wakereclaims(ip, 1); 864 if (ip->objid_cache) 865 hammer_clear_objid(ip); 866 --hammer_count_inodes; 867 --hmp->count_inodes; 868 if (ip->pfsm) { 869 hammer_rel_pseudofs(hmp, ip->pfsm); 870 ip->pfsm = NULL; 871 } 872 kfree(ip, hmp->m_inodes); 873 ip = NULL; 874 } 875 876 /* 877 * Retrieve pseudo-fs data. NULL will never be returned. 878 * 879 * If an error occurs *errorp will be set and a default template is returned, 880 * otherwise *errorp is set to 0. Typically when an error occurs it will 881 * be ENOENT. 
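 *
 * Callers generally ignore ENOENT and run with the returned template.
 * A sketch mirroring the call sites later in this file:
 *
 *	pfsm = hammer_load_pseudofs(trans, ip->obj_localization, &error);
 *	error = 0;			(ENOENT is ignored)
 *	...
 *	hammer_rel_pseudofs(hmp, pfsm);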
882 */ 883 hammer_pseudofs_inmem_t 884 hammer_load_pseudofs(hammer_transaction_t trans, 885 u_int32_t localization, int *errorp) 886 { 887 hammer_mount_t hmp = trans->hmp; 888 hammer_inode_t ip; 889 hammer_pseudofs_inmem_t pfsm; 890 struct hammer_cursor cursor; 891 int bytes; 892 893 retry: 894 pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization); 895 if (pfsm) { 896 hammer_ref(&pfsm->lock); 897 *errorp = 0; 898 return(pfsm); 899 } 900 901 /* 902 * PFS records are stored in the root inode (not the PFS root inode, 903 * but the real root). Avoid an infinite recursion if loading 904 * the PFS for the real root. 905 */ 906 if (localization) { 907 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, 908 HAMMER_MAX_TID, 909 HAMMER_DEF_LOCALIZATION, 0, errorp); 910 } else { 911 ip = NULL; 912 } 913 914 pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO); 915 pfsm->localization = localization; 916 pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid; 917 pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid; 918 919 hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip); 920 cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION + 921 HAMMER_LOCALIZE_MISC; 922 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 923 cursor.key_beg.create_tid = 0; 924 cursor.key_beg.delete_tid = 0; 925 cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS; 926 cursor.key_beg.obj_type = 0; 927 cursor.key_beg.key = localization; 928 cursor.asof = HAMMER_MAX_TID; 929 cursor.flags |= HAMMER_CURSOR_ASOF; 930 931 if (ip) 932 *errorp = hammer_ip_lookup(&cursor); 933 else 934 *errorp = hammer_btree_lookup(&cursor); 935 if (*errorp == 0) { 936 *errorp = hammer_ip_resolve_data(&cursor); 937 if (*errorp == 0) { 938 if (cursor.data->pfsd.mirror_flags & 939 HAMMER_PFSD_DELETED) { 940 *errorp = ENOENT; 941 } else { 942 bytes = cursor.leaf->data_len; 943 if (bytes > sizeof(pfsm->pfsd)) 944 bytes = sizeof(pfsm->pfsd); 945 bcopy(cursor.data, &pfsm->pfsd, bytes); 946 } 947 } 948 } 949 hammer_done_cursor(&cursor); 950 951 pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid); 952 hammer_ref(&pfsm->lock); 953 if (ip) 954 hammer_rel_inode(ip, 0); 955 if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) { 956 kfree(pfsm, hmp->m_misc); 957 goto retry; 958 } 959 return(pfsm); 960 } 961 962 /* 963 * Store pseudo-fs data. The backend will automatically delete any prior 964 * on-disk pseudo-fs data but we have to delete in-memory versions. 965 */ 966 int 967 hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm) 968 { 969 struct hammer_cursor cursor; 970 hammer_record_t record; 971 hammer_inode_t ip; 972 int error; 973 974 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID, 975 HAMMER_DEF_LOCALIZATION, 0, &error); 976 retry: 977 pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid); 978 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip); 979 cursor.key_beg.localization = ip->obj_localization + 980 HAMMER_LOCALIZE_MISC; 981 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT; 982 cursor.key_beg.create_tid = 0; 983 cursor.key_beg.delete_tid = 0; 984 cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS; 985 cursor.key_beg.obj_type = 0; 986 cursor.key_beg.key = pfsm->localization; 987 cursor.asof = HAMMER_MAX_TID; 988 cursor.flags |= HAMMER_CURSOR_ASOF; 989 990 /* 991 * Replace any in-memory version of the record. 
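	 *
	 * If the in-memory record is interlocked by the backend
	 * (HAMMER_RECF_INTERLOCK_BE) it cannot be deleted here; the code
	 * below parks it in cursor.deadlk_rec and returns EDEADLK, and the
	 * whole sequence is retried from the cursor setup above.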
992 */ 993 error = hammer_ip_lookup(&cursor); 994 if (error == 0 && hammer_cursor_inmem(&cursor)) { 995 record = cursor.iprec; 996 if (record->flags & HAMMER_RECF_INTERLOCK_BE) { 997 KKASSERT(cursor.deadlk_rec == NULL); 998 hammer_ref(&record->lock); 999 cursor.deadlk_rec = record; 1000 error = EDEADLK; 1001 } else { 1002 record->flags |= HAMMER_RECF_DELETED_FE; 1003 error = 0; 1004 } 1005 } 1006 1007 /* 1008 * Allocate replacement general record. The backend flush will 1009 * delete any on-disk version of the record. 1010 */ 1011 if (error == 0 || error == ENOENT) { 1012 record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd)); 1013 record->type = HAMMER_MEM_RECORD_GENERAL; 1014 1015 record->leaf.base.localization = ip->obj_localization + 1016 HAMMER_LOCALIZE_MISC; 1017 record->leaf.base.rec_type = HAMMER_RECTYPE_PFS; 1018 record->leaf.base.key = pfsm->localization; 1019 record->leaf.data_len = sizeof(pfsm->pfsd); 1020 bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd)); 1021 error = hammer_ip_add_record(trans, record); 1022 } 1023 hammer_done_cursor(&cursor); 1024 if (error == EDEADLK) 1025 goto retry; 1026 hammer_rel_inode(ip, 0); 1027 return(error); 1028 } 1029 1030 /* 1031 * Create a root directory for a PFS if one does not alredy exist. 1032 * 1033 * The PFS root stands alone so we must also bump the nlinks count 1034 * to prevent it from being destroyed on release. 1035 */ 1036 int 1037 hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred, 1038 hammer_pseudofs_inmem_t pfsm) 1039 { 1040 hammer_inode_t ip; 1041 struct vattr vap; 1042 int error; 1043 1044 ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID, 1045 pfsm->localization, 0, &error); 1046 if (ip == NULL) { 1047 vattr_null(&vap); 1048 vap.va_mode = 0755; 1049 vap.va_type = VDIR; 1050 error = hammer_create_inode(trans, &vap, cred, 1051 NULL, NULL, 0, 1052 pfsm, &ip); 1053 if (error == 0) { 1054 ++ip->ino_data.nlinks; 1055 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY); 1056 } 1057 } 1058 if (ip) 1059 hammer_rel_inode(ip, 0); 1060 return(error); 1061 } 1062 1063 /* 1064 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY 1065 * if we are unable to disassociate all the inodes. 1066 */ 1067 static 1068 int 1069 hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data) 1070 { 1071 int res; 1072 1073 hammer_ref(&ip->lock); 1074 if (ip->lock.refs == 2 && ip->vp) 1075 vclean_unlocked(ip->vp); 1076 if (ip->lock.refs == 1 && ip->vp == NULL) 1077 res = 0; 1078 else 1079 res = -1; /* stop, someone is using the inode */ 1080 hammer_rel_inode(ip, 0); 1081 return(res); 1082 } 1083 1084 int 1085 hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization) 1086 { 1087 int res; 1088 int try; 1089 1090 for (try = res = 0; try < 4; ++try) { 1091 res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root, 1092 hammer_inode_pfs_cmp, 1093 hammer_unload_pseudofs_callback, 1094 &localization); 1095 if (res == 0 && try > 1) 1096 break; 1097 hammer_flusher_sync(trans->hmp); 1098 } 1099 if (res != 0) 1100 res = ENOTEMPTY; 1101 return(res); 1102 } 1103 1104 1105 /* 1106 * Release a reference on a PFS 1107 */ 1108 void 1109 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm) 1110 { 1111 hammer_unref(&pfsm->lock); 1112 if (pfsm->lock.refs == 0) { 1113 RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm); 1114 kfree(pfsm, hmp->m_misc); 1115 } 1116 } 1117 1118 /* 1119 * Called by hammer_sync_inode(). 
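 *
 * In rough outline (a summary of the function below, not a separate
 * code path):
 *
 *	1. If the inode is ONDISK but not DELONDISK, look up the existing
 *	   on-disk inode record and hammer_ip_delete_record() it at
 *	   trans->tid, setting HAMMER_INODE_DELONDISK.
 *	2. Unless the inode is DELETED, build a HAMMER_MEM_RECORD_INODE
 *	   record from sync_ino_leaf/sync_ino_data and write it out with
 *	   hammer_ip_sync_record_cursor(), clearing DELONDISK on success.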
1120 */ 1121 static int 1122 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip) 1123 { 1124 hammer_transaction_t trans = cursor->trans; 1125 hammer_record_t record; 1126 int error; 1127 int redirty; 1128 1129 retry: 1130 error = 0; 1131 1132 /* 1133 * If the inode has a presence on-disk then locate it and mark 1134 * it deleted, setting DELONDISK. 1135 * 1136 * The record may or may not be physically deleted, depending on 1137 * the retention policy. 1138 */ 1139 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) == 1140 HAMMER_INODE_ONDISK) { 1141 hammer_normalize_cursor(cursor); 1142 cursor->key_beg.localization = ip->obj_localization + 1143 HAMMER_LOCALIZE_INODE; 1144 cursor->key_beg.obj_id = ip->obj_id; 1145 cursor->key_beg.key = 0; 1146 cursor->key_beg.create_tid = 0; 1147 cursor->key_beg.delete_tid = 0; 1148 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE; 1149 cursor->key_beg.obj_type = 0; 1150 cursor->asof = ip->obj_asof; 1151 cursor->flags &= ~HAMMER_CURSOR_INITMASK; 1152 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF; 1153 cursor->flags |= HAMMER_CURSOR_BACKEND; 1154 1155 error = hammer_btree_lookup(cursor); 1156 if (hammer_debug_inode) 1157 kprintf("IPDEL %p %08x %d", ip, ip->flags, error); 1158 1159 if (error == 0) { 1160 error = hammer_ip_delete_record(cursor, ip, trans->tid); 1161 if (hammer_debug_inode) 1162 kprintf(" error %d\n", error); 1163 if (error == 0) { 1164 ip->flags |= HAMMER_INODE_DELONDISK; 1165 } 1166 if (cursor->node) 1167 hammer_cache_node(&ip->cache[0], cursor->node); 1168 } 1169 if (error == EDEADLK) { 1170 hammer_done_cursor(cursor); 1171 error = hammer_init_cursor(trans, cursor, 1172 &ip->cache[0], ip); 1173 if (hammer_debug_inode) 1174 kprintf("IPDED %p %d\n", ip, error); 1175 if (error == 0) 1176 goto retry; 1177 } 1178 } 1179 1180 /* 1181 * Ok, write out the initial record or a new record (after deleting 1182 * the old one), unless the DELETED flag is set. This routine will 1183 * clear DELONDISK if it writes out a record. 1184 * 1185 * Update our inode statistics if this is the first application of 1186 * the inode on-disk. 1187 */ 1188 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) { 1189 /* 1190 * Generate a record and write it to the media. We clean-up 1191 * the state before releasing so we do not have to set-up 1192 * a flush_group. 1193 */ 1194 record = hammer_alloc_mem_record(ip, 0); 1195 record->type = HAMMER_MEM_RECORD_INODE; 1196 record->flush_state = HAMMER_FST_FLUSH; 1197 record->leaf = ip->sync_ino_leaf; 1198 record->leaf.base.create_tid = trans->tid; 1199 record->leaf.data_len = sizeof(ip->sync_ino_data); 1200 record->leaf.create_ts = trans->time32; 1201 record->data = (void *)&ip->sync_ino_data; 1202 record->flags |= HAMMER_RECF_INTERLOCK_BE; 1203 1204 /* 1205 * If this flag is set we cannot sync the new file size 1206 * because we haven't finished related truncations. The 1207 * inode will be flushed in another flush group to finish 1208 * the job. 
1209 */ 1210 if ((ip->flags & HAMMER_INODE_WOULDBLOCK) && 1211 ip->sync_ino_data.size != ip->ino_data.size) { 1212 redirty = 1; 1213 ip->sync_ino_data.size = ip->ino_data.size; 1214 } else { 1215 redirty = 0; 1216 } 1217 1218 for (;;) { 1219 error = hammer_ip_sync_record_cursor(cursor, record); 1220 if (hammer_debug_inode) 1221 kprintf("GENREC %p rec %08x %d\n", 1222 ip, record->flags, error); 1223 if (error != EDEADLK) 1224 break; 1225 hammer_done_cursor(cursor); 1226 error = hammer_init_cursor(trans, cursor, 1227 &ip->cache[0], ip); 1228 if (hammer_debug_inode) 1229 kprintf("GENREC reinit %d\n", error); 1230 if (error) 1231 break; 1232 } 1233 1234 /* 1235 * Note: The record was never on the inode's record tree 1236 * so just wave our hands importantly and destroy it. 1237 */ 1238 record->flags |= HAMMER_RECF_COMMITTED; 1239 record->flags &= ~HAMMER_RECF_INTERLOCK_BE; 1240 record->flush_state = HAMMER_FST_IDLE; 1241 ++ip->rec_generation; 1242 hammer_rel_mem_record(record); 1243 1244 /* 1245 * Finish up. 1246 */ 1247 if (error == 0) { 1248 if (hammer_debug_inode) 1249 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags); 1250 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | 1251 HAMMER_INODE_ATIME | 1252 HAMMER_INODE_MTIME); 1253 ip->flags &= ~HAMMER_INODE_DELONDISK; 1254 if (redirty) 1255 ip->sync_flags |= HAMMER_INODE_DDIRTY; 1256 1257 /* 1258 * Root volume count of inodes 1259 */ 1260 hammer_sync_lock_sh(trans); 1261 if ((ip->flags & HAMMER_INODE_ONDISK) == 0) { 1262 hammer_modify_volume_field(trans, 1263 trans->rootvol, 1264 vol0_stat_inodes); 1265 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes; 1266 hammer_modify_volume_done(trans->rootvol); 1267 ip->flags |= HAMMER_INODE_ONDISK; 1268 if (hammer_debug_inode) 1269 kprintf("NOWONDISK %p\n", ip); 1270 } 1271 hammer_sync_unlock(trans); 1272 } 1273 } 1274 1275 /* 1276 * If the inode has been destroyed, clean out any left-over flags 1277 * that may have been set by the frontend. 1278 */ 1279 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 1280 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | 1281 HAMMER_INODE_ATIME | 1282 HAMMER_INODE_MTIME); 1283 } 1284 return(error); 1285 } 1286 1287 /* 1288 * Update only the itimes fields. 1289 * 1290 * ATIME can be updated without generating any UNDO. MTIME is updated 1291 * with UNDO so it is guaranteed to be synchronized properly in case of 1292 * a crash. 1293 * 1294 * Neither field is included in the B-Tree leaf element's CRC, which is how 1295 * we can get away with updating ATIME the way we do. 
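 *
 * Illustrative contrast, mirroring the two branches in the function
 * below: an MTIME update covers the itimes area with UNDO,
 *
 *	hammer_modify_buffer(trans, cursor->data_buffer,
 *			     HAMMER_ITIMES_BASE(&cursor->data->inode),
 *			     HAMMER_ITIMES_BYTES);
 *
 * while an ATIME-only update passes a NULL range and generates no UNDO:
 *
 *	hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);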
1296 */ 1297 static int 1298 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip) 1299 { 1300 hammer_transaction_t trans = cursor->trans; 1301 int error; 1302 1303 retry: 1304 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) != 1305 HAMMER_INODE_ONDISK) { 1306 return(0); 1307 } 1308 1309 hammer_normalize_cursor(cursor); 1310 cursor->key_beg.localization = ip->obj_localization + 1311 HAMMER_LOCALIZE_INODE; 1312 cursor->key_beg.obj_id = ip->obj_id; 1313 cursor->key_beg.key = 0; 1314 cursor->key_beg.create_tid = 0; 1315 cursor->key_beg.delete_tid = 0; 1316 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE; 1317 cursor->key_beg.obj_type = 0; 1318 cursor->asof = ip->obj_asof; 1319 cursor->flags &= ~HAMMER_CURSOR_INITMASK; 1320 cursor->flags |= HAMMER_CURSOR_ASOF; 1321 cursor->flags |= HAMMER_CURSOR_GET_LEAF; 1322 cursor->flags |= HAMMER_CURSOR_GET_DATA; 1323 cursor->flags |= HAMMER_CURSOR_BACKEND; 1324 1325 error = hammer_btree_lookup(cursor); 1326 if (error == 0) { 1327 hammer_cache_node(&ip->cache[0], cursor->node); 1328 if (ip->sync_flags & HAMMER_INODE_MTIME) { 1329 /* 1330 * Updating MTIME requires an UNDO. Just cover 1331 * both atime and mtime. 1332 */ 1333 hammer_sync_lock_sh(trans); 1334 hammer_modify_buffer(trans, cursor->data_buffer, 1335 HAMMER_ITIMES_BASE(&cursor->data->inode), 1336 HAMMER_ITIMES_BYTES); 1337 cursor->data->inode.atime = ip->sync_ino_data.atime; 1338 cursor->data->inode.mtime = ip->sync_ino_data.mtime; 1339 hammer_modify_buffer_done(cursor->data_buffer); 1340 hammer_sync_unlock(trans); 1341 } else if (ip->sync_flags & HAMMER_INODE_ATIME) { 1342 /* 1343 * Updating atime only can be done in-place with 1344 * no UNDO. 1345 */ 1346 hammer_sync_lock_sh(trans); 1347 hammer_modify_buffer(trans, cursor->data_buffer, 1348 NULL, 0); 1349 cursor->data->inode.atime = ip->sync_ino_data.atime; 1350 hammer_modify_buffer_done(cursor->data_buffer); 1351 hammer_sync_unlock(trans); 1352 } 1353 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME); 1354 } 1355 if (error == EDEADLK) { 1356 hammer_done_cursor(cursor); 1357 error = hammer_init_cursor(trans, cursor, 1358 &ip->cache[0], ip); 1359 if (error == 0) 1360 goto retry; 1361 } 1362 return(error); 1363 } 1364 1365 /* 1366 * Release a reference on an inode, flush as requested. 1367 * 1368 * On the last reference we queue the inode to the flusher for its final 1369 * disposition. 1370 */ 1371 void 1372 hammer_rel_inode(struct hammer_inode *ip, int flush) 1373 { 1374 /*hammer_mount_t hmp = ip->hmp;*/ 1375 1376 /* 1377 * Handle disposition when dropping the last ref. 1378 */ 1379 for (;;) { 1380 if (ip->lock.refs == 1) { 1381 /* 1382 * Determine whether on-disk action is needed for 1383 * the inode's final disposition. 1384 */ 1385 KKASSERT(ip->vp == NULL); 1386 hammer_inode_unloadable_check(ip, 0); 1387 if (ip->flags & HAMMER_INODE_MODMASK) { 1388 hammer_flush_inode(ip, 0); 1389 } else if (ip->lock.refs == 1) { 1390 hammer_unload_inode(ip); 1391 break; 1392 } 1393 } else { 1394 if (flush) 1395 hammer_flush_inode(ip, 0); 1396 1397 /* 1398 * The inode still has multiple refs, try to drop 1399 * one ref. 1400 */ 1401 KKASSERT(ip->lock.refs >= 1); 1402 if (ip->lock.refs > 1) { 1403 hammer_unref(&ip->lock); 1404 break; 1405 } 1406 } 1407 } 1408 } 1409 1410 /* 1411 * Unload and destroy the specified inode. Must be called with one remaining 1412 * reference. The reference is disposed of. 1413 * 1414 * The inode must be completely clean. 
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inode's in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(rec->lock.refs == 1);
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_unref(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS: Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			   HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			   HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependancy, then it will be automatically flushed when the dependancy
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		TAILQ_INIT(&flg->flush_list);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependancy we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependancies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependancies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependancies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependancy wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependancy wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}

/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 * so for now do not ref/deref the structures.  Note that if we use the
 * ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
			   hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	/*
	 * If we hit our recursion limit and we have parent dependencies
	 * we cannot continue.  Returning < 0 will cause us to be flagged
	 * for reflush.  Returning -2 cuts off additional dependency checks
	 * because they are likely to also hit the depth limit.
	 *
	 * We cannot return < 0 if there are no dependencies or there might
	 * not be anything to wakeup (ip).
	 */
	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
		kprintf("HAMMER Warning: depth limit reached on "
			"setup recursion, inode %p %016llx\n",
			ip, (long long)ip->obj_id);
		return(-2);
	}

	/*
	 * Scan dependencies
	 */
	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;

		/*
		 * If we failed due to the recursion depth limit then stop
		 * now.
		 */
		if (r == -2)
			break;
	}
	return(good);
}

/*
 * This helper function takes a record representing the dependancy between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependancy and there is no connectivity.
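 *
 * Example scenario (illustrative, not taken from a specific caller):
 * when a new file is created, the directory-entry ADD record has
 * record->ip pointing at the directory and record->target_ip at the new
 * file.  Flushing the file requires that ADD record to land in the same
 * flush group, and a successful conversion here returns 1.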
1749 */ 1750 static int 1751 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth, 1752 hammer_flush_group_t flg) 1753 { 1754 hammer_mount_t hmp; 1755 hammer_inode_t pip; 1756 int good; 1757 1758 KKASSERT(record->flush_state != HAMMER_FST_IDLE); 1759 pip = record->ip; 1760 hmp = pip->hmp; 1761 1762 /* 1763 * If the record is already flushing, is it in our flush group? 1764 * 1765 * If it is in our flush group but it is a general record or a 1766 * delete-on-disk, it does not improve our connectivity (return 0), 1767 * and if the target inode is not trying to destroy itself we can't 1768 * allow the operation yet anyway (the second return -1). 1769 */ 1770 if (record->flush_state == HAMMER_FST_FLUSH) { 1771 /* 1772 * If not in our flush group ask the parent to reflush 1773 * us as soon as possible. 1774 */ 1775 if (record->flush_group != flg) { 1776 pip->flags |= HAMMER_INODE_REFLUSH; 1777 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1778 return(-1); 1779 } 1780 1781 /* 1782 * If in our flush group everything is already set up, 1783 * just return whether the record will improve our 1784 * visibility or not. 1785 */ 1786 if (record->type == HAMMER_MEM_RECORD_ADD) 1787 return(1); 1788 return(0); 1789 } 1790 1791 /* 1792 * It must be a setup record. Try to resolve the setup dependancies 1793 * by recursing upwards so we can place ip on the flush list. 1794 * 1795 * Limit ourselves to 20 levels of recursion to avoid blowing out 1796 * the kernel stack. If we hit the recursion limit we can't flush 1797 * until the parent flushes. The parent will flush independantly 1798 * on its own and ultimately a deep recursion will be resolved. 1799 */ 1800 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1801 1802 good = hammer_setup_parent_inodes(pip, depth + 1, flg); 1803 1804 /* 1805 * If good < 0 the parent has no connectivity and we cannot safely 1806 * flush the directory entry, which also means we can't flush our 1807 * ip. Flag us for downward recursion once the parent's 1808 * connectivity is resolved. Flag the parent for [re]flush or it 1809 * may not check for downward recursions. 1810 */ 1811 if (good < 0) { 1812 pip->flags |= HAMMER_INODE_REFLUSH; 1813 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN; 1814 return(good); 1815 } 1816 1817 /* 1818 * We are go, place the parent inode in a flushing state so we can 1819 * place its record in a flushing state. Note that the parent 1820 * may already be flushing. The record must be in the same flush 1821 * group as the parent. 1822 */ 1823 if (pip->flush_state != HAMMER_FST_FLUSH) 1824 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION); 1825 KKASSERT(pip->flush_state == HAMMER_FST_FLUSH); 1826 KKASSERT(record->flush_state == HAMMER_FST_SETUP); 1827 1828 #if 0 1829 if (record->type == HAMMER_MEM_RECORD_DEL && 1830 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) { 1831 /* 1832 * Regardless of flushing state we cannot sync this path if the 1833 * record represents a delete-on-disk but the target inode 1834 * is not ready to sync its own deletion. 1835 * 1836 * XXX need to count effective nlinks to determine whether 1837 * the flush is ok, otherwise removing a hardlink will 1838 * just leave the DEL record to rot. 
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	} else
#endif
	if (pip->flush_group == flg) {
		/*
		 * Because we have not calculated nlinks yet we can just
		 * set records to the flush state if the parent is in
		 * the same flush group as we are.
		 */
		record->flush_state = HAMMER_FST_FLUSH;
		record->flush_group = flg;
		++record->flush_group->refs;
		hammer_ref(&record->lock);

		/*
		 * A general directory-add contributes to our visibility.
		 *
		 * Otherwise it is probably a directory-delete or
		 * delete-on-disk record and does not contribute to our
		 * visibility (but we can still flush it).
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	} else {
		/*
		 * If the parent is not in our flush group we cannot
		 * flush this record yet, there is no visibility.
		 * We tell the parent to reflush and mark ourselves
		 * so the parent knows it should flush us too.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(-1);
	}
}

/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = flg;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;
	++flg->total_count;

	/*
	 * If the flush group reaches the autoflush limit we want to signal
	 * the flusher.  This is particularly important for remove()s.
	 */
	if (flg->total_count == hammer_autoflush)
		flags |= HAMMER_FLUSH_SIGNAL;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
	if (flags & HAMMER_FLUSH_RECURSION) {
		/*
		 * If this is an upwards recursion we do not want to
		 * recurse down again!
		 */
		go_count = 1;
#if 0
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * No new records are added if we must complete a flush
		 * from a previous cycle, but we do have to move the records
		 * from the previous cycle to the current one.
		 */
#if 0
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
#endif
		go_count = 1;
#endif
	} else {
		/*
		 * Normal flush, scan records and bring them into the flush.
		 * Directory adds and deletes are usually skipped (they are
		 * grouped with the related inode rather than with the
		 * directory).
		 *
		 * go_count can be negative, which means the scan aborted
		 * due to the flush group being over-full and we should
		 * flush what we have.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and must
	 * ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			--flg->total_count;
			ip->flush_state = HAMMER_FST_SETUP;
			ip->flush_group = NULL;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}

			/*
			 * REFLUSH is needed to trigger dependency wakeups
			 * when an inode is in SETUP.
			 */
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * We continue to retain save_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 *	 and stays in ip->flags.  Once set, it stays set until the
	 *	 inode is destroyed.
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
		ip->sync_trunc_off = ip->trunc_off;
		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;

		/*
		 * The save_trunc_off used to cache whether the B-Tree
		 * holds any records past that point is not used until
		 * after the truncation has succeeded, so we can safely
		 * set it now.
		 */
		if (ip->save_trunc_off > ip->sync_trunc_off)
			ip->save_trunc_off = ip->sync_trunc_off;
	}
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	KKASSERT(flg->running == 0);
	TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp, flg);
	}
}
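
/*
 * Editor's note: the block below is an illustrative sketch added for
 * clarity and is not part of the original driver.  It walks through the
 * truncation snapshot performed by hammer_flush_inode_core() above using
 * made-up offsets, showing the state the backend flusher is expected to
 * see afterwards.
 */
#if 0
	/*
	 * Hypothetical example: the frontend truncated the file at offset
	 * 0x8000 and queued the inode.  After the snapshot the backend
	 * works from sync_trunc_off, the frontend's trunc_off is re-armed
	 * to the maximum, and save_trunc_off is clamped so later overwrite
	 * checks know nothing past 0x8000 should remain on-media once the
	 * deletion succeeds.
	 */
	ip->trunc_off = 0x8000;			/* set by the frontend */
	ip->flags |= HAMMER_INODE_TRUNCATED;
	/* ... hammer_flush_inode_core() snapshots the inode ... */
	KKASSERT(ip->sync_trunc_off == 0x8000);
	KKASSERT(ip->trunc_off == 0x7FFFFFFFFFFFFFFFLL);
	KKASSERT(ip->save_trunc_off <= ip->sync_trunc_off);
#endif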

/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_flush_group_t flg;
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Records deleted or committed by the backend are ignored.
	 * Note that the flush detects deleted frontend records at
	 * multiple points to deal with races.  This is just the first
	 * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
	 * messes up link-count calculations.
	 *
	 * NOTE: Don't get confused between record deletion and, say,
	 * directory entry deletion.  The deletion of a directory entry
	 * which is on-media has nothing to do with the record deletion
	 * flags.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			  HAMMER_RECF_COMMITTED)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->flush_group == rec->ip->flush_group);
			r = 1;
		} else {
			r = 0;
		}
		return(r);
	}

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	flg = ip->flush_group;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * The record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = flg;
		++flg->refs;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * The record has a setup dependency.  These are typically
		 * directory entry adds and deletes.  Such entries will be
		 * flushed when their inodes are flushed so we do not
		 * usually have to add them to the flush here.  However,
		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
		 * it is asking us to flush this record (and it).
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

		/*
		 * If the target IP is already flushing in our group
		 * we could associate the record, but target_ip has
		 * already synced ino_data to sync_ino_data and we
		 * would also have to adjust nlinks.  Plus there are
		 * ordering issues for adds and deletes.
		 *
		 * Reflush downward if this is an ADD, and upward if
		 * this is a DEL.
		 */
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			if (rec->type == HAMMER_MEM_RECORD_ADD)
				ip->flags |= HAMMER_INODE_REFLUSH;
			else
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			break;
		}

		/*
		 * Target IP is not yet flushing.  This can get complex
		 * because we have to be careful about the recursion.
		 *
		 * Directories create an issue for us in that if a flush
		 * of a directory is requested the expectation is to flush
		 * any pending directory entries, but this will cause the
		 * related inodes to recursively flush as well.  We can't
		 * really defer the operation so just get as many as we
		 * can and let the remainder reflush in a later group.
		 */
#if 0
		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
			/*
			 * We aren't reclaiming and the target ip was not
			 * previously prevented from flushing due to this
			 * record dependency.  Do not flush this record.
			 */
			/*r = 0;*/
		} else
#endif
		if (flg->total_count + flg->refs >
		    ip->hmp->undo_rec_limit) {
			/*
			 * Our flush group is over-full and we risk blowing
			 * out the UNDO FIFO.  Stop the scan, flush what we
			 * have, then reflush the directory.
			 *
			 * The directory may be forced through multiple
			 * flush groups before it can be completely
			 * flushed.
			 */
			ip->flags |= HAMMER_INODE_RESIGNAL |
				     HAMMER_INODE_REFLUSH;
			r = -1;
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 *
			 * XXX
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * The flush_group should already match.
		 */
		KKASSERT(rec->flush_group == flg);
		r = 1;
		break;
	}
	return(r);
}

#if 0
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		KKASSERT(rec->flush_group == ip->flush_group);
		break;
	default:
		break;
	}
	return(0);
}
#endif

/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_flush_group_t flg;

	flg = NULL;
	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
		while (ip->flush_state != HAMMER_FST_IDLE &&
		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
			if (ip->flush_state == HAMMER_FST_SETUP)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			if (ip->flush_state != HAMMER_FST_IDLE) {
				ip->flags |= HAMMER_INODE_FLUSHW;
				tsleep(&ip->flags, 0, "hmrwin", 0);
			}
		}
	}
}
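
/*
 * Editor's note: illustrative sketch, not part of the original driver.
 * It shows roughly how a synchronous frontend path (an fsync-style
 * caller) is expected to combine the flush and wait primitives above:
 * queue the inode with HAMMER_FLUSH_SIGNAL so the flusher is kicked
 * immediately, then block until the inode drops back to FST_IDLE.
 * The example function name is hypothetical and locking/transaction
 * setup is assumed to be handled by the caller.
 */
#if 0
static void
hammer_example_sync_inode_now(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_MODMASK) {
		hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		hammer_wait_inode(ip);
	}
}
#endif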

/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip, int error)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Auto-reflush if the backend could not completely flush
	 * the inode.  This fixes a case where a deferred buffer flush
	 * could cause fsync to return early.
	 */
	if (ip->sync_flags & HAMMER_INODE_MODMASK)
		ip->flags |= HAMMER_INODE_REFLUSH;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 * Incomplete truncations are retained by the backend.
	 */
	ip->error = error;
	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
	ip->sync_flags &= HAMMER_INODE_TRUNCATED;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend's count set the frontend's DDIRTY
	 * flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 *
	 * The reflush flag can be set superfluously, causing extra pain
	 * for no reason.  If the inode is no longer modified it no longer
	 * needs to be flushed.
	 */
	if (ip->flags & HAMMER_INODE_MODMASK) {
		if (ip->vp == NULL)
			ip->flags |= HAMMER_INODE_REFLUSH;
	} else {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
	}

	/*
	 * Adjust the flush state.
	 */
	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * We were unable to flush out all our records, leave the
		 * inode in a flush state and in the current flush group.
		 * The flush group will be re-run.
		 *
		 * This occurs if the UNDO block gets too full or there is
		 * too much dirty meta-data and allows the flusher to
		 * finalize the UNDO block and then re-flush.
		 */
		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
		dorel = 0;
	} else {
		/*
		 * Remove from the flush_group
		 */
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		ip->flush_group = NULL;

		/*
		 * Clean up the vnode ref and tracking counts.
		 */
		if (ip->flags & HAMMER_INODE_VHELD) {
			ip->flags &= ~HAMMER_INODE_VHELD;
			vrele(ip->vp);
		}
		--hmp->count_iqueued;
		--hammer_count_iqueued;

		/*
		 * And adjust the state.
		 */
		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
			ip->flush_state = HAMMER_FST_IDLE;
			dorel = 1;
		} else {
			ip->flush_state = HAMMER_FST_SETUP;
			dorel = 0;
		}

		/*
		 * If the frontend is waiting for a flush to complete,
		 * wake it up.
		 */
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}

		/*
		 * If the frontend made more changes and requested another
		 * flush, then try to get it running.
		 *
		 * Reflushes are aborted when the inode is errored out.
		 */
		if (ip->flags & HAMMER_INODE_REFLUSH) {
			ip->flags &= ~HAMMER_INODE_REFLUSH;
			if (ip->flags & HAMMER_INODE_RESIGNAL) {
				ip->flags &= ~HAMMER_INODE_RESIGNAL;
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			} else {
				hammer_flush_inode(ip, 0);
			}
		}
	}

	/*
	 * If we have no parent dependencies we can clear CONN_DOWN
	 */
	if (TAILQ_EMPTY(&ip->target_list))
		ip->flags &= ~HAMMER_INODE_CONN_DOWN;

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	if (dorel)
		hammer_rel_inode(ip, 0);
}

/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	hammer_mount_t hmp = trans->hmp;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

#if 1
	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %p %p\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		Debugger("blah2");
		return(0);
	}
#endif
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 * record out, but the flush completion code converts it to
	 * a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;

	/*
	 * The backend has already disposed of the record.
	 */
	if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
		error = 0;
		goto done;
	}

	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, so we can't sync any new records to disk
	 * because they would be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * in the blockmap.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			/*
			 * Set deleted-by-backend flag.  Do not set the
			 * backend committed flag, because we are throwing
			 * the record away.
			 */
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * Convert a front-end deleted directory-add to
			 * a directory-delete entry later.
			 */
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			/*
			 * Dispose of the record (race case).  Mark as
			 * deleted by backend (and not committed).
			 */
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL) {
		record->leaf.base.create_tid = trans->tid;
		record->leaf.create_ts = trans->time32;
	}
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error)
		error = -error;
done:
	hammer_flush_record_done(record, error);

	/*
	 * Do partial finalization if we have built up too many dirty
	 * buffers.  Otherwise a buffer cache deadlock can occur when
	 * doing things like creating tens of thousands of tiny files.
	 *
	 * We must release our cursor lock to avoid a 3-way deadlock
	 * due to the exclusive sync lock the finalizer must get.
	 */
	if (hammer_flusher_meta_limit(hmp)) {
		hammer_unlock_cursor(cursor);
		hammer_flusher_finalize(trans, 0);
		hammer_lock_cursor(cursor);
	}

	return(error);
}
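
/*
 * Editor's note: illustrative sketch, not part of the original driver.
 * hammer_setup_child_callback() and hammer_sync_record_callback() are
 * both driven through RB_SCAN(), which stops the in-memory record scan
 * as soon as a callback returns a negative value and otherwise
 * accumulates the non-negative return values for the caller (hence the
 * "go_count can be negative" and "tmp_error < 0" handling above).  A
 * minimal callback following that contract could look like this; the
 * specific tests are hypothetical.
 */
#if 0
static int
hammer_example_scan_callback(hammer_record_t rec, void *data)
{
	if (rec->flags & HAMMER_RECF_DELETED_BE)
		return(0);		/* skip, keep scanning */
	if (rec->flush_state == HAMMER_FST_FLUSH)
		return(1);		/* counted by the caller */
	return(-1);			/* abort the scan */
}
#endif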

/*
 * Backend function called by the flusher to sync an inode to media.
 */
int
hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	if (error)
		goto done;

	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->flush_group) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group and not deleted by
			 * the front-end, adjust the link count synced to
			 * the media (undo what the frontend did when it
			 * queued the record).
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * If there is a truncation queued destroy any data past the
	 * (aligned) truncation point.  Userland will have dealt with the
	 * buffer containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache, EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
						aligned_trunc_off,
						0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}

		if (error)
			goto done;

		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries, records tracking direct-writes, or delete-on-disk records.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error < 0)
			tmp_error = -tmp_error;	/* convert scan abort to errno */
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);

	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
		if (tmp_node) {
			hammer_cursor_downgrade(&cursor);
			hammer_lock_sh(&tmp_node->lock);
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_unlock(&tmp_node->lock);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set RDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans->tid;
			ip->sync_ino_leaf.base.delete_tid = trans->tid;
			ip->ino_leaf.delete_ts = trans->time32;
			ip->sync_ino_leaf.delete_ts = trans->time32;

			/*
			 * Adjust the inode count in the volume header
			 */
			hammer_sync_lock_sh(trans);
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
			}
			hammer_sync_unlock(trans);
		}
	}

	if (error)
		goto done;
	ip->sync_flags &= ~HAMMER_INODE_BUFS;

defer_buffer_flush:
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the on-disk
	 * inode to satisfy visibility requirements if there happen to be
	 * directory dependencies.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans->tid;
		ip->ino_leaf.create_ts = trans->time32;
		ip->sync_ino_leaf.base.create_tid = trans->tid;
		ip->sync_ino_leaf.create_ts = trans->time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME |
			      HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
done:
	if (error) {
		hammer_critical_error(ip->hmp, ip, error,
				      "while syncing inode");
	}
	hammer_done_cursor(&cursor);
	return(error);
}

/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 *
	 * Don't reflag the deletion if the flusher is currently syncing
	 * one that was already flagged.  A previously set DELETING flag
	 * may bounce around flags and sync_flags until the operation is
	 * completely done.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		/*
		 * Final cleanup
		 */
		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}

/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}

/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Pipeline wakeups to threads blocked due to an excessive number of
 * detached inodes.  The reclaim count generates a bit of negative
 * feedback.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip, int dowake)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT || dowake) {
		reclaim = TAILQ_FIRST(&hmp->reclaim_list);
		if (reclaim && reclaim->count > 0 && --reclaim->count == 0) {
			TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
			wakeup(reclaim);
		}
	}
}

/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;
	int delay;

	if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT)
		return;
	delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
		(HAMMER_RECLAIM_WAIT * 3) + 1;
	if (delay > 0) {
		reclaim.count = 2;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
		tsleep(&reclaim, 0, "hmrrcm", delay);
		if (reclaim.count > 0)
			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
	}
}

/*
 * A larger than normal backlog of inodes is sitting in the flusher;
 * enforce a general slowdown to let it catch up.  This routine is only
 * called on completion of a non-flusher-related transaction which
 * performed B-Tree node I/O.
 *
 * It is possible for the flusher to stall in a continuous load.
 * blogbench -i1000 -o seems to do a good job generating this sort of load.
 * If the flusher is unable to catch up the inode count can bloat until
 * we run out of kvm.
 *
 * This is a bit of a hack.
 */
void
hammer_inode_waithard(hammer_mount_t hmp)
{
	/*
	 * Hysteresis.
	 */
	if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
		if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT / 2 &&
		    hmp->count_iqueued < hmp->count_inodes / 20) {
			hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
			return;
		}
	} else {
		if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT ||
		    hmp->count_iqueued < hmp->count_inodes / 10) {
			return;
		}
		hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
	}

	/*
	 * Block for one flush cycle.
	 */
	hammer_flusher_wait_next(hmp);
}
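
/*
 * Editor's note: illustrative worked example, not part of the original
 * driver.  The delay computed by hammer_inode_waitreclaims() above grows
 * linearly with the reclaim backlog.  The constants used below are
 * assumptions for illustration only; the real values of
 * HAMMER_RECLAIM_WAIT and hz come from the headers and the running
 * system.
 *
 * Assuming HAMMER_RECLAIM_WAIT were 4000 and hz were 100, a backlog of
 * 10000 reclaiming inodes would yield:
 *
 *	delay = (10000 - 4000) * 100 / (4000 * 3) + 1
 *	      = 600000 / 12000 + 1
 *	      = 51 ticks, roughly half a second of sleep per offending
 *	        transaction until the flusher catches up.
 */
#if 0
static int
hammer_example_reclaim_delay(int inode_reclaims)
{
	int reclaim_wait = 4000;	/* assumed value, for illustration */
	int hz_example = 100;		/* assumed value, for illustration */

	return ((inode_reclaims - reclaim_wait) * hz_example /
		(reclaim_wait * 3) + 1);
}
#endif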