1 /* 2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 /* 36 * TRANSACTION AND FLUSH HANDLING 37 * 38 * Deceptively simple but actually fairly difficult to implement properly is 39 * how I would describe it. 
 *
 * Flushing generally occurs bottom-up but requires a top-down scan to
 * locate chains with MODIFIED and/or UPDATE bits set.  The ONFLUSH flag
 * tells how to recurse downward to find these chains.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>

#include "hammer2.h"

#define FLUSH_DEBUG 0

#define HAMMER2_FLUSH_DEPTH_LIMIT	10	/* stack recursion limit */


/*
 * Recursively flush the specified chain.  The chain is locked and
 * referenced by the caller and will remain so on return.  The chain
 * will remain referenced throughout but can temporarily lose its
 * lock during the recursion to avoid unnecessarily stalling user
 * processes.
 *
 * Per-flush state shared by hammer2_flush(), hammer2_flush_core(),
 * and hammer2_flush_recurse() for the duration of one flush call.
 */
struct hammer2_flush_info {
	hammer2_chain_t *parent;	/* current parent in downward scan,
					 * can be NULL at the top */
	int		depth;		/* recursion depth, deferral occurs
					 * at HAMMER2_FLUSH_DEPTH_LIMIT */
	int		diddeferral;	/* non-zero if children deferred */
	int		cache_index;	/* hint for base_delete/base_insert */
	int		flags;		/* HAMMER2_FLUSH_* control flags */
	struct h2_flush_list flushq;	/* deferred chains, re-run after the
					 * stack unwinds */
	hammer2_chain_t *debug;		/* debug aid (hammer2_debug & 0x200) */
};

typedef struct hammer2_flush_info hammer2_flush_info_t;

static void hammer2_flush_core(hammer2_flush_info_t *info,
				hammer2_chain_t *chain, int flags);
static int hammer2_flush_recurse(hammer2_chain_t *child, void *data);

/*
 * Any per-pfs transaction initialization goes here.
 * (currently nothing to do)
 */
void
hammer2_trans_manage_init(hammer2_pfs_t *pmp)
{
}

/*
 * Transaction support for any modifying operation.  Transactions are used
 * in the pmp layer by the frontend and in the spmp layer by the backend.
 *
 * 0			- Normal transaction, interlocked against flush
 *			  transaction.
 *
 * TRANS_ISFLUSH	- Flush transaction, interlocked against normal
 *			  transaction.
 *
 * TRANS_BUFCACHE	- Buffer cache transaction, no interlock.
 *
 * Initializing a new transaction allocates a transaction ID.  Typically
 * passed a pmp (hmp passed as NULL), indicating a cluster transaction.  Can
 * be passed a NULL pmp and non-NULL hmp to indicate a transaction on a single
 * media target.  The latter mode is used by the recovery code.
 *
 * TWO TRANSACTION IDs can run concurrently, where one is a flush and the
 * other is a set of any number of concurrent filesystem operations.  We
 * can either have <running_fs_ops> + <waiting_flush> + <blocked_fs_ops>
 * or we can have <running_flush> + <concurrent_fs_ops>.
 *
 * During a flush, new fs_ops are only blocked until the fs_ops prior to
 * the flush complete.  The new fs_ops can then run concurrent with the flush.
 *
 * Buffer-cache transactions operate as fs_ops but never block.  A
 * buffer-cache flush will run either before or after the current pending
 * flush depending on its state.
 */
void
hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;
	int dowait;

	/*
	 * Lockless CAS loop: compute the desired new state (nflags) from
	 * the sampled state (oflags) and retry until atomic_cmpset_int()
	 * succeeds.  The low bits (HAMMER2_TRANS_MASK) form the count of
	 * active transactions.
	 */
	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		dowait = 0;

		if (flags & HAMMER2_TRANS_ISFLUSH) {
			/*
			 * Requesting flush transaction.  Wait for all
			 * currently running transactions to finish.
			 */
			if (oflags & HAMMER2_TRANS_MASK) {
				nflags = oflags | HAMMER2_TRANS_FPENDING |
					 HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else if (flags & HAMMER2_TRANS_BUFCACHE) {
			/*
			 * Requesting strategy transaction.  Generally
			 * allowed in all situations unless a flush
			 * is running without the preflush flag.
			 */
			if ((oflags & (HAMMER2_TRANS_ISFLUSH |
				       HAMMER2_TRANS_PREFLUSH)) ==
			    HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		} else {
			/*
			 * Requesting normal transaction.  Wait for any
			 * flush to finish before allowing.
			 */
			if (oflags & HAMMER2_TRANS_ISFLUSH) {
				nflags = oflags | HAMMER2_TRANS_WAITING;
				dowait = 1;
			} else {
				nflags = (oflags | flags) + 1;
			}
		}
		/*
		 * Interlock the sleep against the CAS so a wakeup between
		 * the CAS and the tsleep is not lost.
		 */
		if (dowait)
			tsleep_interlock(&pmp->trans.sync_wait, 0);
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if (dowait == 0)
				break;
			tsleep(&pmp->trans.sync_wait, PINTERLOCKED,
			       "h2trans", hz);
		} else {
			cpu_pause();
		}
		/* retry */
	}
}

/*
 * Start a sub-transaction, there is no 'subdone' function.  This will
 * issue a new modify_tid (mtid) for the current transaction, which is a
 * CLC (cluster level change) id and not a per-node id.
 *
 * This function must be called for each XOP when multiple XOPs are run in
 * sequence within a transaction.
 *
 * Callers typically update the inode with the transaction mtid manually
 * to enforce sequencing.
 */
hammer2_tid_t
hammer2_trans_sub(hammer2_pfs_t *pmp)
{
	hammer2_tid_t mtid;

	mtid = atomic_fetchadd_64(&pmp->modify_tid, 1);

	return (mtid);
}

/*
 * Clears the PREFLUSH stage, called during a flush transaction after all
 * logical buffer I/O has completed.
 */
void
hammer2_trans_clear_preflush(hammer2_pfs_t *pmp)
{
	atomic_clear_int(&pmp->trans.flags, HAMMER2_TRANS_PREFLUSH);
}

/*
 * Terminate a transaction started by hammer2_trans_init().  Decrements
 * the active transaction count.  When the last transaction drains, the
 * transaction-type state flags are cleared and any waiters blocked in
 * hammer2_trans_init() are woken up.
 */
void
hammer2_trans_done(hammer2_pfs_t *pmp)
{
	uint32_t oflags;
	uint32_t nflags;

	/*
	 * Lockless CAS loop, same structure as hammer2_trans_init().
	 */
	for (;;) {
		oflags = pmp->trans.flags;
		cpu_ccfence();
		KKASSERT(oflags & HAMMER2_TRANS_MASK);
		if ((oflags & HAMMER2_TRANS_MASK) == 1) {
			/*
			 * This was the last transaction
			 */
			nflags = (oflags - 1) & ~(HAMMER2_TRANS_ISFLUSH |
						  HAMMER2_TRANS_BUFCACHE |
						  HAMMER2_TRANS_PREFLUSH |
						  HAMMER2_TRANS_FPENDING |
						  HAMMER2_TRANS_WAITING);
		} else {
			/*
			 * Still transactions pending
			 */
			nflags = oflags - 1;
		}
		if (atomic_cmpset_int(&pmp->trans.flags, oflags, nflags)) {
			if ((nflags & HAMMER2_TRANS_MASK) == 0 &&
			    (oflags & HAMMER2_TRANS_WAITING)) {
				wakeup(&pmp->trans.sync_wait);
			}
			break;
		} else {
			cpu_pause();
		}
		/* retry */
	}
}

/*
 * Obtain new, unique inode number (not serialized by caller).
 */
hammer2_tid_t
hammer2_trans_newinum(hammer2_pfs_t *pmp)
{
	hammer2_tid_t tid;

	tid = atomic_fetchadd_64(&pmp->inode_tid, 1);

	return tid;
}

/*
 * Assert that a strategy call is ok here.  Strategy calls are legal
 *
 * (1) In a normal transaction.
 * (2) In a flush transaction only if PREFLUSH is also set.
 */
void
hammer2_trans_assert_strategy(hammer2_pfs_t *pmp)
{
	KKASSERT((pmp->trans.flags & HAMMER2_TRANS_ISFLUSH) == 0 ||
		 (pmp->trans.flags & HAMMER2_TRANS_PREFLUSH));
}


/*
 * Chains undergoing destruction are removed from the in-memory topology.
 * To avoid getting lost these chains are placed on the delayed flush
 * queue which will properly dispose of them.
 *
 * We do this instead of issuing an immediate flush in order to give
 * recursive deletions (rm -rf, etc) a chance to remove more of the
 * hierarchy, potentially allowing an enormous amount of write I/O to
 * be avoided.
 */
void
hammer2_delayed_flush(hammer2_chain_t *chain)
{
	/*
	 * Unlocked pre-check avoids taking the list spinlock when the
	 * chain is already queued; the flags are re-tested under the
	 * spinlock before actually queueing.
	 */
	if ((chain->flags & HAMMER2_CHAIN_DELAYED) == 0) {
		hammer2_spin_ex(&chain->hmp->list_spin);
		if ((chain->flags & (HAMMER2_CHAIN_DELAYED |
				     HAMMER2_CHAIN_DEFERRED)) == 0) {
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELAYED |
						      HAMMER2_CHAIN_DEFERRED);
			TAILQ_INSERT_TAIL(&chain->hmp->flushq,
					  chain, flush_node);
			hammer2_chain_ref(chain);	/* ref for flushq */
		}
		hammer2_spin_unex(&chain->hmp->list_spin);
		hammer2_voldata_modify(chain->hmp);
	}
}

/*
 * Flush the chain and all modified sub-chains through the specified
 * synchronization point, propagating blockref updates back up.  As
 * part of this propagation, mirror_tid and inode/data usage statistics
 * propagates back upward.
 *
 * modify_tid (clc - cluster level change) is not propagated.
 *
 * update_tid (clc) is used for validation and is not propagated by this
 * function.
 *
 * This routine can be called from several places but the most important
 * is from VFS_SYNC (frontend) via hammer2_inode_xop_flush (backend).
 *
 * chain is locked on call and will remain locked on return.  The chain's
 * UPDATE flag indicates that its parent's block table (which is not yet
 * part of the flush) should be updated.  The chain may be replaced by
 * the call if it was modified.
 */
void
hammer2_flush(hammer2_chain_t *chain, int flags)
{
	hammer2_chain_t *scan;
	hammer2_flush_info_t info;
	hammer2_dev_t *hmp;
	int loops;

	/*
	 * Execute the recursive flush and handle deferrals.
	 *
	 * Chains can be ridiculously long (thousands deep), so to
	 * avoid blowing out the kernel stack the recursive flush has a
	 * depth limit.  Elements at the limit are placed on a list
	 * for re-execution after the stack has been popped.
	 */
	bzero(&info, sizeof(info));
	TAILQ_INIT(&info.flushq);
	info.cache_index = -1;
	info.flags = flags & ~HAMMER2_FLUSH_TOP;

	/*
	 * Calculate parent (can be NULL), if not NULL the flush core
	 * expects the parent to be referenced so it can easily lock/unlock
	 * it without it getting ripped up.
	 */
	if ((info.parent = chain->parent) != NULL)
		hammer2_chain_ref(info.parent);

	/*
	 * Extra ref needed because flush_core expects it when replacing
	 * chain.
	 */
	hammer2_chain_ref(chain);
	hmp = chain->hmp;
	loops = 0;

	for (;;) {
		/*
		 * Move hmp->flushq to info.flushq if non-empty so it can
		 * be processed.
		 */
		if (TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_spin_ex(&chain->hmp->list_spin);
			TAILQ_CONCAT(&info.flushq, &hmp->flushq, flush_node);
			hammer2_spin_unex(&chain->hmp->list_spin);
		}

		/*
		 * Unwind deep recursions which had been deferred.  This
		 * can leave the FLUSH_* bits set for these chains, which
		 * will be handled when we [re]flush chain after the unwind.
		 */
		while ((scan = TAILQ_FIRST(&info.flushq)) != NULL) {
			KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
			TAILQ_REMOVE(&info.flushq, scan, flush_node);
			atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED |
						       HAMMER2_CHAIN_DELAYED);

			/*
			 * Now that we've popped back up we can do a secondary
			 * recursion on the deferred elements.
			 *
			 * NOTE: hammer2_flush() may replace scan.
			 */
			if (hammer2_debug & 0x0040)
				kprintf("deferred flush %p\n", scan);
			hammer2_chain_lock(scan, HAMMER2_RESOLVE_MAYBE);
			hammer2_flush(scan, flags & ~HAMMER2_FLUSH_TOP);
			hammer2_chain_unlock(scan);
			hammer2_chain_drop(scan);	/* ref from deferral */
		}

		/*
		 * [re]flush chain.
		 */
		info.diddeferral = 0;
		hammer2_flush_core(&info, chain, flags);

		/*
		 * Only loop if deep recursions have been deferred.
		 */
		if (TAILQ_EMPTY(&info.flushq))
			break;

		if (++loops % 1000 == 0) {
			kprintf("hammer2_flush: excessive loops on %p\n",
				chain);
			if (hammer2_debug & 0x100000)
				Debugger("hell4");
		}
	}
	hammer2_chain_drop(chain);
	if (info.parent)
		hammer2_chain_drop(info.parent);
}

/*
 * This is the core of the chain flushing code.  The chain is locked by the
 * caller and must also have an extra ref on it by the caller, and remains
 * locked and will have an extra ref on return.  Upon return, the caller can
 * test the UPDATE bit on the child to determine if the parent needs updating.
 *
 * (1) Determine if this node is a candidate for the flush, return if it is
 *     not.  fchain and vchain are always candidates for the flush.
 *
 * (2) If we recurse too deep the chain is entered onto the deferral list and
 *     the current flush stack is aborted until after the deferral list is
 *     run.
 *
 * (3) Recursively flush live children (rbtree).  This can create deferrals.
 *     A successful flush clears the MODIFIED and UPDATE bits on the children
 *     and typically causes the parent to be marked MODIFIED as the children
 *     update the parent's block table.  A parent might already be marked
 *     MODIFIED due to a deletion (whos blocktable update in the parent is
 *     handled by the frontend), or if the parent itself is modified by the
 *     frontend for other reasons.
 *
 * (4) Permanently disconnected sub-trees are cleaned up by the front-end.
 *     Deleted-but-open inodes can still be individually flushed via the
 *     filesystem syncer.
 *
 * (5) Delete parents on the way back up if they are normal indirect blocks
 *     and have no children.
 *
 * (6) Note that an unmodified child may still need the block table in its
 *     parent updated (e.g. rename/move).  The child will have UPDATE set
 *     in this case.
 *
 * WARNING ON BREF MODIFY_TID/MIRROR_TID
 *
 * blockref.modify_tid is consistent only within a PFS, and will not be
 * consistent during synchronization.  mirror_tid is consistent across the
 * block device regardless of the PFS.
 */
static void
hammer2_flush_core(hammer2_flush_info_t *info, hammer2_chain_t *chain,
		   int flags)
{
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int diddeferral;

	/*
	 * (1) Optimize downward recursion to locate nodes needing action.
	 *     Nothing to do if none of these flags are set.
	 */
	if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) == 0) {
		if (hammer2_debug & 0x200) {
			if (info->debug == NULL)
				info->debug = chain;
		} else {
			return;
		}
	}

	hmp = chain->hmp;
	/*
	 * NOTE(review): diddeferral is saved here but not otherwise used
	 * in this function as visible — confirm intent upstream.
	 */
	diddeferral = info->diddeferral;
	parent = info->parent;		/* can be NULL */

	/*
	 * Downward search recursion
	 */
	if (chain->flags & (HAMMER2_CHAIN_DEFERRED | HAMMER2_CHAIN_DELAYED)) {
		/*
		 * Already deferred.
		 */
		++info->diddeferral;
	} else if ((chain->flags & HAMMER2_CHAIN_PFSBOUNDARY) &&
		   (flags & HAMMER2_FLUSH_ALL) == 0 &&
		   (flags & HAMMER2_FLUSH_TOP) == 0) {
		/*
		 * We do not recurse through PFSROOTs.  PFSROOT flushes are
		 * handled by the related pmp's (whether mounted or not,
		 * including during recovery).
		 *
		 * But we must still process the PFSROOT chains for block
		 * table updates in their parent (which IS part of our flush).
		 *
		 * Note that the volume root, vchain, does not set this flag.
		 * Note the logic here requires that this test be done before
		 * the depth-limit test, else it might become the top on a
		 * flushq iteration.
		 */
		;
	} else if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT) {
		/*
		 * Recursion depth reached.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_DELAYED) == 0);
		hammer2_chain_ref(chain);
		TAILQ_INSERT_TAIL(&info->flushq, chain, flush_node);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
		++info->diddeferral;
	} else if (chain->flags & (HAMMER2_CHAIN_ONFLUSH |
				   HAMMER2_CHAIN_DESTROY)) {
		/*
		 * Downward recursion search (actual flush occurs bottom-up).
		 * pre-clear ONFLUSH.  It can get set again due to races,
		 * which we want so the scan finds us again in the next flush.
		 *
		 * We must also recurse if DESTROY is set so we can finally
		 * get rid of the related children, otherwise the node will
		 * just get re-flushed on lastdrop.
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
		info->parent = chain;
		hammer2_spin_ex(&chain->core.spin);
		RB_SCAN(hammer2_chain_tree, &chain->core.rbtree,
			NULL, hammer2_flush_recurse, info);
		hammer2_spin_unex(&chain->core.spin);
		info->parent = parent;
		if (info->diddeferral)
			hammer2_chain_setflush(chain);
	}

	/*
	 * Now we are in the bottom-up part of the recursion.
	 *
	 * Do not update chain if lower layers were deferred.
	 */
	if (info->diddeferral)
		goto done;

	/*
	 * Propagate the DESTROY flag downwards.  This dummies up the flush
	 * code and tries to invalidate related buffer cache buffers to
	 * avoid the disk write.
	 */
	if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY))
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);

	/*
	 * Chain was already modified or has become modified, flush it out.
	 */
again:
	if ((hammer2_debug & 0x200) &&
	    info->debug &&
	    (chain->flags & (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_UPDATE))) {
		hammer2_chain_t *scan = chain;

		kprintf("DISCONNECTED FLUSH %p->%p\n", info->debug, chain);
		while (scan) {
			kprintf(" chain %p [%08x] bref=%016jx:%02x\n",
				scan, scan->flags,
				scan->bref.key, scan->bref.type);
			if (scan == info->debug)
				break;
			scan = scan->parent;
		}
	}

	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		/*
		 * Dispose of the modified bit.
		 *
		 * If parent is present, the UPDATE bit should already be set.
		 * UPDATE should already be set.
		 * bref.mirror_tid should already be set.
		 */
		KKASSERT((chain->flags & HAMMER2_CHAIN_UPDATE) ||
			 chain->parent == NULL);
		if (hammer2_debug & 0x800000) {
			hammer2_chain_t *pp;

			for (pp = chain; pp->parent; pp = pp->parent)
				;
			/*
			 * NOTE(review): the literal 1 below presumably
			 * stands for HAMMER2_BREF_TYPE_INODE — confirm.
			 */
			kprintf("FLUSH CHAIN %p (p=%p pp=%p/%d) TYPE %d FLAGS %08x (%s)\n",
				chain, chain->parent, pp, pp->bref.type,
				chain->bref.type, chain->flags,
				(chain->bref.type == 1 ? (const char *)chain->data->ipdata.filename : "?")

				);
			print_backtrace(10);
		}
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);

		/*
		 * Manage threads waiting for excessive dirty memory to
		 * be retired.
		 */
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);

#if 0
		if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0 &&
		    chain != &hmp->vchain &&
		    chain != &hmp->fchain) {
			/*
			 * Set UPDATE bit indicating that the parent block
			 * table requires updating.
			 */
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
		}
#endif

		/*
		 * Issue the flush.  This is indirect via the DIO.
		 *
		 * NOTE: A DELETED node that reaches this point must be
		 *	 flushed for synchronization point consistency.
		 *
		 * NOTE: Even though MODIFIED was already set, the related DIO
		 *	 might not be dirty due to a system buffer cache
		 *	 flush and must be set dirty if we are going to make
		 *	 further modifications to the buffer.  Chains with
		 *	 embedded data don't need this.
		 */
		if (hammer2_debug & 0x1000) {
			kprintf("Flush %p.%d %016jx/%d data=%016jx\n",
				chain, chain->bref.type,
				(uintmax_t)chain->bref.key,
				chain->bref.keybits,
				(uintmax_t)chain->bref.data_off);
		}
		if (hammer2_debug & 0x2000) {
			Debugger("Flush hell");
		}

		/*
		 * Update chain CRCs for flush.
		 *
		 * NOTE: Volume headers are NOT flushed here as they require
		 *	 special processing.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_FREEMAP:
			/*
			 * Update the volume header's freemap_tid to the
			 * freemap's flushing mirror_tid.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			KKASSERT(hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED);
			KKASSERT(chain == &hmp->fchain);
			hmp->voldata.freemap_tid = chain->bref.mirror_tid;
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync freemap mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * The freemap can be flushed independently of the
			 * main topology, but for the case where it is
			 * flushed in the same transaction, and flushed
			 * before vchain (a case we want to allow for
			 * performance reasons), make sure modifications
			 * made during the flush under vchain use a new
			 * transaction id.
			 *
			 * Otherwise the mount recovery code will get confused.
			 */
			++hmp->voldata.mirror_tid;
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			/*
			 * The free block table is flushed by
			 * hammer2_vfs_sync() before it flushes vchain.
			 * We must still hold fchain locked while copying
			 * voldata to volsync, however.
			 *
			 * (note: embedded data, do not call setdirty)
			 */
			hammer2_chain_lock(&hmp->fchain,
					   HAMMER2_RESOLVE_ALWAYS);
			hammer2_voldata_lock(hmp);
			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("sync volume mirror_tid %08jx\n",
					(intmax_t)chain->bref.mirror_tid);
			}

			/*
			 * Update the volume header's mirror_tid to the
			 * main topology's flushing mirror_tid.  It is
			 * possible that voldata.mirror_tid is already
			 * beyond bref.mirror_tid due to the bump we made
			 * above in BREF_TYPE_FREEMAP.
			 */
			if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
				hmp->voldata.mirror_tid =
					chain->bref.mirror_tid;
			}

			/*
			 * The volume header is flushed manually by the
			 * syncer, not here.  All we do here is adjust the
			 * crc's.
			 */
			KKASSERT(chain->data != NULL);
			KKASSERT(chain->dio == NULL);

			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC1_OFF,
					HAMMER2_VOLUME_ICRC1_SIZE);
			hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0]=
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRC0_OFF,
					HAMMER2_VOLUME_ICRC0_SIZE);
			hmp->voldata.icrc_volheader =
				hammer2_icrc32(
					(char *)&hmp->voldata +
					 HAMMER2_VOLUME_ICRCVH_OFF,
					HAMMER2_VOLUME_ICRCVH_SIZE);

			if (hammer2_debug & 0x8000) {
				/* debug only, avoid syslogd loop */
				kprintf("syncvolhdr %016jx %016jx\n",
					hmp->voldata.mirror_tid,
					hmp->vchain.bref.mirror_tid);
			}
			hmp->volsync = hmp->voldata;
			atomic_set_int(&chain->flags, HAMMER2_CHAIN_VOLUMESYNC);
			hammer2_voldata_unlock(hmp);
			hammer2_chain_unlock(&hmp->fchain);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Data elements have already been flushed via the
			 * logical file buffer cache.  Their hash was set in
			 * the bref by the vop_write code.  Do not re-dirty.
			 *
			 * Make sure any device buffer(s) have been flushed
			 * out here (there aren't usually any to flush) XXX.
			 */
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
			/*
			 * Buffer I/O will be cleaned up when the volume is
			 * flushed (but the kernel is free to flush it before
			 * then, as well).
			 */
			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * NOTE: We must call io_setdirty() to make any late
			 *	 changes to the inode data, the system might
			 *	 have already flushed the buffer.
			 */
			if (chain->data->ipdata.meta.op_flags &
			    HAMMER2_OPFLAG_PFSROOT) {
				/*
				 * non-NULL pmp if mounted as a PFS.  We must
				 * sync fields cached in the pmp? XXX
				 */
				hammer2_inode_data_t *ipdata;

				hammer2_io_setdirty(chain->dio);
				ipdata = &chain->data->ipdata;
				if (chain->pmp) {
					ipdata->meta.pfs_inum =
						chain->pmp->inode_tid;
				}
			} else {
				/* can't be mounted as a PFS */
			}

			KKASSERT((chain->flags & HAMMER2_CHAIN_EMBEDDED) == 0);
			hammer2_chain_setcheck(chain, chain->data);
			break;
		default:
			KKASSERT(chain->flags & HAMMER2_CHAIN_EMBEDDED);
			panic("hammer2_flush_core: unsupported "
			      "embedded bref %d",
			      chain->bref.type);
			/* NOT REACHED */
		}

		/*
		 * If the chain was destroyed try to avoid unnecessary I/O.
		 * The DIO system buffer may silently disallow the
		 * invalidation.
		 */
		if (chain->flags & HAMMER2_CHAIN_DESTROY) {
			hammer2_io_t *dio;

			if (chain->dio) {
				hammer2_io_setinval(chain->dio,
						    chain->bref.data_off,
						    chain->bytes);
			} else if ((dio = hammer2_io_getquick(hmp,
						  chain->bref.data_off,
						  chain->bytes)) != NULL) {
				hammer2_io_setinval(dio,
						    chain->bref.data_off,
						    chain->bytes);
				hammer2_io_putblk(&dio);
			}
		}
	}

	/*
	 * If UPDATE is set the parent block table may need to be updated.
	 *
	 * NOTE: UPDATE may be set on vchain or fchain in which case
	 *	 parent could be NULL.  It's easiest to allow the case
	 *	 and test for NULL.  parent can also wind up being NULL
	 *	 due to a deletion so we need to handle the case anyway.
	 *
	 * If no parent exists we can just clear the UPDATE bit.  If the
	 * chain gets reattached later on the bit will simply get set
	 * again.
	 */
	if ((chain->flags & HAMMER2_CHAIN_UPDATE) && parent == NULL)
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

	/*
	 * The chain may need its blockrefs updated in the parent.  This
	 * requires some fancy footwork.
	 */
	if (chain->flags & HAMMER2_CHAIN_UPDATE) {
		hammer2_blockref_t *base;
		int count;

		/*
		 * Both parent and chain must be locked.  This requires
		 * temporarily unlocking the chain.  We have to deal with
		 * the case where the chain might be reparented or modified
		 * while it was unlocked.
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
		if (chain->parent != parent) {
			kprintf("PARENT MISMATCH ch=%p p=%p/%p\n",
				chain, chain->parent, parent);
			hammer2_chain_unlock(parent);
			goto done;
		}

		/*
		 * Check race condition.  If someone got in and modified
		 * it again while it was unlocked, we have to loop up.
		 */
		if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
			hammer2_chain_unlock(parent);
			kprintf("hammer2_flush: chain %p flush-mod race\n",
				chain);
			goto again;
		}

		/*
		 * Clear UPDATE flag, mark parent modified, update its
		 * modify_tid if necessary, and adjust the parent blockmap.
		 */
		if (chain->flags & HAMMER2_CHAIN_UPDATE)
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_UPDATE);

		/*
		 * (optional code)
		 *
		 * Avoid actually modifying and updating the parent if it
		 * was flagged for destruction.  This can greatly reduce
		 * disk I/O in large tree removals because the
		 * hammer2_io_setinval() call in the upward recursion
		 * (see MODIFIED code above) can only handle a few cases.
		 */
		if (parent->flags & HAMMER2_CHAIN_DESTROY) {
			if (parent->bref.modify_tid < chain->bref.modify_tid) {
				parent->bref.modify_tid =
					chain->bref.modify_tid;
			}
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
							HAMMER2_CHAIN_BMAPUPD);
			hammer2_chain_unlock(parent);
			goto skipupdate;
		}

		/*
		 * (semi-optional code)
		 *
		 * The flusher is responsible for deleting empty indirect
		 * blocks at this point.  If we don't do this, no major harm
		 * will be done but the empty indirect blocks will stay in
		 * the topology and make it a bit messy.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
		    chain->core.live_count == 0 &&
		    (chain->flags & (HAMMER2_CHAIN_INITIAL |
				     HAMMER2_CHAIN_COUNTEDBREFS)) == 0) {
			base = &chain->data->npdata[0];
			count = chain->bytes / sizeof(hammer2_blockref_t);
			hammer2_chain_countbrefs(chain, base, count);
		}
		if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT &&
		    chain->core.live_count == 0) {
#if 0
			kprintf("DELETE CHAIN %016jx.%02x %016jx/%d refs=%d\n",
				chain->bref.data_off, chain->bref.type,
				chain->bref.key, chain->bref.keybits,
				chain->refs);
#endif
			hammer2_chain_delete(parent, chain,
					     chain->bref.modify_tid,
					     HAMMER2_DELETE_PERMANENT);
			hammer2_chain_unlock(parent);
			goto skipupdate;
		}

		/*
		 * We are updating the parent's blockmap, the parent must
		 * be set modified.
		 */
		hammer2_chain_modify(parent, 0, 0, 0);
		if (parent->bref.modify_tid < chain->bref.modify_tid)
			parent->bref.modify_tid = chain->bref.modify_tid;

		/*
		 * Calculate blockmap pointer
		 */
		switch(parent->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * Access the inode's block array.  However, there is
			 * no block array if the inode is flagged DIRECTDATA.
			 */
			if (parent->data &&
			    (parent->data->ipdata.meta.op_flags &
			     HAMMER2_OPFLAG_DIRECTDATA) == 0) {
				base = &parent->data->
					ipdata.u.blockset.blockref[0];
			} else {
				base = NULL;
			}
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
			if (parent->data)
				base = &parent->data->npdata[0];
			else
				base = NULL;
			count = parent->bytes / sizeof(hammer2_blockref_t);
			break;
		case HAMMER2_BREF_TYPE_VOLUME:
			base = &chain->hmp->voldata.sroot_blockset.blockref[0];
			count = HAMMER2_SET_COUNT;
			break;
		case HAMMER2_BREF_TYPE_FREEMAP:
			base = &parent->data->npdata[0];
			count = HAMMER2_SET_COUNT;
			break;
		default:
			base = NULL;
			count = 0;
			panic("hammer2_flush_core: "
			      "unrecognized blockref type: %d",
			      parent->bref.type);
		}

		/*
		 * Blocktable updates
		 *
		 * We synchronize pending statistics at this time.  Delta
		 * adjustments designated for the current and upper level
		 * are synchronized.
		 */
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPUPD)) {
			if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
				hammer2_spin_ex(&parent->core.spin);
				hammer2_base_delete(parent, base, count,
						    &info->cache_index, chain);
				hammer2_spin_unex(&parent->core.spin);
				/* base_delete clears both bits */
			} else {
				atomic_clear_int(&chain->flags,
						 HAMMER2_CHAIN_BMAPUPD);
			}
		}
		if (base && (chain->flags & HAMMER2_CHAIN_BMAPPED) == 0) {
			hammer2_spin_ex(&parent->core.spin);
			hammer2_base_insert(parent, base, count,
					    &info->cache_index, chain);
			hammer2_spin_unex(&parent->core.spin);
			/* base_insert sets BMAPPED */
		}
		hammer2_chain_unlock(parent);
	}
skipupdate:
	;

	/*
	 * Final cleanup after flush
	 */
done:
	KKASSERT(chain->refs > 0);
	if (hammer2_debug & 0x200) {
		if (info->debug == chain)
			info->debug = NULL;
	}
}

/*
 * Flush recursion helper, called from flush_core, calls flush_core.
 *
 * Flushes the children of the caller's chain (info->parent), restricted
 * by sync_tid.  Set info->domodify if the child's blockref must propagate
 * back up to the parent.
 *
 * Ripouts can move child from rbtree to dbtree or dbq but the caller's
 * flush scan order prevents any chains from being lost.  A child can be
 * executed more than once.
 *
 * WARNING! If we do not call hammer2_flush_core() we must update
 *	    bref.mirror_tid ourselves to indicate that the flush has
 *	    processed the child.
 *
 * WARNING! parent->core spinlock is held on entry and return.
 */
static int
hammer2_flush_recurse(hammer2_chain_t *child, void *data)
{
	hammer2_flush_info_t *info = data;
	hammer2_chain_t *parent = info->parent;

	/*
	 * (child can never be fchain or vchain so a special check isn't
	 * needed).
1054 * 1055 * We must ref the child before unlocking the spinlock. 1056 * 1057 * The caller has added a ref to the parent so we can temporarily 1058 * unlock it in order to lock the child. 1059 */ 1060 hammer2_chain_ref(child); 1061 hammer2_spin_unex(&parent->core.spin); 1062 1063 hammer2_chain_unlock(parent); 1064 hammer2_chain_lock(child, HAMMER2_RESOLVE_MAYBE); 1065 1066 /* 1067 * Must propagate the DESTROY flag downwards, otherwise the 1068 * parent could end up never being removed because it will 1069 * be requeued to the flusher if it survives this run due to 1070 * the flag. 1071 */ 1072 if (parent && (parent->flags & HAMMER2_CHAIN_DESTROY)) 1073 atomic_set_int(&child->flags, HAMMER2_CHAIN_DESTROY); 1074 1075 /* 1076 * Recurse and collect deferral data. We're in the media flush, 1077 * this can cross PFS boundaries. 1078 */ 1079 if (child->flags & HAMMER2_CHAIN_FLUSH_MASK) { 1080 ++info->depth; 1081 hammer2_flush_core(info, child, info->flags); 1082 --info->depth; 1083 } else if (hammer2_debug & 0x200) { 1084 if (info->debug == NULL) 1085 info->debug = child; 1086 ++info->depth; 1087 hammer2_flush_core(info, child, info->flags); 1088 --info->depth; 1089 if (info->debug == child) 1090 info->debug = NULL; 1091 } 1092 1093 /* 1094 * Relock to continue the loop 1095 */ 1096 hammer2_chain_unlock(child); 1097 hammer2_chain_lock(parent, HAMMER2_RESOLVE_MAYBE); 1098 hammer2_chain_drop(child); 1099 KKASSERT(info->parent == parent); 1100 hammer2_spin_ex(&parent->core.spin); 1101 1102 return (0); 1103 } 1104 1105 /* 1106 * flush helper (direct) 1107 * 1108 * Quickly flushes any dirty chains for a device. This will update our 1109 * concept of the volume root but does NOT flush the actual volume root 1110 * and does not flush dirty device buffers. 1111 * 1112 * This function is primarily used by the bulkfree code to allow it to 1113 * create a snapshot for the pass. 
 * It doesn't care about any pending
 * work (dirty vnodes, dirty inodes, dirty logical buffers) for which blocks
 * have not yet been allocated.
 */
void
hammer2_flush_quick(hammer2_dev_t *hmp)
{
	hammer2_chain_t *chain;

	/* flush transaction against the device's super-root PFS */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP |
				     HAMMER2_FLUSH_ALL);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	hammer2_trans_done(hmp->spmp);	/* spmp trans */
}

/*
 * flush helper (backend threaded)
 *
 * Flushes core chains, issues disk sync, flushes volume roots.
 *
 * Primarily called from vfs_sync().
 */
void
hammer2_inode_xop_flush(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_flush_t *xop = &arg->xop_flush;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_dev_t *hmp;
	int error = 0;
	int total_error = 0;
	int j;

	/*
	 * Flush core chains
	 */
	chain = hammer2_inode_chain(xop->head.ip1, clindex,
				    HAMMER2_RESOLVE_ALWAYS);
	if (chain) {
		hmp = chain->hmp;
		if ((chain->flags & HAMMER2_CHAIN_FLUSH_MASK) ||
		    TAILQ_FIRST(&hmp->flushq) != NULL) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP);
			parent = chain->parent;
			/*
			 * Parent is expected to belong to a different PFS
			 * (presumably the super-root) — propagate the
			 * flush request upward across the boundary.
			 */
			KKASSERT(chain->pmp != parent->pmp);
			hammer2_chain_setflush(parent);
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
	} else {
		hmp = NULL;
	}

	/*
	 * Flush volume roots.  Avoid replication, we only want to
	 * flush each hammer2_dev (hmp) once.
	 */
	for (j = clindex - 1; j >= 0; --j) {
		/*
		 * If a lower cluster index already covers this hmp a
		 * prior xop invocation flushed it; skip the device-level
		 * flush entirely.
		 */
		if ((chain = xop->head.ip1->cluster.array[j].chain) != NULL) {
			if (chain->hmp == hmp) {
				chain = NULL;	/* safety */
				goto skip;
			}
		}
	}
	chain = NULL;	/* safety */

	/*
	 * spmp transaction.  The super-root is never directly mounted so
	 * there shouldn't be any vnodes, let alone any dirty vnodes
	 * associated with it, so we shouldn't have to mess around with any
	 * vnode flushes here.
	 */
	hammer2_trans_init(hmp->spmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * Media mounts have two 'roots', vchain for the topology
	 * and fchain for the free block table.  Flush both.
	 *
	 * Note that the topology and free block table are handled
	 * independently, so the free block table can wind up being
	 * ahead of the topology.  We depend on the bulk free scan
	 * code to deal with any loose ends.
	 */
	hammer2_chain_ref(&hmp->vchain);
	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	hammer2_chain_ref(&hmp->fchain);
	hammer2_chain_lock(&hmp->fchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->fchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		/*
		 * This will also modify vchain as a side effect,
		 * mark vchain as modified now.
		 */
		hammer2_voldata_modify(hmp);
		chain = &hmp->fchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->fchain);
	}
	hammer2_chain_unlock(&hmp->fchain);
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->fchain);
	/* vchain dropped down below */

	hammer2_chain_lock(&hmp->vchain, HAMMER2_RESOLVE_ALWAYS);
	if (hmp->vchain.flags & HAMMER2_CHAIN_FLUSH_MASK) {
		chain = &hmp->vchain;
		hammer2_flush(chain, HAMMER2_FLUSH_TOP);
		KKASSERT(chain == &hmp->vchain);
	}
	hammer2_chain_unlock(&hmp->vchain);
	hammer2_chain_drop(&hmp->vchain);

	error = 0;

	/*
	 * We can't safely flush the volume header until we have
	 * flushed any device buffers which have built up.
	 *
	 * XXX this isn't being incremental
	 */
	vn_lock(hmp->devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(hmp->devvp, MNT_WAIT, 0);
	vn_unlock(hmp->devvp);

	/*
	 * The flush code sets CHAIN_VOLUMESYNC to indicate that the
	 * volume header needs synchronization via hmp->volsync.
	 *
	 * XXX synchronize the flag & data with only this flush XXX
	 */
	if (error == 0 &&
	    (hmp->vchain.flags & HAMMER2_CHAIN_VOLUMESYNC)) {
		struct buf *bp;

		/*
		 * Synchronize the disk before flushing the volume
		 * header.  BUF_CMD_FLUSH issued synchronously acts
		 * as a device-level write barrier here.
		 */
		bp = getpbuf(NULL);
		bp->b_bio1.bio_offset = 0;
		bp->b_bufsize = 0;
		bp->b_bcount = 0;
		bp->b_cmd = BUF_CMD_FLUSH;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vn_strategy(hmp->devvp, &bp->b_bio1);
		biowait(&bp->b_bio1, "h2vol");
		relpbuf(bp, NULL);

		/*
		 * Then we can safely flush the version of the
		 * volume header synchronized by the flush code.
		 *
		 * Rotate to the next of the redundant volume header
		 * slots, wrapping, and never writing past the end of
		 * the (possibly small) volume.
		 */
		j = hmp->volhdrno + 1;
		if (j >= HAMMER2_NUM_VOLHDRS)
			j = 0;
		if (j * HAMMER2_ZONE_BYTES64 + HAMMER2_SEGSIZE >
		    hmp->volsync.volu_size) {
			j = 0;
		}
		if (hammer2_debug & 0x8000) {
			/* debug only, avoid syslogd loop */
			kprintf("sync volhdr %d %jd\n",
				j, (intmax_t)hmp->volsync.volu_size);
		}
		bp = getblk(hmp->devvp, j * HAMMER2_ZONE_BYTES64,
			    HAMMER2_PBUFSIZE, 0, 0);
		atomic_clear_int(&hmp->vchain.flags,
				 HAMMER2_CHAIN_VOLUMESYNC);
		bcopy(&hmp->volsync, bp->b_data, HAMMER2_PBUFSIZE);
		bawrite(bp);
		hmp->volhdrno = j;
	}
	if (error)
		total_error = error;

	hammer2_trans_done(hmp->spmp);	/* spmp trans */
skip:
	error = hammer2_xop_feed(&xop->head, NULL, clindex, total_error);
}