/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static int hammer_flusher_flush(hammer_mount_t hmp, int *nomorep);
static int hammer_flusher_flush_inode(hammer_inode_t ip, void *data);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	      hammer_ino_rb_compare);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	struct hammer_transaction trans;	/* per-slave transaction */
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}
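
/*
 * Illustrative usage, a sketch rather than a call site in this file: a
 * caller that wants to overlap its own work with a flush can split the
 * sync into the same async/wait halves used above:
 *
 *	seq = hammer_flusher_async(hmp, NULL);	(close and kick all groups)
 *	... do other frontend work ...
 *	hammer_flusher_wait(hmp, seq);		(then block on completion)
 */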

/*
 * Sync all flush groups through to close_flg - return immediately.
 * If close_flg is NULL all flush groups are synced.
 *
 * Returns the sequence number of the last closed flush group,
 * which may be close_flg.  When syncing to the end, if there
 * are no flush groups pending we still cycle the flusher and
 * must allocate a sequence number to placemark the spot even
 * though no flush group will ever be associated with it.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq;

	/*
	 * Already closed
	 */
	if (close_flg && close_flg->closed)
		return(close_flg->seq);

	/*
	 * Close flush groups until we hit the end of the list
	 * or close_flg.
	 */
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->closed == 0 && flg->running == 0);
		flg->closed = 1;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		if (flg == close_flg)
			break;
	}

	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
		if (flg) {
			seq = flg->seq;
		} else {
			seq = hmp->flusher.next;
			++hmp->flusher.next;
		}
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Flush the current/next flushable flg.  This function is typically called
 * in a loop along with hammer_flusher_wait(hmp, returned_seq) to iterate
 * flush groups until specific conditions are met.
 *
 * If a flush is currently in progress its seq is returned.
 *
 * If no flush is currently in progress the next available flush group
 * will be flushed and its seq returned.
 *
 * If no flush groups are present a dummy seq will be allocated and
 * returned and the flusher will be activated (e.g. to flush the
 * undo/redo and the volume header).
 */
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	hammer_flush_group_t flg;
	int seq;

	if (hmp->flusher.td) {
		flg = TAILQ_FIRST(&hmp->flush_group_list);
		seq = hammer_flusher_async(hmp, flg);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Wait for the flusher to finish flushing the specified sequence
 * number.  The flush is already running and will signal us on
 * each completion.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
	while ((int)(seq - hmp->flusher.done) > 0)
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}
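
/*
 * Usage sketch for the iterate pattern described above; the stopping
 * condition here is hypothetical:
 *
 *	while (hammer_flusher_undo_exhausted(trans, 2)) {
 *		seq = hammer_flusher_async_one(hmp);
 *		hammer_flusher_wait(hmp, seq);
 *	}
 *
 * Note that the wait loop compares sequence numbers with a signed
 * difference, (int)(seq - done) > 0, so the test remains correct even
 * after the 32 bit sequence space wraps around.
 */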

/*
 * Returns non-zero if the flusher is currently running.  Used for
 * time-domain multiplexing of frontend operations in order to avoid
 * starving the backend flusher.
 */
int
hammer_flusher_running(hammer_mount_t hmp)
{
	int seq = hmp->flusher.next - 1;

	if ((int)(seq - hmp->flusher.done) > 0)
		return(1);
	return (0);
}

/*
 * Flush one flush group and wait for it to complete.
 */
void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async_one(hmp);
	hammer_flusher_wait(hmp, seq);
}

/*
 * Initialize the flusher state and start the master and slave threads.
 */
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}

/*
 * Shut down the master and slave threads and free the slave structures.
 */
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, hmp->m_misc);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_mount_t hmp;
	int seq;
	int nomore;

	hmp = arg;

	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		/*
		 * Flush all sequence numbers up to but not including .next,
		 * or until an open flush group is encountered.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hammer_flusher_clean_loose_ios(hmp);

			seq = hammer_flusher_flush(hmp, &nomore);
			hmp->flusher.done = seq;
			wakeup(&hmp->flusher.done);

			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
			if (nomore)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
		hmp->flusher.signal = 0;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_reltoken(&hmp->fs_token);
	lwkt_exit();
}
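
/*
 * The wakeup handshake above is a counted-signal pattern: producers
 * (hammer_flusher_async()) bump flusher.signal and issue a wakeup()
 * only on the 0->1 transition, while the master consumes every pending
 * request at once by zeroing the counter after it wakes.  A minimal
 * sketch of the same pattern with hypothetical names:
 *
 *	producer:
 *		if (sig++ == 0)
 *			wakeup(&sig);
 *	consumer:
 *		while (sig == 0)
 *			tsleep(&sig, 0, "wait", 0);
 *		sig = 0;
 */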

/*
 * Flush the next sequence number until an open flush group is encountered
 * or we reach (next).  Not all sequence numbers will have flush groups
 * associated with them.  These require that the UNDO/REDO FIFO still be
 * flushed since it can take at least one additional run to synchronize
 * the FIFO, and more to also synchronize the reserve structures.
 */
static int
hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	int count;
	int seq;

	/*
	 * Just in case there's a flush race on mount.  The seq number
	 * does not change.
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
		*nomorep = 1;
		return (hmp->flusher.done);
	}
	*nomorep = 0;

	/*
	 * Flush the next sequence number.  Sequence numbers can exist
	 * without an assigned flush group, indicating that just a FIFO flush
	 * should occur.
	 */
	seq = hmp->flusher.done + 1;
	flg = TAILQ_FIRST(&hmp->flush_group_list);
	if (flg == NULL) {
		if (seq == hmp->flusher.next) {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else if (seq == flg->seq) {
		if (flg->closed) {
			KKASSERT(flg->running == 0);
			flg->running = 1;
			if (hmp->fill_flush_group == flg) {
				hmp->fill_flush_group =
					TAILQ_NEXT(flg, flush_entry);
			}
		} else {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else {
		/*
		 * Sequence number problems can only happen if a critical
		 * filesystem error occurred which forced the filesystem into
		 * read-only mode.
		 */
		KKASSERT((int)(flg->seq - seq) > 0 || hmp->ronly >= 2);
		flg = NULL;
	}

	/*
	 * We only do one flg but we may have to loop/retry.
	 *
	 * Due to various races it is possible to come across a flush
	 * group which has not yet been closed.
	 */
	count = 0;
	while (flg && flg->running) {
		++count;
		if (hammer_debug_general & 0x0001) {
			hdkprintf("%d ttl=%d recs=%d\n",
				flg->seq, flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		KKASSERT(hmp->next_flush_group != flg);

		/*
		 * Place the flg in the flusher structure and start the
		 * slaves running.  The slaves will compete for inodes
		 * to flush.
		 *
		 * Make a per-thread copy of the transaction.
		 */
		while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			info->trans = hmp->flusher.trans;
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * it can no longer be reused.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if ((int)(resv->flush_group - seq) > 0)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
	return (seq);
}
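
/*
 * The delay_list drain above is a delayed-reuse scheme: when a
 * big-block is freed, resv->flush_group is stamped with a sequence
 * number a few groups in the future, e.g. (hypothetical stamp):
 *
 *	resv->flush_group = hmp->flusher.next + 2;
 *
 * and the same signed sequence comparison used elsewhere keeps the
 * reservation on the list until the flusher's seq has caught up,
 * preventing premature reuse of the freed block.
 */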

/*
 * The slave flusher thread pulls work off the master flush list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;

	info = arg;
	hmp = info->hmp;
	lwkt_gettoken(&hmp->fs_token);

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		RB_SCAN(hammer_fls_rb_tree, &flg->flush_tree, NULL,
			hammer_flusher_flush_inode, info);

		info->count = 0;
		info->runstate = 0;
		info->flg = NULL;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_reltoken(&hmp->fs_token);
	lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * Loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 *
	 * The io_token is needed to protect the list.
	 */
	if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
		lwkt_gettoken(&hmp->io_token);
		while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
			KKASSERT(io->mod_root == &hmp->lose_root);
			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
			io->mod_root = NULL;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		lwkt_reltoken(&hmp->io_token);
	}
}
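
/*
 * The RB_SCAN() in the slave loop above hands every inode in the flush
 * group's red-black tree to hammer_flusher_flush_inode() below; a
 * callback return value of 0 lets the scan continue to the next inode.
 * The callback shape, with a hypothetical name:
 *
 *	static int
 *	example_scan_cb(hammer_inode_t ip, void *data)
 *	{
 *		... flush or skip ip ...
 *		return(0);		(0 continues the scan)
 *	}
 */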

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
int
hammer_flusher_flush_inode(hammer_inode_t ip, void *data)
{
	hammer_flusher_info_t info = data;
	hammer_mount_t hmp = info->hmp;
	hammer_transaction_t trans = &info->trans;
	int error;

	/*
	 * Several slaves are operating on the same flush group concurrently.
	 * The SLAVEFLUSH flag prevents them from tripping over each other.
	 *
	 * NOTE: It is possible for an EWOULDBLOCK'd ip returned by one slave
	 *	 to be resynced by another, but normally such inodes are not
	 *	 revisited until the master loop gets to them.
	 */
	if (ip->flags & HAMMER_INODE_SLAVEFLUSH)
		return(0);
	ip->flags |= HAMMER_INODE_SLAVEFLUSH;
	++hammer_stats_inode_flushes;

	hammer_flusher_clean_loose_ios(hmp);
	vm_wait_nominal();
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set the WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	/* ip invalid */

	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		hkprintf("Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
	return (0);
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}
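
/*
 * Worked example (hypothetical sizes): with a 1GB UNDO FIFO,
 * hammer_flusher_undo_exhausted(trans, 1) reports exhaustion only once
 * free UNDO space drops below 1GB * 1/4 = 256MB, while quarter == 3
 * trips much earlier, below 1GB * 3/4 = 768MB, giving a new flush
 * cycle plenty of runway before the emergency level is reached.
 */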

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	hammer_off_t save_undo_next_offset;
	int count;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto done;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 *
	 * These data buffers have already been collected prior to the
	 * related inode(s) getting queued to the flush group.
	 */
	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->data_root)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs.  This can occur concurrently with the data flush
	 * because data writes never overwrite.
	 *
	 * This also waits for I/Os to complete and flushes the cache on
	 * the target disk.
	 *
	 * Record the UNDO append point as this can continue to change
	 * after we have flushed the UNDOs.
	 */
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	hammer_lock_ex(&hmp->undo_lock);
	save_undo_next_offset = cundomap->next_offset;
	hammer_unlock(&hmp->undo_lock);
	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;
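
	/*
	 * Write ordering so far: data buffers went out first, then the
	 * UNDO FIFO together with a disk cache flush.  Only with the
	 * UNDOs durable is it safe to advance the volume header and, in
	 * the final step, release the meta-data buffers, because a crash
	 * from here on can always be rolled back by UNDO recovery.
	 */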

	/*
	 * HAMMER VERSION < 4:
	 *	Update the on-disk volume header with new UNDO FIFO end
	 *	position (do not generate new UNDO records for this change).
	 *	We have to do this for the UNDO FIFO whether (final) is
	 *	set or not in order for the UNDOs to be recognized on
	 *	recovery.
	 *
	 * HAMMER VERSION >= 4:
	 *	The UNDO FIFO data written above will be recognized on
	 *	recovery without us having to sync the volume header.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beat us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != save_undo_next_offset) {
		hammer_modify_volume_noundo(NULL, root_volume);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = save_undo_next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * vol0_next_tid is used for TID selection and is updated without
	 * an UNDO so we do not reuse a TID that may have been rolled-back.
	 *
	 * vol0_last_tid is the highest fully-synchronized TID.  It is
	 * set-up when the UNDO fifo is fully synced, later on (not here).
	 *
	 * The root volume can be open for modification by other threads
	 * generating UNDO or REDO records.  For example, reblocking,
	 * pruning, REDO mode fast-fsyncs, so the write interlock is
	 * mandatory.
	 */
	if (root_volume->io.modified) {
		hammer_modify_volume_noundo(NULL, root_volume);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_write_interlock(&root_volume->io);
		hammer_io_flush(&root_volume->io, 0);
		hammer_io_done_interlock(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete.
	 *
	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
	 * the I/O to complete as the new UNDO FIFO entries are recognized
	 * even without the volume header update.  This allows the volume
	 * header to be flushed along with the meta-data, significantly
	 * reducing flush overheads.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
		hammer_io_wait_all(hmp, "hmrfl3", 1);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.  There is no need
	 * to wait for I/O to complete (or issue a synchronous disk flush).
	 *
	 * In fact, even if we did wait the meta-data will still be undone
	 * by a crash up until the next flush cycle due to the first_offset
	 * in the volume header for the UNDO FIFO not being adjusted until
	 * the following flush cycle.
	 *
	 * No io interlock is needed, bioops callbacks will not mess with
	 * meta data buffers.
	 */
	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->meta_root)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 *
	 * We synchronize to save_undo_next_offset rather than
	 * cundomap->next_offset because that is what we flushed out
	 * above.
	 *
	 * NOTE! UNDOs can only be added with the sync_lock held
	 *	 so we can clear the undo history without racing.
	 *	 REDOs can be added at any time which is why we
	 *	 have to be careful and use save_undo_next_offset
	 *	 when setting the new first_offset.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset != save_undo_next_offset) {
			cundomap->first_offset = save_undo_next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else if (cundomap->first_offset != cundomap->next_offset) {
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash can roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;

		/*
		 * Clear the REDO SYNC flag.  This flag is used to ensure
		 * that the recovery span in the UNDO/REDO FIFO contains
		 * at least one REDO SYNC record.
		 */
		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		hvkprintf(root_volume,
			"Critical write error during flush, "
			"refusing to sync UNDO FIFO\n");
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}

/*
 * Flush UNDOs.
 */
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
	hammer_io_t io;
	int count;

	count = 0;
	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->undo_root)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_write_interlock(io);
		hammer_io_flush(io, hammer_undo_reclaim(io));
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		hammer_io_limit_backlog(hmp);
		++count;
	}
	hammer_flusher_clean_loose_ios(hmp);
	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
		hammer_io_wait_all(hmp, "hmrfl1", 1);
	} else {
		hammer_io_wait_all(hmp, "hmrfl2", 0);
	}
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Because such buffers cannot be allowed to flush until the related
 * UNDOs have been dealt with, letting too many accumulate risks
 * deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}
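
/*
 * Usage sketch (hypothetical backoff loop): a background operation such
 * as reblocking might yield to the flusher whenever the half-limit is
 * exceeded:
 *
 *	while (hammer_flusher_meta_halflimit(trans->hmp))
 *		hammer_flusher_wait_next(trans->hmp);
 */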

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->ronly)
		return(0);
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    RB_ROOT(&hmp->volu_root) ||			/* dirty buffers */
	    RB_ROOT(&hmp->undo_root) ||
	    RB_ROOT(&hmp->data_root) ||
	    RB_ROOT(&hmp->meta_root) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)) {		/* UNDO FIFO sync */
		return(1);
	}
	return(0);
}

/*
 * Repeatedly sync the flusher until nothing is left dirty, printing
 * progress dots after the first few passes.  Gives up after max_count
 * passes (max_count == -1 means no bound).  Returns -1 if count >=
 * max_count on exit, 0 otherwise.
 */
int
hammer_flush_dirty(hammer_mount_t hmp, int max_count)
{
	int count = 0;
	int dummy;

	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				hkprintf("flushing.");
			else
				kprintf(".");
			tsleep(&dummy, 0, "hmrufl", hz);
		}
		if (max_count != -1 && count == max_count) {
			kprintf("giving up");
			break;
		}
	}
	if (count >= 5)
		kprintf("\n");

	if (count >= max_count)
		return(-1);
	return(0);
}
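
/*
 * Usage sketch (hypothetical caller): a path that must fully drain the
 * filesystem, e.g. before removing a volume, might bound the retries:
 *
 *	if (hammer_flush_dirty(hmp, 100) < 0)
 *		error = EBUSY;
 */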