/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;
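
/*
 * Slave lifecycle, summarized from the code below: an info structure
 * idles on flusher.ready_list with runstate 0.  The master fills its
 * work_array with up to HAMMER_FLUSH_GROUP_SIZE inodes, moves it to
 * flusher.run_list, sets runstate to 1, and wakes it up.  When the
 * slave finishes its batch it moves itself back to the ready_list,
 * zeroes runstate, and wakes anyone sleeping on the ready_list.  A
 * runstate of -1 tells the slave to exit.
 */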
/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq = hmp->flusher.next;

	TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
		if (flg->running == 0)
			++seq;
		flg->closed = 1;
		if (flg == close_flg)
			break;
	}
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher.td) {
		seq = hmp->flusher.next;
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Wait for the flusher to get to the specified sequence number.
 * Signal the flusher as often as necessary to keep it going.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
	while ((int)(seq - hmp->flusher.done) > 0) {
		if (hmp->flusher.act != seq) {
			if (hmp->flusher.signal++ == 0)
				wakeup(&hmp->flusher.signal);
		}
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}
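
/*
 * Note on the comparison above: sequence numbers are allowed to wrap,
 * so the wait loop tests the signed difference (seq - done) rather
 * than seq > done.  For example, with done == 0x7ffffffe and seq
 * wrapped around to 0x80000001, the subtraction still yields +3 under
 * two's-complement wraparound, so the caller keeps waiting, whereas a
 * plain seq > done test would return early.  (Signed overflow is
 * formally undefined in ISO C; the idiom relies on the semantics the
 * kernel is compiled to assume.)
 */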
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, M_HAMMER);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_mount_t hmp;

	hmp = arg;

	for (;;) {
		/*
		 * Do at least one flush cycle.  We may have to update the
		 * UNDO FIFO even if no inodes are queued.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hmp->flusher.act = hmp->flusher.next;
			++hmp->flusher.next;
			hammer_flusher_clean_loose_ios(hmp);
			hammer_flusher_flush(hmp);
			hmp->flusher.done = hmp->flusher.act;
			wakeup(&hmp->flusher.done);
			flg = TAILQ_FIRST(&hmp->flush_group_list);
			if (flg == NULL || flg->closed == 0)
				break;
			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);

		/*
		 * Flush for each count on signal but only allow one extra
		 * flush request to build up.
		 */
		if (--hmp->flusher.signal != 0)
			hmp->flusher.signal = 1;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_exit();
}
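
/*
 * Note on the signal handling above: flusher.signal is an accumulating
 * counter, not a queue.  If, say, five hammer_flusher_async() calls
 * arrive while a flush cycle is in progress, the decrement-and-clamp
 * collapses them into at most one additional cycle rather than five.
 * No requested work is lost because the inner loop keeps flushing for
 * as long as closed flush groups remain on the list.
 */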
/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t next_ip;
	int slave_index;
	int count;

	/*
	 * Just in case there's a flush race on mount
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
		return;

	/*
	 * We only do one flg but we may have to loop/retry.
	 */
	count = 0;
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				hmp->flusher.act,
				flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		/*
		 * Ok, we are running this flush group now (this prevents new
		 * additions to it).
		 */
		flg->running = 1;
		if (hmp->next_flush_group == flg)
			hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);

		/*
		 * Iterate the inodes in the flg's flush_list and assign
		 * them to slaves.
		 */
		slave_index = 0;
		info = TAILQ_FIRST(&hmp->flusher.ready_list);
		next_ip = TAILQ_FIRST(&flg->flush_list);

		while ((ip = next_ip) != NULL) {
			next_ip = TAILQ_NEXT(ip, flush_entry);

			/*
			 * Add ip to the slave's work array.  The slave is
			 * not currently running.
			 */
			info->work_array[info->count++] = ip;
			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
				continue;

			/*
			 * Get the slave running
			 */
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);

			/*
			 * Get a new slave.  We may have to wait for one to
			 * finish running.
			 */
			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
			}
		}

		/*
		 * Run the current slave if necessary
		 */
		if (info->count) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (TAILQ_FIRST(&flg->flush_list) == NULL) {
			KKASSERT(TAILQ_EMPTY(&flg->flush_list));
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, M_HAMMER);
			break;
		}
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free so the freed big-block is not reused until we can
	 * be certain no stale references to it remain.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
}
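
/*
 * Dispatch arithmetic, for illustration: assuming HAMMER_FLUSH_GROUP_SIZE
 * is 64, a flush group of 150 inodes is handed out as three batches of
 * 64, 64 and 22; the final partial batch is kicked off by the "Run the
 * current slave if necessary" block above.  Up to HAMMER_MAX_FLUSHERS
 * batches run concurrently, and the master sleeps on the ready_list
 * whenever every slave is busy.
 */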
/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	int i;

	info = arg;
	hmp = info->hmp;

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		for (i = 0; i < info->count; ++i) {
			ip = info->work_array[i];
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
			++hammer_stats_inode_flushes;
		}
		info->count = 0;
		info->runstate = 0;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 */
	if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		crit_enter();	/* biodone() race */
		while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
			KKASSERT(io->mod_list == &hmp->lose_list);
			TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
			io->mod_list = NULL;
			if (io->lock.refs == 0)
				++hammer_count_refedbufs;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		crit_exit();
	}
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;
	int error;

	hammer_flusher_clean_loose_ios(hmp);
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}
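
/*
 * Worked example of the thresholds above: with a hypothetical 512MB
 * UNDO FIFO, quarter == 1 reports exhaustion once less than 128MB of
 * undo space remains, quarter == 2 below 256MB, and quarter == 3
 * below 384MB.  A flush cycle starting with only half its undo space
 * free (256MB) thus already fails the quarter-3 test, which triggers
 * the dummy finalize in hammer_flusher_flush() to recycle the FIFO
 * before the real work begins.
 */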
/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	int count;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync, several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto done;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		if (io->ioerror)
			break;
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * Wait for I/Os to complete
	 */
	hammer_flusher_clean_loose_ios(hmp);
	hammer_io_wait_all(hmp, "hmrfl1");

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;
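
	/*
	 * In outline, the ordering that makes the commit safe is now
	 * established: data buffers and the UNDO FIFO are on the media
	 * before the volume header below is updated to point at the new
	 * UNDO end.  A crash after the header write replays the UNDO on
	 * recovery; a crash before it leaves the header pointing at the
	 * old FIFO range, so the new UNDO records are simply ignored.
	 * Only the meta-data, flushed last, may then go out
	 * asynchronously.
	 */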
	/*
	 * Update the on-disk volume header with new UNDO FIFO end position
	 * (do not generate new UNDO records for this change).  We have to
	 * do this for the UNDO FIFO whether (final) is set or not.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beaten us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != cundomap->next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = cundomap->next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * vol0_next_tid is used for TID selection and is updated without
	 * an UNDO so we do not reuse a TID that may have been rolled-back.
	 *
	 * vol0_last_tid is the highest fully-synchronized TID.  It is
	 * set up when the UNDO fifo is fully synced, later on (not here).
	 */
	if (root_volume->io.modified) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_flush(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete
	 */
	hammer_flusher_clean_loose_ios(hmp);
	hammer_io_wait_all(hmp, "hmrfl2");

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.
	 *
	 * Repeated catchups will wind up flushing this update's meta-data
	 * and the UNDO buffers for the next update simultaneously.  This
	 * is ok.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset == cundomap->next_offset) {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		} else {
			cundomap->first_offset = cundomap->next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash will roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}
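
/*
 * A sketch of how the limit checks below are typically consumed (an
 * illustration, not a verbatim copy of any caller): a background
 * operation backs off and lets the flusher drain before continuing.
 *
 *	if (hammer_flusher_meta_halflimit(hmp) ||
 *	    hammer_flusher_undo_exhausted(trans, 2)) {
 *		seq = hammer_flusher_async_one(hmp);
 *		hammer_flusher_wait(hmp, seq);
 *	}
 */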
/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    TAILQ_FIRST(&hmp->volu_list) ||		/* dirty buffers */
	    TAILQ_FIRST(&hmp->undo_list) ||
	    TAILQ_FIRST(&hmp->data_list) ||
	    TAILQ_FIRST(&hmp->meta_list) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}