// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "disk_groups.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "io_read.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "replicas.h"
#include "snapshot.h"
#include "super-io.h"
#include "trace.h"

#include <linux/ioprio.h>
#include <linux/kthread.h>

static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent(c, buf.buf);
		printbuf_exit(&buf);
	}
}

static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_read_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_read(c, buf.buf);
		printbuf_exit(&buf);
	}
}

struct moving_io {
	struct list_head		read_list;
	struct list_head		io_list;
	struct move_bucket_in_flight	*b;
	struct closure			cl;
	bool				read_completed;

	unsigned			read_sectors;
	unsigned			write_sectors;

	struct bch_read_bio		rbio;

	struct data_update		write;
	/* Must be last since it is variable size */
	struct bio_vec			bi_inline_vecs[0];
};

static void move_free(struct moving_io *io)
{
	struct moving_context *ctxt = io->write.ctxt;

	if (io->b)
		atomic_dec(&io->b->count);

	bch2_data_update_exit(&io->write);

	mutex_lock(&ctxt->lock);
	list_del(&io->io_list);
	wake_up(&ctxt->wait);
	mutex_unlock(&ctxt->lock);

	kfree(io);
}

static void move_write_done(struct bch_write_op *op)
{
	struct moving_io *io = container_of(op, struct moving_io, write.op);
	struct moving_context *ctxt = io->write.ctxt;

	if (io->write.op.error)
		ctxt->write_error = true;

	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
	atomic_dec(&io->write.ctxt->write_ios);
	move_free(io);
	closure_put(&ctxt->cl);
}

static void move_write(struct moving_io *io)
{
	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
		move_free(io);
		return;
	}

	closure_get(&io->write.ctxt->cl);
	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
	atomic_inc(&io->write.ctxt->write_ios);

	bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
}

struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list);

	return io && io->read_completed ?
		io : NULL;
}

static void move_read_endio(struct bio *bio)
{
	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
	struct moving_context *ctxt = io->write.ctxt;

	atomic_sub(io->read_sectors, &ctxt->read_sectors);
	atomic_dec(&ctxt->read_ios);
	io->read_completed = true;

	wake_up(&ctxt->wait);
	closure_put(&ctxt->cl);
}

void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt)
{
	struct moving_io *io;

	while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
		bch2_trans_unlock_long(ctxt->trans);
		list_del(&io->read_list);
		move_write(io);
	}
}

void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);

	move_ctxt_wait_event(ctxt,
		!atomic_read(&ctxt->write_sectors) ||
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}

static void bch2_moving_ctxt_flush_all(struct moving_context *ctxt)
{
	move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
	bch2_trans_unlock_long(ctxt->trans);
	closure_sync(&ctxt->cl);
}

void bch2_moving_ctxt_exit(struct moving_context *ctxt)
{
	struct bch_fs *c = ctxt->trans->c;

	bch2_moving_ctxt_flush_all(ctxt);

	EBUG_ON(atomic_read(&ctxt->write_sectors));
	EBUG_ON(atomic_read(&ctxt->write_ios));
	EBUG_ON(atomic_read(&ctxt->read_sectors));
	EBUG_ON(atomic_read(&ctxt->read_ios));

	mutex_lock(&c->moving_context_lock);
	list_del(&ctxt->list);
	mutex_unlock(&c->moving_context_lock);

	bch2_trans_put(ctxt->trans);
	memset(ctxt, 0, sizeof(*ctxt));
}

void bch2_moving_ctxt_init(struct moving_context *ctxt,
			   struct bch_fs *c,
			   struct bch_ratelimit *rate,
			   struct bch_move_stats *stats,
			   struct write_point_specifier wp,
			   bool wait_on_copygc)
{
	memset(ctxt, 0, sizeof(*ctxt));

	ctxt->trans = bch2_trans_get(c);
	ctxt->fn = (void *) _RET_IP_;
	ctxt->rate = rate;
	ctxt->stats = stats;
	ctxt->wp = wp;
	ctxt->wait_on_copygc = wait_on_copygc;

	closure_init_stack(&ctxt->cl);

	mutex_init(&ctxt->lock);
	INIT_LIST_HEAD(&ctxt->reads);
	INIT_LIST_HEAD(&ctxt->ios);
	init_waitqueue_head(&ctxt->wait);

	mutex_lock(&c->moving_context_lock);
	list_add(&ctxt->list, &c->moving_context_list);
	mutex_unlock(&c->moving_context_lock);
}

void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c)
{
	trace_move_data(c, stats);
}

void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
{
	memset(stats, 0, sizeof(*stats));
	stats->data_type = BCH_DATA_user;
	scnprintf(stats->name, sizeof(stats->name), "%s", name);
}

int bch2_move_extent(struct moving_context *ctxt,
		     struct move_bucket_in_flight *bucket_in_flight,
		     struct btree_iter *iter,
		     struct bkey_s_c k,
		     struct bch_io_opts io_opts,
		     struct data_update_opts data_opts)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct moving_io *io;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned sectors = k.k->size, pages;
	int ret = -ENOMEM;

	if (ctxt->stats)
		ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
	trace_move_extent2(c, k);

	bch2_data_update_opts_normalize(k, &data_opts);

	if (!data_opts.rewrite_ptrs &&
	    !data_opts.extra_replicas) {
		if (data_opts.kill_ptrs)
			return bch2_extent_drop_ptrs(trans, iter, k, data_opts);
		return 0;
	}

	/*
	 * Before memory allocations & taking nocow locks in
	 * bch2_data_update_init():
	 */
	bch2_trans_unlock(trans);

	/* write path might have to decompress data: */
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);

	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	io = kzalloc(sizeof(struct moving_io) +
		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
	if (!io)
		goto err;

	INIT_LIST_HEAD(&io->io_list);
	io->write.ctxt = ctxt;
	io->read_sectors = k.k->size;
	io->write_sectors = k.k->size;

	bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	bio_set_prio(&io->write.op.wbio.bio,
		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
				 GFP_KERNEL))
		goto err_free;

	io->rbio.c = c;
	io->rbio.opts = io_opts;
	bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	io->rbio.bio.bi_vcnt = pages;
	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	io->rbio.bio.bi_iter.bi_size = sectors << 9;

	io->rbio.bio.bi_opf = REQ_OP_READ;
	io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
	io->rbio.bio.bi_end_io = move_read_endio;

	ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp,
				    io_opts, data_opts, iter->btree_id, k);
	if (ret)
		goto err_free_pages;

	io->write.op.end_io = move_write_done;

	if (ctxt->rate)
		bch2_ratelimit_increment(ctxt->rate, k.k->size);

	if (ctxt->stats) {
		atomic64_inc(&ctxt->stats->keys_moved);
		atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
	}

	if (bucket_in_flight) {
		io->b = bucket_in_flight;
		atomic_inc(&io->b->count);
	}

	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
	this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
	trace_move_extent_read2(c, k);

	mutex_lock(&ctxt->lock);
	atomic_add(io->read_sectors, &ctxt->read_sectors);
	atomic_inc(&ctxt->read_ios);

	list_add_tail(&io->read_list, &ctxt->reads);
	list_add_tail(&io->io_list, &ctxt->ios);
	mutex_unlock(&ctxt->lock);

	/*
	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
	bch2_read_extent(trans, &io->rbio,
			 bkey_start_pos(k.k),
			 iter->btree_id, k, 0,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
err_free_pages:
	bio_free_pages(&io->write.op.wbio.bio);
err_free:
	kfree(io);
err:
	if (ret == -BCH_ERR_data_update_done)
		return 0;

	if (bch2_err_matches(ret, EROFS) ||
	    bch2_err_matches(ret, BCH_ERR_transaction_restart))
		return ret;

	this_cpu_inc(c->counters[BCH_COUNTER_move_extent_start_fail]);
	if (trace_move_extent_start_fail_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		prt_str(&buf, ": ");
		prt_str(&buf, bch2_err_str(ret));
		trace_move_extent_start_fail(c, buf.buf);
		printbuf_exit(&buf);
	}
	return ret;
}

struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
					  struct per_snapshot_io_opts *io_opts,
					  struct bkey_s_c extent_k)
{
	struct bch_fs *c = trans->c;
	u32 restart_count = trans->restart_count;
	int ret = 0;

	if (io_opts->cur_inum != extent_k.k->p.inode) {
		struct btree_iter iter;
		struct bkey_s_c k;

		io_opts->d.nr = 0;

		for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
				   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
			if (k.k->p.offset != extent_k.k->p.inode)
				break;

			if (!bkey_is_inode(k.k))
				continue;

			struct bch_inode_unpacked inode;
			BUG_ON(bch2_inode_unpack(k, &inode));

			struct snapshot_io_opts_entry e = { .snapshot = k.k->p.snapshot };
			bch2_inode_opts_get(&e.io_opts, trans->c, &inode);

			ret = darray_push(&io_opts->d, e);
			if (ret)
				break;
		}
		bch2_trans_iter_exit(trans, &iter);
		io_opts->cur_inum = extent_k.k->p.inode;
	}

	ret = ret ?: trans_was_restarted(trans, restart_count);
	if (ret)
		return ERR_PTR(ret);

	if (extent_k.k->p.snapshot) {
		struct snapshot_io_opts_entry *i;
		darray_for_each(io_opts->d, i)
			if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot))
				return &i->io_opts;
	}

	return &io_opts->fs_io_opts;
}

int bch2_move_get_io_opts_one(struct btree_trans *trans,
			      struct bch_io_opts *io_opts,
			      struct bkey_s_c extent_k)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	/* reflink btree? */
	if (!extent_k.k->p.inode) {
		*io_opts = bch2_opts_to_inode_opts(trans->c->opts);
		return 0;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
			       BTREE_ITER_CACHED);
	ret = bkey_err(k);
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		return ret;

	if (!ret && bkey_is_inode(k.k)) {
		struct bch_inode_unpacked inode;
		bch2_inode_unpack(k, &inode);
		bch2_inode_opts_get(io_opts, trans->c, &inode);
	} else {
		*io_opts = bch2_opts_to_inode_opts(trans->c->opts);
	}

	bch2_trans_iter_exit(trans, &iter);
	return 0;
}

int bch2_move_ratelimit(struct moving_context *ctxt)
{
	struct bch_fs *c = ctxt->trans->c;
	bool is_kthread = current->flags & PF_KTHREAD;
	u64 delay;

	if (ctxt->wait_on_copygc && c->copygc_running) {
		bch2_moving_ctxt_flush_all(ctxt);
		wait_event_killable(c->copygc_running_wq,
				    !c->copygc_running ||
				    (is_kthread && kthread_should_stop()));
	}

	do {
		delay = ctxt->rate ?
			bch2_ratelimit_delay(ctxt->rate) : 0;

		if (is_kthread && kthread_should_stop())
			return 1;

		if (delay)
			move_ctxt_wait_event_timeout(ctxt,
					freezing(current) ||
					(is_kthread && kthread_should_stop()),
					delay);

		if (unlikely(freezing(current))) {
			bch2_moving_ctxt_flush_all(ctxt);
			try_to_freeze();
		}
	} while (delay);

	/*
	 * XXX: these limits really ought to be per device, SSDs and hard drives
	 * will want different limits
	 */
	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
		atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
		atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
		atomic_read(&ctxt->read_ios) < c->opts.move_ios_in_flight);

	return 0;
}

static int bch2_move_data_btree(struct moving_context *ctxt,
				struct bpos start,
				struct bpos end,
				move_pred_fn pred, void *arg,
				enum btree_id btree_id)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct per_snapshot_io_opts snapshot_io_opts;
	struct bch_io_opts *io_opts;
	struct bkey_buf sk;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct data_update_opts data_opts;
	int ret = 0, ret2;

	per_snapshot_io_opts_init(&snapshot_io_opts, c);
	bch2_bkey_buf_init(&sk);

	if (ctxt->stats) {
		ctxt->stats->data_type = BCH_DATA_user;
		ctxt->stats->pos = BBPOS(btree_id, start);
	}

	bch2_trans_iter_init(trans, &iter, btree_id, start,
			     BTREE_ITER_PREFETCH|
			     BTREE_ITER_ALL_SNAPSHOTS);

	if (ctxt->rate)
		bch2_ratelimit_reset(ctxt->rate);

	while (!bch2_move_ratelimit(ctxt)) {
		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		if (bkey_ge(bkey_start_pos(k.k), end))
			break;

		if (ctxt->stats)
			ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos);

		if (!bkey_extent_is_direct_data(k.k))
			goto next_nondata;

		io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, k);
		ret = PTR_ERR_OR_ZERO(io_opts);
		if (ret)
			continue;

		memset(&data_opts, 0, sizeof(data_opts));
		if (!pred(c, arg, k, io_opts, &data_opts))
			goto next;

		/*
		 * The iterator gets unlocked by __bch2_read_extent - need to
		 * save a copy of @k elsewhere:
		 */
		bch2_bkey_buf_reassemble(&sk, c, k);
		k = bkey_i_to_s_c(sk.k);

		ret2 = bch2_move_extent(ctxt, NULL, &iter, k, *io_opts, data_opts);
		if (ret2) {
			if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
				continue;

			if (ret2 == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(ctxt);
				continue;
			}

			/* XXX signal failure */
			goto next;
		}
next:
		if (ctxt->stats)
			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
		bch2_btree_iter_advance(&iter);
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&sk, c);
	per_snapshot_io_opts_exit(&snapshot_io_opts);

	return ret;
}

int __bch2_move_data(struct moving_context *ctxt,
		     struct bbpos start,
		     struct bbpos end,
		     move_pred_fn pred, void *arg)
{
	struct bch_fs *c = ctxt->trans->c;
	enum btree_id id;
	int ret = 0;

	for (id = start.btree;
	     id <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
	     id++) {
		ctxt->stats->pos = BBPOS(id, POS_MIN);

		if (!btree_type_has_ptrs(id) ||
		    !bch2_btree_id_root(c, id)->b)
			continue;

		ret = bch2_move_data_btree(ctxt,
					   id == start.btree ? start.pos : POS_MIN,
					   id == end.btree ? end.pos : POS_MAX,
					   pred, arg, id);
		if (ret)
			break;
	}

	return ret;
}

int bch2_move_data(struct bch_fs *c,
		   struct bbpos start,
		   struct bbpos end,
		   struct bch_ratelimit *rate,
		   struct bch_move_stats *stats,
		   struct write_point_specifier wp,
		   bool wait_on_copygc,
		   move_pred_fn pred, void *arg)
{

	struct moving_context ctxt;
	int ret;

	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
	ret = __bch2_move_data(&ctxt, start, end, pred, arg);
	bch2_moving_ctxt_exit(&ctxt);

	return ret;
}

int __bch2_evacuate_bucket(struct moving_context *ctxt,
			   struct move_bucket_in_flight *bucket_in_flight,
			   struct bpos bucket, int gen,
			   struct data_update_opts _data_opts)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	bool is_kthread = current->flags & PF_KTHREAD;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct btree_iter iter;
	struct bkey_buf sk;
	struct bch_backpointer bp;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c k;
	struct data_update_opts data_opts;
	unsigned dirty_sectors, bucket_size;
	u64 fragmentation;
	struct bpos bp_pos = POS_MIN;
	int ret = 0;

	trace_bucket_evacuate(c, &bucket);

	bch2_bkey_buf_init(&sk);

	/*
	 * We're not run in a context that handles transaction restarts:
	 */
	bch2_trans_begin(trans);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     bucket, BTREE_ITER_CACHED);
	ret = lockrestart_do(trans,
			bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
	bch2_trans_iter_exit(trans, &iter);

	if (ret) {
		bch_err_msg(c, ret, "looking up alloc key");
		goto err;
	}

	a = bch2_alloc_to_v4(k, &a_convert);
	dirty_sectors = a->dirty_sectors;
	bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
	fragmentation = a->fragmentation_lru;

	ret = bch2_btree_write_buffer_flush(trans);
	if (ret) {
		bch_err_msg(c, ret, "flushing btree write buffer");
		goto err;
	}

	while (!(ret = bch2_move_ratelimit(ctxt))) {
		if (is_kthread && kthread_should_stop())
			break;

		bch2_trans_begin(trans);

		ret = bch2_get_next_backpointer(trans, bucket, gen,
						&bp_pos, &bp,
						BTREE_ITER_CACHED);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			goto err;
		if (bkey_eq(bp_pos, POS_MAX))
			break;

		if (!bp.level) {
			const struct bch_extent_ptr *ptr;
			unsigned i = 0;

			k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
			ret = bkey_err(k);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				goto err;
			if (!k.k)
				goto next;

			bch2_bkey_buf_reassemble(&sk, c, k);
			k = bkey_i_to_s_c(sk.k);

			ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
			if (ret) {
				bch2_trans_iter_exit(trans, &iter);
				continue;
			}

			data_opts = _data_opts;
			data_opts.target = io_opts.background_target;
			data_opts.rewrite_ptrs = 0;

			bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
				if (ptr->dev == bucket.inode) {
					data_opts.rewrite_ptrs |= 1U << i;
					if (ptr->cached) {
						bch2_trans_iter_exit(trans, &iter);
						goto next;
					}
				}
				i++;
			}

			ret = bch2_move_extent(ctxt, bucket_in_flight,
					       &iter, k, io_opts, data_opts);
			bch2_trans_iter_exit(trans, &iter);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(ctxt);
				continue;
			}
			if (ret)
				goto err;

			if (ctxt->stats)
				atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
		} else {
			struct btree *b;

			b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp);
			ret = PTR_ERR_OR_ZERO(b);
			if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
				continue;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				goto err;
			if (!b)
				goto next;

			ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
			bch2_trans_iter_exit(trans, &iter);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				goto err;

			if (ctxt->rate)
				bch2_ratelimit_increment(ctxt->rate,
							 c->opts.btree_node_size >> 9);
			if (ctxt->stats) {
				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
			}
		}
next:
		bp_pos = bpos_nosnap_successor(bp_pos);
	}

	trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret);
err:
	bch2_bkey_buf_exit(&sk, c);
	return ret;
}

int bch2_evacuate_bucket(struct bch_fs *c,
			 struct bpos bucket, int gen,
			 struct data_update_opts data_opts,
			 struct bch_ratelimit *rate,
			 struct bch_move_stats *stats,
			 struct write_point_specifier wp,
			 bool wait_on_copygc)
{
	struct moving_context ctxt;
	int ret;

	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
	ret = __bch2_evacuate_bucket(&ctxt, NULL, bucket, gen, data_opts);
	bch2_moving_ctxt_exit(&ctxt);

	return ret;
}

typedef bool (*move_btree_pred)(struct bch_fs *, void *,
				struct btree *, struct bch_io_opts *,
				struct data_update_opts *);

static int bch2_move_btree(struct bch_fs *c,
			   enum btree_id start_btree_id, struct bpos start_pos,
			   enum btree_id end_btree_id, struct bpos end_pos,
			   move_btree_pred pred, void *arg,
			   struct bch_move_stats *stats)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct moving_context ctxt;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct btree *b;
	enum btree_id id;
	struct data_update_opts data_opts;
	int ret = 0;

	bch2_moving_ctxt_init(&ctxt, c, NULL, stats,
			      writepoint_ptr(&c->btree_write_point),
			      true);
	trans = ctxt.trans;

	stats->data_type = BCH_DATA_btree;

	for (id = start_btree_id;
	     id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1);
	     id++) {
		stats->pos = BBPOS(id, POS_MIN);

		if (!bch2_btree_id_root(c, id)->b)
			continue;

		bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0,
					  BTREE_ITER_PREFETCH);
retry:
		ret = 0;
		while (bch2_trans_begin(trans),
		       (b = bch2_btree_iter_peek_node(&iter)) &&
		       !(ret = PTR_ERR_OR_ZERO(b))) {
			if (kthread && kthread_should_stop())
				break;

			if ((cmp_int(id, end_btree_id) ?:
			     bpos_cmp(b->key.k.p, end_pos)) > 0)
				break;

			stats->pos = BBPOS(iter.btree_id, iter.pos);

			if (!pred(c, arg, b, &io_opts, &data_opts))
				goto next;

			ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: ret;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				continue;
			if (ret)
				break;
next:
			bch2_btree_iter_next_node(&iter);
		}
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;

		bch2_trans_iter_exit(trans, &iter);

		if (kthread && kthread_should_stop())
			break;
	}

	bch_err_fn(c, ret);
	bch2_moving_ctxt_exit(&ctxt);
	bch2_btree_interior_updates_flush(c);

	return ret;
}

static bool rereplicate_pred(struct bch_fs *c, void *arg,
			     struct bkey_s_c k,
			     struct bch_io_opts *io_opts,
			     struct data_update_opts *data_opts)
{
	unsigned nr_good = bch2_bkey_durability(c, k);
	unsigned replicas = bkey_is_btree_ptr(k.k)
		? c->opts.metadata_replicas
		: io_opts->data_replicas;

	if (!nr_good || nr_good >= replicas)
		return false;

	data_opts->target = 0;
	data_opts->extra_replicas = replicas - nr_good;
	data_opts->btree_insert_flags = 0;
	return true;
}

static bool migrate_pred(struct bch_fs *c, void *arg,
			 struct bkey_s_c k,
			 struct bch_io_opts *io_opts,
			 struct data_update_opts *data_opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	struct bch_ioctl_data *op = arg;
	unsigned i = 0;

	data_opts->rewrite_ptrs = 0;
	data_opts->target = 0;
	data_opts->extra_replicas = 0;
	data_opts->btree_insert_flags = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if (ptr->dev == op->migrate.dev)
			data_opts->rewrite_ptrs |= 1U << i;
		i++;
	}

	return data_opts->rewrite_ptrs != 0;
}

static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
				   struct btree *b,
				   struct bch_io_opts *io_opts,
				   struct data_update_opts *data_opts)
{
	return rereplicate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}

static bool migrate_btree_pred(struct bch_fs *c, void *arg,
			       struct btree *b,
			       struct bch_io_opts *io_opts,
			       struct data_update_opts *data_opts)
{
	return migrate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}

static bool bformat_needs_redo(struct bkey_format *f)
{
	unsigned i;

	for (i = 0; i < f->nr_fields; i++) {
		unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
		u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
		u64 field_offset = le64_to_cpu(f->field_offset[i]);

		if (f->bits_per_field[i] > unpacked_bits)
			return true;

		if ((f->bits_per_field[i] == unpacked_bits) && field_offset)
			return true;

		if (((field_offset + ((1ULL << f->bits_per_field[i]) - 1)) &
		     unpacked_mask) <
		    field_offset)
			return true;
	}

	return false;
}

static bool rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
				   struct btree *b,
				   struct bch_io_opts *io_opts,
				   struct data_update_opts *data_opts)
{
	if (b->version_ondisk != c->sb.version ||
	    btree_node_need_rewrite(b) ||
	    bformat_needs_redo(&b->format)) {
		data_opts->target = 0;
		data_opts->extra_replicas = 0;
		data_opts->btree_insert_flags = 0;
		return true;
	}

	return false;
}

int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
{
	int ret;

	ret = bch2_move_btree(c,
			      0, POS_MIN,
			      BTREE_ID_NR, SPOS_MAX,
			      rewrite_old_nodes_pred, c, stats);
	if (!ret) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
		c->disk_sb.sb->version_min = c->disk_sb.sb->version;
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	bch_err_fn(c, ret);
	return ret;
}

int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
{
	int ret = 0;

	switch (op.op) {
	case BCH_DATA_OP_REREPLICATE:
		bch2_move_stats_init(stats, "rereplicate");
		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, -1);

		ret = bch2_move_btree(c,
				      op.start_btree, op.start_pos,
				      op.end_btree, op.end_pos,
				      rereplicate_btree_pred, c, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c,
				     (struct bbpos) { op.start_btree, op.start_pos },
				     (struct bbpos) { op.end_btree, op.end_pos },
				     NULL,
				     stats,
				     writepoint_hashed((unsigned long) current),
				     true,
				     rereplicate_pred, c) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;

		bch2_move_stats_exit(stats, c);
		break;
	case BCH_DATA_OP_MIGRATE:
		if (op.migrate.dev >= c->sb.nr_devices)
			return -EINVAL;

		bch2_move_stats_init(stats, "migrate");
		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);

		ret = bch2_move_btree(c,
				      op.start_btree, op.start_pos,
				      op.end_btree, op.end_pos,
				      migrate_btree_pred, &op, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c,
				     (struct bbpos) { op.start_btree, op.start_pos },
				     (struct bbpos) { op.end_btree, op.end_pos },
				     NULL,
				     stats,
				     writepoint_hashed((unsigned long) current),
				     true,
				     migrate_pred, &op) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;

		bch2_move_stats_exit(stats, c);
		break;
	case BCH_DATA_OP_REWRITE_OLD_NODES:
		bch2_move_stats_init(stats, "rewrite_old_nodes");
		ret = bch2_scan_old_btree_nodes(c, stats);
		bch2_move_stats_exit(stats, c);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
{
	prt_printf(out, "%s: data type=%s pos=",
		   stats->name,
		   bch2_data_types[stats->data_type]);
	bch2_bbpos_to_text(out, stats->pos);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_str(out, "keys moved: ");
	prt_u64(out, atomic64_read(&stats->keys_moved));
	prt_newline(out);

	prt_str(out, "keys raced: ");
	prt_u64(out, atomic64_read(&stats->keys_raced));
	prt_newline(out);

	prt_str(out, "bytes seen: ");
	prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
	prt_newline(out);

	prt_str(out, "bytes moved: ");
	prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
	prt_newline(out);

	prt_str(out, "bytes raced: ");
	prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c,
				     struct moving_context *ctxt)
{
	struct moving_io *io;

	bch2_move_stats_to_text(out, ctxt->stats);
	printbuf_indent_add(out, 2);

	prt_printf(out, "reads: ios %u/%u sectors %u/%u",
		   atomic_read(&ctxt->read_ios),
		   c->opts.move_ios_in_flight,
		   atomic_read(&ctxt->read_sectors),
		   c->opts.move_bytes_in_flight >> 9);
	prt_newline(out);

	prt_printf(out, "writes: ios %u/%u sectors %u/%u",
		   atomic_read(&ctxt->write_ios),
		   c->opts.move_ios_in_flight,
		   atomic_read(&ctxt->write_sectors),
		   c->opts.move_bytes_in_flight >> 9);
	prt_newline(out);

	printbuf_indent_add(out, 2);

	mutex_lock(&ctxt->lock);
	list_for_each_entry(io, &ctxt->ios, io_list)
		bch2_write_op_to_text(out, &io->write.op);
	mutex_unlock(&ctxt->lock);

	printbuf_indent_sub(out, 4);
}

void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct moving_context *ctxt;

	mutex_lock(&c->moving_context_lock);
	list_for_each_entry(ctxt, &c->moving_context_list, list)
		bch2_moving_ctxt_to_text(out, c, ctxt);
	mutex_unlock(&c->moving_context_lock);
}

void bch2_fs_move_init(struct bch_fs *c)
{
	INIT_LIST_HEAD(&c->moving_context_list);
	mutex_init(&c->moving_context_lock);
}