/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.
 * Each device is divided into far_copies sections.
 * In each section, chunks are laid out in a style similar to raid0, but
 * near_copies copies of each chunk are stored (each on a different drive).
 * The starting device for each section is offset near_copies from the starting
 * device of the previous section.
 * Thus there are (near_copies*far_copies) copies of each chunk, and each is
 * on a different drive.
 * near_copies and far_copies must be at least one, and their product is at
 * most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 */

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queued to be written by
 * the raid10 thread, we become 'congested' to provide back-pressure
 * for writeback.
80 */ 81static int max_queued_requests = 1024; 82 83static void allow_barrier(struct r10conf *conf); 84static void lower_barrier(struct r10conf *conf); 85static int enough(struct r10conf *conf, int ignore); 86static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, 87 int *skipped); 88static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); 89static void end_reshape_write(struct bio *bio, int error); 90static void end_reshape(struct r10conf *conf); 91 92static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 93{ 94 struct r10conf *conf = data; 95 int size = offsetof(struct r10bio, devs[conf->copies]); 96 97 /* allocate a r10bio with room for raid_disks entries in the 98 * bios array */ 99 return kzalloc(size, gfp_flags); 100} 101 102static void r10bio_pool_free(void *r10_bio, void *data) 103{ 104 kfree(r10_bio); 105} 106 107/* Maximum size of each resync request */ 108#define RESYNC_BLOCK_SIZE (64*1024) 109#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 110/* amount of memory to reserve for resync requests */ 111#define RESYNC_WINDOW (1024*1024) 112/* maximum number of concurrent requests, memory permitting */ 113#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) 114 115/* 116 * When performing a resync, we need to read and compare, so 117 * we need as many pages are there are copies. 118 * When performing a recovery, we need 2 bios, one for read, 119 * one for write (we recover only one drive per r10buf) 120 * 121 */ 122static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) 123{ 124 struct r10conf *conf = data; 125 struct page *page; 126 struct r10bio *r10_bio; 127 struct bio *bio; 128 int i, j; 129 int nalloc; 130 131 r10_bio = r10bio_pool_alloc(gfp_flags, conf); 132 if (!r10_bio) 133 return NULL; 134 135 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || 136 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) 137 nalloc = conf->copies; /* resync */ 138 else 139 nalloc = 2; /* recovery */ 140 141 /* 142 * Allocate bios. 143 */ 144 for (j = nalloc ; j-- ; ) { 145 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 146 if (!bio) 147 goto out_free_bio; 148 r10_bio->devs[j].bio = bio; 149 if (!conf->have_replacement) 150 continue; 151 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 152 if (!bio) 153 goto out_free_bio; 154 r10_bio->devs[j].repl_bio = bio; 155 } 156 /* 157 * Allocate RESYNC_PAGES data pages and attach them 158 * where needed. 
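 *
 * (Note: during recovery and reshape the data pages attached to
 * devs[0].bio are shared with the other bios via get_page() rather
 * than being allocated again; only a plain resync allocates a full
 * set of pages for every copy.)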
159 */ 160 for (j = 0 ; j < nalloc; j++) { 161 struct bio *rbio = r10_bio->devs[j].repl_bio; 162 bio = r10_bio->devs[j].bio; 163 for (i = 0; i < RESYNC_PAGES; i++) { 164 if (j > 0 && !test_bit(MD_RECOVERY_SYNC, 165 &conf->mddev->recovery)) { 166 /* we can share bv_page's during recovery 167 * and reshape */ 168 struct bio *rbio = r10_bio->devs[0].bio; 169 page = rbio->bi_io_vec[i].bv_page; 170 get_page(page); 171 } else 172 page = alloc_page(gfp_flags); 173 if (unlikely(!page)) 174 goto out_free_pages; 175 176 bio->bi_io_vec[i].bv_page = page; 177 if (rbio) 178 rbio->bi_io_vec[i].bv_page = page; 179 } 180 } 181 182 return r10_bio; 183 184out_free_pages: 185 for ( ; i > 0 ; i--) 186 safe_put_page(bio->bi_io_vec[i-1].bv_page); 187 while (j--) 188 for (i = 0; i < RESYNC_PAGES ; i++) 189 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); 190 j = 0; 191out_free_bio: 192 for ( ; j < nalloc; j++) { 193 if (r10_bio->devs[j].bio) 194 bio_put(r10_bio->devs[j].bio); 195 if (r10_bio->devs[j].repl_bio) 196 bio_put(r10_bio->devs[j].repl_bio); 197 } 198 r10bio_pool_free(r10_bio, conf); 199 return NULL; 200} 201 202static void r10buf_pool_free(void *__r10_bio, void *data) 203{ 204 int i; 205 struct r10conf *conf = data; 206 struct r10bio *r10bio = __r10_bio; 207 int j; 208 209 for (j=0; j < conf->copies; j++) { 210 struct bio *bio = r10bio->devs[j].bio; 211 if (bio) { 212 for (i = 0; i < RESYNC_PAGES; i++) { 213 safe_put_page(bio->bi_io_vec[i].bv_page); 214 bio->bi_io_vec[i].bv_page = NULL; 215 } 216 bio_put(bio); 217 } 218 bio = r10bio->devs[j].repl_bio; 219 if (bio) 220 bio_put(bio); 221 } 222 r10bio_pool_free(r10bio, conf); 223} 224 225static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) 226{ 227 int i; 228 229 for (i = 0; i < conf->copies; i++) { 230 struct bio **bio = & r10_bio->devs[i].bio; 231 if (!BIO_SPECIAL(*bio)) 232 bio_put(*bio); 233 *bio = NULL; 234 bio = &r10_bio->devs[i].repl_bio; 235 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) 236 bio_put(*bio); 237 *bio = NULL; 238 } 239} 240 241static void free_r10bio(struct r10bio *r10_bio) 242{ 243 struct r10conf *conf = r10_bio->mddev->private; 244 245 put_all_bios(conf, r10_bio); 246 mempool_free(r10_bio, conf->r10bio_pool); 247} 248 249static void put_buf(struct r10bio *r10_bio) 250{ 251 struct r10conf *conf = r10_bio->mddev->private; 252 253 mempool_free(r10_bio, conf->r10buf_pool); 254 255 lower_barrier(conf); 256} 257 258static void reschedule_retry(struct r10bio *r10_bio) 259{ 260 unsigned long flags; 261 struct mddev *mddev = r10_bio->mddev; 262 struct r10conf *conf = mddev->private; 263 264 spin_lock_irqsave(&conf->device_lock, flags); 265 list_add(&r10_bio->retry_list, &conf->retry_list); 266 conf->nr_queued ++; 267 spin_unlock_irqrestore(&conf->device_lock, flags); 268 269 /* wake up frozen array... */ 270 wake_up(&conf->wait_barrier); 271 272 md_wakeup_thread(mddev->thread); 273} 274 275/* 276 * raid_end_bio_io() is called when we have finished servicing a mirrored 277 * operation and are ready to return a success/failure code to the buffer 278 * cache layer. 
279 */ 280static void raid_end_bio_io(struct r10bio *r10_bio) 281{ 282 struct bio *bio = r10_bio->master_bio; 283 int done; 284 struct r10conf *conf = r10_bio->mddev->private; 285 286 if (bio->bi_phys_segments) { 287 unsigned long flags; 288 spin_lock_irqsave(&conf->device_lock, flags); 289 bio->bi_phys_segments--; 290 done = (bio->bi_phys_segments == 0); 291 spin_unlock_irqrestore(&conf->device_lock, flags); 292 } else 293 done = 1; 294 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 295 clear_bit(BIO_UPTODATE, &bio->bi_flags); 296 if (done) { 297 bio_endio(bio, 0); 298 /* 299 * Wake up any possible resync thread that waits for the device 300 * to go idle. 301 */ 302 allow_barrier(conf); 303 } 304 free_r10bio(r10_bio); 305} 306 307/* 308 * Update disk head position estimator based on IRQ completion info. 309 */ 310static inline void update_head_pos(int slot, struct r10bio *r10_bio) 311{ 312 struct r10conf *conf = r10_bio->mddev->private; 313 314 conf->mirrors[r10_bio->devs[slot].devnum].head_position = 315 r10_bio->devs[slot].addr + (r10_bio->sectors); 316} 317 318/* 319 * Find the disk number which triggered given bio 320 */ 321static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, 322 struct bio *bio, int *slotp, int *replp) 323{ 324 int slot; 325 int repl = 0; 326 327 for (slot = 0; slot < conf->copies; slot++) { 328 if (r10_bio->devs[slot].bio == bio) 329 break; 330 if (r10_bio->devs[slot].repl_bio == bio) { 331 repl = 1; 332 break; 333 } 334 } 335 336 BUG_ON(slot == conf->copies); 337 update_head_pos(slot, r10_bio); 338 339 if (slotp) 340 *slotp = slot; 341 if (replp) 342 *replp = repl; 343 return r10_bio->devs[slot].devnum; 344} 345 346static void raid10_end_read_request(struct bio *bio, int error) 347{ 348 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 349 struct r10bio *r10_bio = bio->bi_private; 350 int slot, dev; 351 struct md_rdev *rdev; 352 struct r10conf *conf = r10_bio->mddev->private; 353 354 355 slot = r10_bio->read_slot; 356 dev = r10_bio->devs[slot].devnum; 357 rdev = r10_bio->devs[slot].rdev; 358 /* 359 * this branch is our 'one mirror IO has finished' event handler: 360 */ 361 update_head_pos(slot, r10_bio); 362 363 if (uptodate) { 364 /* 365 * Set R10BIO_Uptodate in our master bio, so that 366 * we will return a good error code to the higher 367 * levels even if IO on some other mirrored buffer fails. 368 * 369 * The 'master' represents the composite IO operation to 370 * user-side. So if something waits for IO, then it will 371 * wait for the 'master' bio. 372 */ 373 set_bit(R10BIO_Uptodate, &r10_bio->state); 374 } else { 375 /* If all other devices that store this block have 376 * failed, we want to return the error upwards rather 377 * than fail the last device. 
Here we redefine 378 * "uptodate" to mean "Don't want to retry" 379 */ 380 unsigned long flags; 381 spin_lock_irqsave(&conf->device_lock, flags); 382 if (!enough(conf, rdev->raid_disk)) 383 uptodate = 1; 384 spin_unlock_irqrestore(&conf->device_lock, flags); 385 } 386 if (uptodate) { 387 raid_end_bio_io(r10_bio); 388 rdev_dec_pending(rdev, conf->mddev); 389 } else { 390 /* 391 * oops, read error - keep the refcount on the rdev 392 */ 393 char b[BDEVNAME_SIZE]; 394 printk_ratelimited(KERN_ERR 395 "md/raid10:%s: %s: rescheduling sector %llu\n", 396 mdname(conf->mddev), 397 bdevname(rdev->bdev, b), 398 (unsigned long long)r10_bio->sector); 399 set_bit(R10BIO_ReadError, &r10_bio->state); 400 reschedule_retry(r10_bio); 401 } 402} 403 404static void close_write(struct r10bio *r10_bio) 405{ 406 /* clear the bitmap if all writes complete successfully */ 407 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 408 r10_bio->sectors, 409 !test_bit(R10BIO_Degraded, &r10_bio->state), 410 0); 411 md_write_end(r10_bio->mddev); 412} 413 414static void one_write_done(struct r10bio *r10_bio) 415{ 416 if (atomic_dec_and_test(&r10_bio->remaining)) { 417 if (test_bit(R10BIO_WriteError, &r10_bio->state)) 418 reschedule_retry(r10_bio); 419 else { 420 close_write(r10_bio); 421 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) 422 reschedule_retry(r10_bio); 423 else 424 raid_end_bio_io(r10_bio); 425 } 426 } 427} 428 429static void raid10_end_write_request(struct bio *bio, int error) 430{ 431 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 432 struct r10bio *r10_bio = bio->bi_private; 433 int dev; 434 int dec_rdev = 1; 435 struct r10conf *conf = r10_bio->mddev->private; 436 int slot, repl; 437 struct md_rdev *rdev = NULL; 438 439 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 440 441 if (repl) 442 rdev = conf->mirrors[dev].replacement; 443 if (!rdev) { 444 smp_rmb(); 445 repl = 0; 446 rdev = conf->mirrors[dev].rdev; 447 } 448 /* 449 * this branch is our 'one mirror IO has finished' event handler: 450 */ 451 if (!uptodate) { 452 if (repl) 453 /* Never record new bad blocks to replacement, 454 * just fail it. 455 */ 456 md_error(rdev->mddev, rdev); 457 else { 458 set_bit(WriteErrorSeen, &rdev->flags); 459 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 460 set_bit(MD_RECOVERY_NEEDED, 461 &rdev->mddev->recovery); 462 set_bit(R10BIO_WriteError, &r10_bio->state); 463 dec_rdev = 0; 464 } 465 } else { 466 /* 467 * Set R10BIO_Uptodate in our master bio, so that 468 * we will return a good error code for to the higher 469 * levels even if IO on some other mirrored buffer fails. 470 * 471 * The 'master' represents the composite IO operation to 472 * user-side. So if something waits for IO, then it will 473 * wait for the 'master' bio. 474 */ 475 sector_t first_bad; 476 int bad_sectors; 477 478 set_bit(R10BIO_Uptodate, &r10_bio->state); 479 480 /* Maybe we can clear some bad blocks. */ 481 if (is_badblock(rdev, 482 r10_bio->devs[slot].addr, 483 r10_bio->sectors, 484 &first_bad, &bad_sectors)) { 485 bio_put(bio); 486 if (repl) 487 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; 488 else 489 r10_bio->devs[slot].bio = IO_MADE_GOOD; 490 dec_rdev = 0; 491 set_bit(R10BIO_MadeGood, &r10_bio->state); 492 } 493 } 494 495 /* 496 * 497 * Let's see if all mirrored write operations have finished 498 * already. 
499 */ 500 one_write_done(r10_bio); 501 if (dec_rdev) 502 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 503} 504 505/* 506 * RAID10 layout manager 507 * As well as the chunksize and raid_disks count, there are two 508 * parameters: near_copies and far_copies. 509 * near_copies * far_copies must be <= raid_disks. 510 * Normally one of these will be 1. 511 * If both are 1, we get raid0. 512 * If near_copies == raid_disks, we get raid1. 513 * 514 * Chunks are laid out in raid0 style with near_copies copies of the 515 * first chunk, followed by near_copies copies of the next chunk and 516 * so on. 517 * If far_copies > 1, then after 1/far_copies of the array has been assigned 518 * as described above, we start again with a device offset of near_copies. 519 * So we effectively have another copy of the whole array further down all 520 * the drives, but with blocks on different drives. 521 * With this layout, and block is never stored twice on the one device. 522 * 523 * raid10_find_phys finds the sector offset of a given virtual sector 524 * on each device that it is on. 525 * 526 * raid10_find_virt does the reverse mapping, from a device and a 527 * sector offset to a virtual address 528 */ 529 530static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio) 531{ 532 int n,f; 533 sector_t sector; 534 sector_t chunk; 535 sector_t stripe; 536 int dev; 537 int slot = 0; 538 539 /* now calculate first sector/dev */ 540 chunk = r10bio->sector >> geo->chunk_shift; 541 sector = r10bio->sector & geo->chunk_mask; 542 543 chunk *= geo->near_copies; 544 stripe = chunk; 545 dev = sector_div(stripe, geo->raid_disks); 546 if (geo->far_offset) 547 stripe *= geo->far_copies; 548 549 sector += stripe << geo->chunk_shift; 550 551 /* and calculate all the others */ 552 for (n = 0; n < geo->near_copies; n++) { 553 int d = dev; 554 sector_t s = sector; 555 r10bio->devs[slot].addr = sector; 556 r10bio->devs[slot].devnum = d; 557 slot++; 558 559 for (f = 1; f < geo->far_copies; f++) { 560 d += geo->near_copies; 561 if (d >= geo->raid_disks) 562 d -= geo->raid_disks; 563 s += geo->stride; 564 r10bio->devs[slot].devnum = d; 565 r10bio->devs[slot].addr = s; 566 slot++; 567 } 568 dev++; 569 if (dev >= geo->raid_disks) { 570 dev = 0; 571 sector += (geo->chunk_mask + 1); 572 } 573 } 574} 575 576static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) 577{ 578 struct geom *geo = &conf->geo; 579 580 if (conf->reshape_progress != MaxSector && 581 ((r10bio->sector >= conf->reshape_progress) != 582 conf->mddev->reshape_backwards)) { 583 set_bit(R10BIO_Previous, &r10bio->state); 584 geo = &conf->prev; 585 } else 586 clear_bit(R10BIO_Previous, &r10bio->state); 587 588 __raid10_find_phys(geo, r10bio); 589} 590 591static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) 592{ 593 sector_t offset, chunk, vchunk; 594 /* Never use conf->prev as this is only called during resync 595 * or recovery, so reshape isn't happening 596 */ 597 struct geom *geo = &conf->geo; 598 599 offset = sector & geo->chunk_mask; 600 if (geo->far_offset) { 601 int fc; 602 chunk = sector >> geo->chunk_shift; 603 fc = sector_div(chunk, geo->far_copies); 604 dev -= fc * geo->near_copies; 605 if (dev < 0) 606 dev += geo->raid_disks; 607 } else { 608 while (sector >= geo->stride) { 609 sector -= geo->stride; 610 if (dev < geo->near_copies) 611 dev += geo->raid_disks - geo->near_copies; 612 else 613 dev -= geo->near_copies; 614 } 615 chunk = sector >> geo->chunk_shift; 616 } 617 vchunk = chunk * 
geo->raid_disks + dev; 618 sector_div(vchunk, geo->near_copies); 619 return (vchunk << geo->chunk_shift) + offset; 620} 621 622/** 623 * raid10_mergeable_bvec -- tell bio layer if a two requests can be merged 624 * @q: request queue 625 * @bvm: properties of new bio 626 * @biovec: the request that could be merged to it. 627 * 628 * Return amount of bytes we can accept at this offset 629 * This requires checking for end-of-chunk if near_copies != raid_disks, 630 * and for subordinate merge_bvec_fns if merge_check_needed. 631 */ 632static int raid10_mergeable_bvec(struct request_queue *q, 633 struct bvec_merge_data *bvm, 634 struct bio_vec *biovec) 635{ 636 struct mddev *mddev = q->queuedata; 637 struct r10conf *conf = mddev->private; 638 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 639 int max; 640 unsigned int chunk_sectors; 641 unsigned int bio_sectors = bvm->bi_size >> 9; 642 struct geom *geo = &conf->geo; 643 644 chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1; 645 if (conf->reshape_progress != MaxSector && 646 ((sector >= conf->reshape_progress) != 647 conf->mddev->reshape_backwards)) 648 geo = &conf->prev; 649 650 if (geo->near_copies < geo->raid_disks) { 651 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) 652 + bio_sectors)) << 9; 653 if (max < 0) 654 /* bio_add cannot handle a negative return */ 655 max = 0; 656 if (max <= biovec->bv_len && bio_sectors == 0) 657 return biovec->bv_len; 658 } else 659 max = biovec->bv_len; 660 661 if (mddev->merge_check_needed) { 662 struct { 663 struct r10bio r10_bio; 664 struct r10dev devs[conf->copies]; 665 } on_stack; 666 struct r10bio *r10_bio = &on_stack.r10_bio; 667 int s; 668 if (conf->reshape_progress != MaxSector) { 669 /* Cannot give any guidance during reshape */ 670 if (max <= biovec->bv_len && bio_sectors == 0) 671 return biovec->bv_len; 672 return 0; 673 } 674 r10_bio->sector = sector; 675 raid10_find_phys(conf, r10_bio); 676 rcu_read_lock(); 677 for (s = 0; s < conf->copies; s++) { 678 int disk = r10_bio->devs[s].devnum; 679 struct md_rdev *rdev = rcu_dereference( 680 conf->mirrors[disk].rdev); 681 if (rdev && !test_bit(Faulty, &rdev->flags)) { 682 struct request_queue *q = 683 bdev_get_queue(rdev->bdev); 684 if (q->merge_bvec_fn) { 685 bvm->bi_sector = r10_bio->devs[s].addr 686 + rdev->data_offset; 687 bvm->bi_bdev = rdev->bdev; 688 max = min(max, q->merge_bvec_fn( 689 q, bvm, biovec)); 690 } 691 } 692 rdev = rcu_dereference(conf->mirrors[disk].replacement); 693 if (rdev && !test_bit(Faulty, &rdev->flags)) { 694 struct request_queue *q = 695 bdev_get_queue(rdev->bdev); 696 if (q->merge_bvec_fn) { 697 bvm->bi_sector = r10_bio->devs[s].addr 698 + rdev->data_offset; 699 bvm->bi_bdev = rdev->bdev; 700 max = min(max, q->merge_bvec_fn( 701 q, bvm, biovec)); 702 } 703 } 704 } 705 rcu_read_unlock(); 706 } 707 return max; 708} 709 710/* 711 * This routine returns the disk from which the requested read should 712 * be done. There is a per-array 'next expected sequential IO' sector 713 * number - if this matches on the next IO then we use the last disk. 714 * There is also a per-disk 'last know head position' sector that is 715 * maintained from IRQ contexts, both the normal and the resync IO 716 * completion handlers update this position correctly. If there is no 717 * perfect sequential match then we pick the disk whose head is closest. 718 * 719 * If there are 2 mirrors in the same 2 devices, performance degrades 720 * because position is mirror, not device based. 
721 * 722 * The rdev for the device selected will have nr_pending incremented. 723 */ 724 725/* 726 * FIXME: possibly should rethink readbalancing and do it differently 727 * depending on near_copies / far_copies geometry. 728 */ 729static struct md_rdev *read_balance(struct r10conf *conf, 730 struct r10bio *r10_bio, 731 int *max_sectors) 732{ 733 const sector_t this_sector = r10_bio->sector; 734 int disk, slot; 735 int sectors = r10_bio->sectors; 736 int best_good_sectors; 737 sector_t new_distance, best_dist; 738 struct md_rdev *best_rdev, *rdev = NULL; 739 int do_balance; 740 int best_slot; 741 struct geom *geo = &conf->geo; 742 743 raid10_find_phys(conf, r10_bio); 744 rcu_read_lock(); 745retry: 746 sectors = r10_bio->sectors; 747 best_slot = -1; 748 best_rdev = NULL; 749 best_dist = MaxSector; 750 best_good_sectors = 0; 751 do_balance = 1; 752 /* 753 * Check if we can balance. We can balance on the whole 754 * device if no resync is going on (recovery is ok), or below 755 * the resync window. We take the first readable disk when 756 * above the resync window. 757 */ 758 if (conf->mddev->recovery_cp < MaxSector 759 && (this_sector + sectors >= conf->next_resync)) 760 do_balance = 0; 761 762 for (slot = 0; slot < conf->copies ; slot++) { 763 sector_t first_bad; 764 int bad_sectors; 765 sector_t dev_sector; 766 767 if (r10_bio->devs[slot].bio == IO_BLOCKED) 768 continue; 769 disk = r10_bio->devs[slot].devnum; 770 rdev = rcu_dereference(conf->mirrors[disk].replacement); 771 if (rdev == NULL || test_bit(Faulty, &rdev->flags) || 772 test_bit(Unmerged, &rdev->flags) || 773 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) 774 rdev = rcu_dereference(conf->mirrors[disk].rdev); 775 if (rdev == NULL || 776 test_bit(Faulty, &rdev->flags) || 777 test_bit(Unmerged, &rdev->flags)) 778 continue; 779 if (!test_bit(In_sync, &rdev->flags) && 780 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) 781 continue; 782 783 dev_sector = r10_bio->devs[slot].addr; 784 if (is_badblock(rdev, dev_sector, sectors, 785 &first_bad, &bad_sectors)) { 786 if (best_dist < MaxSector) 787 /* Already have a better slot */ 788 continue; 789 if (first_bad <= dev_sector) { 790 /* Cannot read here. If this is the 791 * 'primary' device, then we must not read 792 * beyond 'bad_sectors' from another device. 793 */ 794 bad_sectors -= (dev_sector - first_bad); 795 if (!do_balance && sectors > bad_sectors) 796 sectors = bad_sectors; 797 if (best_good_sectors > sectors) 798 best_good_sectors = sectors; 799 } else { 800 sector_t good_sectors = 801 first_bad - dev_sector; 802 if (good_sectors > best_good_sectors) { 803 best_good_sectors = good_sectors; 804 best_slot = slot; 805 best_rdev = rdev; 806 } 807 if (!do_balance) 808 /* Must read from here */ 809 break; 810 } 811 continue; 812 } else 813 best_good_sectors = sectors; 814 815 if (!do_balance) 816 break; 817 818 /* This optimisation is debatable, and completely destroys 819 * sequential read speed for 'far copies' arrays. So only 820 * keep it for 'near' arrays, and review those later. 
821 */ 822 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending)) 823 break; 824 825 /* for far > 1 always use the lowest address */ 826 if (geo->far_copies > 1) 827 new_distance = r10_bio->devs[slot].addr; 828 else 829 new_distance = abs(r10_bio->devs[slot].addr - 830 conf->mirrors[disk].head_position); 831 if (new_distance < best_dist) { 832 best_dist = new_distance; 833 best_slot = slot; 834 best_rdev = rdev; 835 } 836 } 837 if (slot >= conf->copies) { 838 slot = best_slot; 839 rdev = best_rdev; 840 } 841 842 if (slot >= 0) { 843 atomic_inc(&rdev->nr_pending); 844 if (test_bit(Faulty, &rdev->flags)) { 845 /* Cannot risk returning a device that failed 846 * before we inc'ed nr_pending 847 */ 848 rdev_dec_pending(rdev, conf->mddev); 849 goto retry; 850 } 851 r10_bio->read_slot = slot; 852 } else 853 rdev = NULL; 854 rcu_read_unlock(); 855 *max_sectors = best_good_sectors; 856 857 return rdev; 858} 859 860int md_raid10_congested(struct mddev *mddev, int bits) 861{ 862 struct r10conf *conf = mddev->private; 863 int i, ret = 0; 864 865 if ((bits & (1 << BDI_async_congested)) && 866 conf->pending_count >= max_queued_requests) 867 return 1; 868 869 rcu_read_lock(); 870 for (i = 0; 871 (i < conf->geo.raid_disks || i < conf->prev.raid_disks) 872 && ret == 0; 873 i++) { 874 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 875 if (rdev && !test_bit(Faulty, &rdev->flags)) { 876 struct request_queue *q = bdev_get_queue(rdev->bdev); 877 878 ret |= bdi_congested(&q->backing_dev_info, bits); 879 } 880 } 881 rcu_read_unlock(); 882 return ret; 883} 884EXPORT_SYMBOL_GPL(md_raid10_congested); 885 886static int raid10_congested(void *data, int bits) 887{ 888 struct mddev *mddev = data; 889 890 return mddev_congested(mddev, bits) || 891 md_raid10_congested(mddev, bits); 892} 893 894static void flush_pending_writes(struct r10conf *conf) 895{ 896 /* Any writes that have been queued but are awaiting 897 * bitmap updates get flushed here. 898 */ 899 spin_lock_irq(&conf->device_lock); 900 901 if (conf->pending_bio_list.head) { 902 struct bio *bio; 903 bio = bio_list_get(&conf->pending_bio_list); 904 conf->pending_count = 0; 905 spin_unlock_irq(&conf->device_lock); 906 /* flush any pending bitmap writes to disk 907 * before proceeding w/ I/O */ 908 bitmap_unplug(conf->mddev->bitmap); 909 wake_up(&conf->wait_barrier); 910 911 while (bio) { /* submit pending writes */ 912 struct bio *next = bio->bi_next; 913 bio->bi_next = NULL; 914 generic_make_request(bio); 915 bio = next; 916 } 917 } else 918 spin_unlock_irq(&conf->device_lock); 919} 920 921/* Barriers.... 922 * Sometimes we need to suspend IO while we do something else, 923 * either some resync/recovery, or reconfigure the array. 924 * To do this we raise a 'barrier'. 925 * The 'barrier' is a counter that can be raised multiple times 926 * to count how many activities are happening which preclude 927 * normal IO. 928 * We can only raise the barrier if there is no pending IO. 929 * i.e. if nr_pending == 0. 930 * We choose only to raise the barrier if no-one is waiting for the 931 * barrier to go down. This means that as soon as an IO request 932 * is ready, no other operations which require a barrier will start 933 * until the IO request has had a chance. 934 * 935 * So: regular IO calls 'wait_barrier'. When that returns there 936 * is no backgroup IO happening, It must arrange to call 937 * allow_barrier when it has finished its IO. 938 * backgroup IO calls must call raise_barrier. Once that returns 939 * there is no normal IO happeing. 
It must arrange to call 940 * lower_barrier when the particular background IO completes. 941 */ 942 943static void raise_barrier(struct r10conf *conf, int force) 944{ 945 BUG_ON(force && !conf->barrier); 946 spin_lock_irq(&conf->resync_lock); 947 948 /* Wait until no block IO is waiting (unless 'force') */ 949 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, 950 conf->resync_lock, ); 951 952 /* block any new IO from starting */ 953 conf->barrier++; 954 955 /* Now wait for all pending IO to complete */ 956 wait_event_lock_irq(conf->wait_barrier, 957 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 958 conf->resync_lock, ); 959 960 spin_unlock_irq(&conf->resync_lock); 961} 962 963static void lower_barrier(struct r10conf *conf) 964{ 965 unsigned long flags; 966 spin_lock_irqsave(&conf->resync_lock, flags); 967 conf->barrier--; 968 spin_unlock_irqrestore(&conf->resync_lock, flags); 969 wake_up(&conf->wait_barrier); 970} 971 972static void wait_barrier(struct r10conf *conf) 973{ 974 spin_lock_irq(&conf->resync_lock); 975 if (conf->barrier) { 976 conf->nr_waiting++; 977 /* Wait for the barrier to drop. 978 * However if there are already pending 979 * requests (preventing the barrier from 980 * rising completely), and the 981 * pre-process bio queue isn't empty, 982 * then don't wait, as we need to empty 983 * that queue to get the nr_pending 984 * count down. 985 */ 986 wait_event_lock_irq(conf->wait_barrier, 987 !conf->barrier || 988 (conf->nr_pending && 989 current->bio_list && 990 !bio_list_empty(current->bio_list)), 991 conf->resync_lock, 992 ); 993 conf->nr_waiting--; 994 } 995 conf->nr_pending++; 996 spin_unlock_irq(&conf->resync_lock); 997} 998 999static void allow_barrier(struct r10conf *conf) 1000{ 1001 unsigned long flags; 1002 spin_lock_irqsave(&conf->resync_lock, flags); 1003 conf->nr_pending--; 1004 spin_unlock_irqrestore(&conf->resync_lock, flags); 1005 wake_up(&conf->wait_barrier); 1006} 1007 1008static void freeze_array(struct r10conf *conf) 1009{ 1010 /* stop syncio and normal IO and wait for everything to 1011 * go quiet. 1012 * We increment barrier and nr_waiting, and then 1013 * wait until nr_pending match nr_queued+1 1014 * This is called in the context of one normal IO request 1015 * that has failed. Thus any sync request that might be pending 1016 * will be blocked by nr_pending, and we need to wait for 1017 * pending IO requests to complete or be queued for re-try. 1018 * Thus the number queued (nr_queued) plus this request (1) 1019 * must match the number of pending IOs (nr_pending) before 1020 * we continue. 
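 *
 * A typical pairing (illustrative sketch, not a literal call site):
 *	freeze_array(conf);
 *	... repair or reconfigure while the array is quiesced ...
 *	unfreeze_array(conf);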
1021 */ 1022 spin_lock_irq(&conf->resync_lock); 1023 conf->barrier++; 1024 conf->nr_waiting++; 1025 wait_event_lock_irq(conf->wait_barrier, 1026 conf->nr_pending == conf->nr_queued+1, 1027 conf->resync_lock, 1028 flush_pending_writes(conf)); 1029 1030 spin_unlock_irq(&conf->resync_lock); 1031} 1032 1033static void unfreeze_array(struct r10conf *conf) 1034{ 1035 /* reverse the effect of the freeze */ 1036 spin_lock_irq(&conf->resync_lock); 1037 conf->barrier--; 1038 conf->nr_waiting--; 1039 wake_up(&conf->wait_barrier); 1040 spin_unlock_irq(&conf->resync_lock); 1041} 1042 1043static sector_t choose_data_offset(struct r10bio *r10_bio, 1044 struct md_rdev *rdev) 1045{ 1046 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || 1047 test_bit(R10BIO_Previous, &r10_bio->state)) 1048 return rdev->data_offset; 1049 else 1050 return rdev->new_data_offset; 1051} 1052 1053static void make_request(struct mddev *mddev, struct bio * bio) 1054{ 1055 struct r10conf *conf = mddev->private; 1056 struct r10bio *r10_bio; 1057 struct bio *read_bio; 1058 int i; 1059 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); 1060 int chunk_sects = chunk_mask + 1; 1061 const int rw = bio_data_dir(bio); 1062 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 1063 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); 1064 unsigned long flags; 1065 struct md_rdev *blocked_rdev; 1066 int sectors_handled; 1067 int max_sectors; 1068 int sectors; 1069 1070 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 1071 md_flush_request(mddev, bio); 1072 return; 1073 } 1074 1075 /* If this request crosses a chunk boundary, we need to 1076 * split it. This will only happen for 1 PAGE (or less) requests. 1077 */ 1078 if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9) 1079 > chunk_sects 1080 && (conf->geo.near_copies < conf->geo.raid_disks 1081 || conf->prev.near_copies < conf->prev.raid_disks))) { 1082 struct bio_pair *bp; 1083 /* Sanity check -- queue functions should prevent this happening */ 1084 if (bio->bi_vcnt != 1 || 1085 bio->bi_idx != 0) 1086 goto bad_map; 1087 /* This is a one page bio that upper layers 1088 * refuse to split for us, so we need to split it. 1089 */ 1090 bp = bio_split(bio, 1091 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); 1092 1093 /* Each of these 'make_request' calls will call 'wait_barrier'. 1094 * If the first succeeds but the second blocks due to the resync 1095 * thread raising the barrier, we will deadlock because the 1096 * IO to the underlying device will be queued in generic_make_request 1097 * and will never complete, so will never reduce nr_pending. 1098 * So increment nr_waiting here so no new raise_barriers will 1099 * succeed, and so the second wait_barrier cannot block. 
1100 */ 1101 spin_lock_irq(&conf->resync_lock); 1102 conf->nr_waiting++; 1103 spin_unlock_irq(&conf->resync_lock); 1104 1105 make_request(mddev, &bp->bio1); 1106 make_request(mddev, &bp->bio2); 1107 1108 spin_lock_irq(&conf->resync_lock); 1109 conf->nr_waiting--; 1110 wake_up(&conf->wait_barrier); 1111 spin_unlock_irq(&conf->resync_lock); 1112 1113 bio_pair_release(bp); 1114 return; 1115 bad_map: 1116 printk("md/raid10:%s: make_request bug: can't convert block across chunks" 1117 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, 1118 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 1119 1120 bio_io_error(bio); 1121 return; 1122 } 1123 1124 md_write_start(mddev, bio); 1125 1126 /* 1127 * Register the new request and wait if the reconstruction 1128 * thread has put up a bar for new requests. 1129 * Continue immediately if no resync is active currently. 1130 */ 1131 wait_barrier(conf); 1132 1133 sectors = bio->bi_size >> 9; 1134 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1135 bio->bi_sector < conf->reshape_progress && 1136 bio->bi_sector + sectors > conf->reshape_progress) { 1137 /* IO spans the reshape position. Need to wait for 1138 * reshape to pass 1139 */ 1140 allow_barrier(conf); 1141 wait_event(conf->wait_barrier, 1142 conf->reshape_progress <= bio->bi_sector || 1143 conf->reshape_progress >= bio->bi_sector + sectors); 1144 wait_barrier(conf); 1145 } 1146 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1147 bio_data_dir(bio) == WRITE && 1148 (mddev->reshape_backwards 1149 ? (bio->bi_sector < conf->reshape_safe && 1150 bio->bi_sector + sectors > conf->reshape_progress) 1151 : (bio->bi_sector + sectors > conf->reshape_safe && 1152 bio->bi_sector < conf->reshape_progress))) { 1153 /* Need to update reshape_position in metadata */ 1154 mddev->reshape_position = conf->reshape_progress; 1155 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1156 set_bit(MD_CHANGE_PENDING, &mddev->flags); 1157 md_wakeup_thread(mddev->thread); 1158 wait_event(mddev->sb_wait, 1159 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 1160 1161 conf->reshape_safe = mddev->reshape_position; 1162 } 1163 1164 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); 1165 1166 r10_bio->master_bio = bio; 1167 r10_bio->sectors = sectors; 1168 1169 r10_bio->mddev = mddev; 1170 r10_bio->sector = bio->bi_sector; 1171 r10_bio->state = 0; 1172 1173 /* We might need to issue multiple reads to different 1174 * devices if there are bad blocks around, so we keep 1175 * track of the number of reads in bio->bi_phys_segments. 1176 * If this is 0, there is only one r10_bio and no locking 1177 * will be needed when the request completes. If it is 1178 * non-zero, then it is the number of not-completed requests. 
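 *
 * (Illustrative: a read that must be split once, e.g. around a bad
 * block, is issued as two r10_bios, so bi_phys_segments is set to 2
 * and raid_end_bio_io() completes the master bio when it reaches 0.)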
1179 */ 1180 bio->bi_phys_segments = 0; 1181 clear_bit(BIO_SEG_VALID, &bio->bi_flags); 1182 1183 if (rw == READ) { 1184 /* 1185 * read balancing logic: 1186 */ 1187 struct md_rdev *rdev; 1188 int slot; 1189 1190read_again: 1191 rdev = read_balance(conf, r10_bio, &max_sectors); 1192 if (!rdev) { 1193 raid_end_bio_io(r10_bio); 1194 return; 1195 } 1196 slot = r10_bio->read_slot; 1197 1198 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1199 md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector, 1200 max_sectors); 1201 1202 r10_bio->devs[slot].bio = read_bio; 1203 r10_bio->devs[slot].rdev = rdev; 1204 1205 read_bio->bi_sector = r10_bio->devs[slot].addr + 1206 choose_data_offset(r10_bio, rdev); 1207 read_bio->bi_bdev = rdev->bdev; 1208 read_bio->bi_end_io = raid10_end_read_request; 1209 read_bio->bi_rw = READ | do_sync; 1210 read_bio->bi_private = r10_bio; 1211 1212 if (max_sectors < r10_bio->sectors) { 1213 /* Could not read all from this device, so we will 1214 * need another r10_bio. 1215 */ 1216 sectors_handled = (r10_bio->sectors + max_sectors 1217 - bio->bi_sector); 1218 r10_bio->sectors = max_sectors; 1219 spin_lock_irq(&conf->device_lock); 1220 if (bio->bi_phys_segments == 0) 1221 bio->bi_phys_segments = 2; 1222 else 1223 bio->bi_phys_segments++; 1224 spin_unlock(&conf->device_lock); 1225 /* Cannot call generic_make_request directly 1226 * as that will be queued in __generic_make_request 1227 * and subsequent mempool_alloc might block 1228 * waiting for it. so hand bio over to raid10d. 1229 */ 1230 reschedule_retry(r10_bio); 1231 1232 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); 1233 1234 r10_bio->master_bio = bio; 1235 r10_bio->sectors = ((bio->bi_size >> 9) 1236 - sectors_handled); 1237 r10_bio->state = 0; 1238 r10_bio->mddev = mddev; 1239 r10_bio->sector = bio->bi_sector + sectors_handled; 1240 goto read_again; 1241 } else 1242 generic_make_request(read_bio); 1243 return; 1244 } 1245 1246 /* 1247 * WRITE: 1248 */ 1249 if (conf->pending_count >= max_queued_requests) { 1250 md_wakeup_thread(mddev->thread); 1251 wait_event(conf->wait_barrier, 1252 conf->pending_count < max_queued_requests); 1253 } 1254 /* first select target devices under rcu_lock and 1255 * inc refcount on their rdev. Record them by setting 1256 * bios[x] to bio 1257 * If there are known/acknowledged bad blocks on any device 1258 * on which we have seen a write error, we want to avoid 1259 * writing to those blocks. This potentially requires several 1260 * writes to write around the bad blocks. Each set of writes 1261 * gets its own r10_bio with a set of bios attached. The number 1262 * of r10_bios is recored in bio->bi_phys_segments just as with 1263 * the read case. 
1264 */ 1265 1266 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ 1267 raid10_find_phys(conf, r10_bio); 1268retry_write: 1269 blocked_rdev = NULL; 1270 rcu_read_lock(); 1271 max_sectors = r10_bio->sectors; 1272 1273 for (i = 0; i < conf->copies; i++) { 1274 int d = r10_bio->devs[i].devnum; 1275 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); 1276 struct md_rdev *rrdev = rcu_dereference( 1277 conf->mirrors[d].replacement); 1278 if (rdev == rrdev) 1279 rrdev = NULL; 1280 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 1281 atomic_inc(&rdev->nr_pending); 1282 blocked_rdev = rdev; 1283 break; 1284 } 1285 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) { 1286 atomic_inc(&rrdev->nr_pending); 1287 blocked_rdev = rrdev; 1288 break; 1289 } 1290 if (rdev && (test_bit(Faulty, &rdev->flags) 1291 || test_bit(Unmerged, &rdev->flags))) 1292 rdev = NULL; 1293 if (rrdev && (test_bit(Faulty, &rrdev->flags) 1294 || test_bit(Unmerged, &rrdev->flags))) 1295 rrdev = NULL; 1296 1297 r10_bio->devs[i].bio = NULL; 1298 r10_bio->devs[i].repl_bio = NULL; 1299 1300 if (!rdev && !rrdev) { 1301 set_bit(R10BIO_Degraded, &r10_bio->state); 1302 continue; 1303 } 1304 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { 1305 sector_t first_bad; 1306 sector_t dev_sector = r10_bio->devs[i].addr; 1307 int bad_sectors; 1308 int is_bad; 1309 1310 is_bad = is_badblock(rdev, dev_sector, 1311 max_sectors, 1312 &first_bad, &bad_sectors); 1313 if (is_bad < 0) { 1314 /* Mustn't write here until the bad block 1315 * is acknowledged 1316 */ 1317 atomic_inc(&rdev->nr_pending); 1318 set_bit(BlockedBadBlocks, &rdev->flags); 1319 blocked_rdev = rdev; 1320 break; 1321 } 1322 if (is_bad && first_bad <= dev_sector) { 1323 /* Cannot write here at all */ 1324 bad_sectors -= (dev_sector - first_bad); 1325 if (bad_sectors < max_sectors) 1326 /* Mustn't write more than bad_sectors 1327 * to other devices yet 1328 */ 1329 max_sectors = bad_sectors; 1330 /* We don't set R10BIO_Degraded as that 1331 * only applies if the disk is missing, 1332 * so it might be re-added, and we want to 1333 * know to recover this chunk. 1334 * In this case the device is here, and the 1335 * fact that this chunk is not in-sync is 1336 * recorded in the bad block log. 
			 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - dev_sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		if (rdev) {
			r10_bio->devs[i].bio = bio;
			atomic_inc(&rdev->nr_pending);
		}
		if (rrdev) {
			r10_bio->devs[i].repl_bio = bio;
			atomic_inc(&rrdev->nr_pending);
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */
		int j;
		int d;

		for (j = 0; j < i; j++) {
			if (r10_bio->devs[j].bio) {
				d = r10_bio->devs[j].devnum;
				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
			}
			if (r10_bio->devs[j].repl_bio) {
				struct md_rdev *rdev;
				d = r10_bio->devs[j].devnum;
				rdev = conf->mirrors[d].replacement;
				if (!rdev) {
					/* Race with remove_disk */
					smp_mb();
					rdev = conf->mirrors[d].rdev;
				}
				rdev_dec_pending(rdev, mddev);
			}
		}
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	if (max_sectors < r10_bio->sectors) {
		/* We are splitting this into multiple parts, so
		 * we need to prepare for allocating another r10_bio.
		 */
		r10_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;

	atomic_set(&r10_bio->remaining, 1);
	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);

	for (i = 0; i < conf->copies; i++) {
		struct bio *mbio;
		int d = r10_bio->devs[i].devnum;
		if (r10_bio->devs[i].bio) {
			struct md_rdev *rdev = conf->mirrors[d].rdev;
			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
			md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
				    max_sectors);
			r10_bio->devs[i].bio = mbio;

			mbio->bi_sector = (r10_bio->devs[i].addr +
					   choose_data_offset(r10_bio,
							      rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io = raid10_end_write_request;
			mbio->bi_rw = WRITE | do_sync | do_fua;
			mbio->bi_private = r10_bio;

			atomic_inc(&r10_bio->remaining);
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			if (!mddev_check_plugged(mddev))
				md_wakeup_thread(mddev->thread);
		}

		if (r10_bio->devs[i].repl_bio) {
			struct md_rdev *rdev = conf->mirrors[d].replacement;
			if (rdev == NULL) {
				/* Replacement just got moved to main 'rdev' */
				smp_mb();
				rdev = conf->mirrors[d].rdev;
			}
			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
			md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
				    max_sectors);
			r10_bio->devs[i].repl_bio = mbio;

			mbio->bi_sector = (r10_bio->devs[i].addr +
					   choose_data_offset(
						   r10_bio, rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io = raid10_end_write_request;
			mbio->bi_rw = WRITE | do_sync | do_fua;
			mbio->bi_private = r10_bio;

			atomic_inc(&r10_bio->remaining);
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			if (!mddev_check_plugged(mddev))
				md_wakeup_thread(mddev->thread);
		}
	}

	/* Don't remove the bias on 'remaining' (one_write_done) until
	 * after checking if we need to go around again.
	 */

	if (sectors_handled < (bio->bi_size >> 9)) {
		one_write_done(r10_bio);
		/* We need another r10_bio.  It has already been counted
		 * in bio->bi_phys_segments.
		 */
		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

		r10_bio->master_bio = bio;
		r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;

		r10_bio->mddev = mddev;
		r10_bio->sector = bio->bi_sector + sectors_handled;
		r10_bio->state = 0;
		goto retry_write;
	}
	one_write_done(r10_bio);

	/* In case raid10d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;
	int i;

	if (conf->geo.near_copies < conf->geo.raid_disks)
		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
	if (conf->geo.near_copies > 1)
		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
	if (conf->geo.far_copies > 1) {
		if (conf->geo.far_offset)
			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
		else
			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
	}
	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
		   conf->geo.raid_disks - mddev->degraded);
	for (i = 0; i < conf->geo.raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->mirrors[i].rdev &&
			   test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}

/* check if there are enough drives for
 * every block to appear on at least one.
 * Don't consider the device numbered 'ignore'
 * as we might be about to remove it.
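 *
 * Illustrative example, assuming a plain 'near' layout: with
 * raid_disks=4 and near_copies=2 the copy sets are {0,1} and {2,3},
 * and every set must keep at least one working device.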
1554 */ 1555static int _enough(struct r10conf *conf, struct geom *geo, int ignore) 1556{ 1557 int first = 0; 1558 1559 do { 1560 int n = conf->copies; 1561 int cnt = 0; 1562 int this = first; 1563 while (n--) { 1564 if (conf->mirrors[this].rdev && 1565 this != ignore) 1566 cnt++; 1567 this = (this+1) % geo->raid_disks; 1568 } 1569 if (cnt == 0) 1570 return 0; 1571 first = (first + geo->near_copies) % geo->raid_disks; 1572 } while (first != 0); 1573 return 1; 1574} 1575 1576static int enough(struct r10conf *conf, int ignore) 1577{ 1578 return _enough(conf, &conf->geo, ignore) && 1579 _enough(conf, &conf->prev, ignore); 1580} 1581 1582static void error(struct mddev *mddev, struct md_rdev *rdev) 1583{ 1584 char b[BDEVNAME_SIZE]; 1585 struct r10conf *conf = mddev->private; 1586 1587 /* 1588 * If it is not operational, then we have already marked it as dead 1589 * else if it is the last working disks, ignore the error, let the 1590 * next level up know. 1591 * else mark the drive as failed 1592 */ 1593 if (test_bit(In_sync, &rdev->flags) 1594 && !enough(conf, rdev->raid_disk)) 1595 /* 1596 * Don't fail the drive, just return an IO error. 1597 */ 1598 return; 1599 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1600 unsigned long flags; 1601 spin_lock_irqsave(&conf->device_lock, flags); 1602 mddev->degraded++; 1603 spin_unlock_irqrestore(&conf->device_lock, flags); 1604 /* 1605 * if recovery is running, make sure it aborts. 1606 */ 1607 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1608 } 1609 set_bit(Blocked, &rdev->flags); 1610 set_bit(Faulty, &rdev->flags); 1611 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1612 printk(KERN_ALERT 1613 "md/raid10:%s: Disk failure on %s, disabling device.\n" 1614 "md/raid10:%s: Operation continuing on %d devices.\n", 1615 mdname(mddev), bdevname(rdev->bdev, b), 1616 mdname(mddev), conf->geo.raid_disks - mddev->degraded); 1617} 1618 1619static void print_conf(struct r10conf *conf) 1620{ 1621 int i; 1622 struct raid10_info *tmp; 1623 1624 printk(KERN_DEBUG "RAID10 conf printout:\n"); 1625 if (!conf) { 1626 printk(KERN_DEBUG "(!conf)\n"); 1627 return; 1628 } 1629 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, 1630 conf->geo.raid_disks); 1631 1632 for (i = 0; i < conf->geo.raid_disks; i++) { 1633 char b[BDEVNAME_SIZE]; 1634 tmp = conf->mirrors + i; 1635 if (tmp->rdev) 1636 printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", 1637 i, !test_bit(In_sync, &tmp->rdev->flags), 1638 !test_bit(Faulty, &tmp->rdev->flags), 1639 bdevname(tmp->rdev->bdev,b)); 1640 } 1641} 1642 1643static void close_sync(struct r10conf *conf) 1644{ 1645 wait_barrier(conf); 1646 allow_barrier(conf); 1647 1648 mempool_destroy(conf->r10buf_pool); 1649 conf->r10buf_pool = NULL; 1650} 1651 1652static int raid10_spare_active(struct mddev *mddev) 1653{ 1654 int i; 1655 struct r10conf *conf = mddev->private; 1656 struct raid10_info *tmp; 1657 int count = 0; 1658 unsigned long flags; 1659 1660 /* 1661 * Find all non-in_sync disks within the RAID10 configuration 1662 * and mark them in_sync 1663 */ 1664 for (i = 0; i < conf->geo.raid_disks; i++) { 1665 tmp = conf->mirrors + i; 1666 if (tmp->replacement 1667 && tmp->replacement->recovery_offset == MaxSector 1668 && !test_bit(Faulty, &tmp->replacement->flags) 1669 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 1670 /* Replacement has just become active */ 1671 if (!tmp->rdev 1672 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 1673 count++; 1674 if (tmp->rdev) { 1675 /* Replaced device not technically 
faulty, 1676 * but we need to be sure it gets removed 1677 * and never re-added. 1678 */ 1679 set_bit(Faulty, &tmp->rdev->flags); 1680 sysfs_notify_dirent_safe( 1681 tmp->rdev->sysfs_state); 1682 } 1683 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 1684 } else if (tmp->rdev 1685 && !test_bit(Faulty, &tmp->rdev->flags) 1686 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1687 count++; 1688 sysfs_notify_dirent(tmp->rdev->sysfs_state); 1689 } 1690 } 1691 spin_lock_irqsave(&conf->device_lock, flags); 1692 mddev->degraded -= count; 1693 spin_unlock_irqrestore(&conf->device_lock, flags); 1694 1695 print_conf(conf); 1696 return count; 1697} 1698 1699 1700static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) 1701{ 1702 struct r10conf *conf = mddev->private; 1703 int err = -EEXIST; 1704 int mirror; 1705 int first = 0; 1706 int last = conf->geo.raid_disks - 1; 1707 struct request_queue *q = bdev_get_queue(rdev->bdev); 1708 1709 if (mddev->recovery_cp < MaxSector) 1710 /* only hot-add to in-sync arrays, as recovery is 1711 * very different from resync 1712 */ 1713 return -EBUSY; 1714 if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1)) 1715 return -EINVAL; 1716 1717 if (rdev->raid_disk >= 0) 1718 first = last = rdev->raid_disk; 1719 1720 if (q->merge_bvec_fn) { 1721 set_bit(Unmerged, &rdev->flags); 1722 mddev->merge_check_needed = 1; 1723 } 1724 1725 if (rdev->saved_raid_disk >= first && 1726 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 1727 mirror = rdev->saved_raid_disk; 1728 else 1729 mirror = first; 1730 for ( ; mirror <= last ; mirror++) { 1731 struct raid10_info *p = &conf->mirrors[mirror]; 1732 if (p->recovery_disabled == mddev->recovery_disabled) 1733 continue; 1734 if (p->rdev) { 1735 if (!test_bit(WantReplacement, &p->rdev->flags) || 1736 p->replacement != NULL) 1737 continue; 1738 clear_bit(In_sync, &rdev->flags); 1739 set_bit(Replacement, &rdev->flags); 1740 rdev->raid_disk = mirror; 1741 err = 0; 1742 disk_stack_limits(mddev->gendisk, rdev->bdev, 1743 rdev->data_offset << 9); 1744 conf->fullsync = 1; 1745 rcu_assign_pointer(p->replacement, rdev); 1746 break; 1747 } 1748 1749 disk_stack_limits(mddev->gendisk, rdev->bdev, 1750 rdev->data_offset << 9); 1751 1752 p->head_position = 0; 1753 p->recovery_disabled = mddev->recovery_disabled - 1; 1754 rdev->raid_disk = mirror; 1755 err = 0; 1756 if (rdev->saved_raid_disk != mirror) 1757 conf->fullsync = 1; 1758 rcu_assign_pointer(p->rdev, rdev); 1759 break; 1760 } 1761 if (err == 0 && test_bit(Unmerged, &rdev->flags)) { 1762 /* Some requests might not have seen this new 1763 * merge_bvec_fn. We must wait for them to complete 1764 * before merging the device fully. 1765 * First we make sure any code which has tested 1766 * our function has submitted the request, then 1767 * we wait for all outstanding requests to complete. 
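 *
 * (synchronize_sched() below handles the first step; the
 * raise_barrier()/lower_barrier() pair handles the second.)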
1768 */ 1769 synchronize_sched(); 1770 raise_barrier(conf, 0); 1771 lower_barrier(conf); 1772 clear_bit(Unmerged, &rdev->flags); 1773 } 1774 md_integrity_add_rdev(rdev, mddev); 1775 print_conf(conf); 1776 return err; 1777} 1778 1779static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 1780{ 1781 struct r10conf *conf = mddev->private; 1782 int err = 0; 1783 int number = rdev->raid_disk; 1784 struct md_rdev **rdevp; 1785 struct raid10_info *p = conf->mirrors + number; 1786 1787 print_conf(conf); 1788 if (rdev == p->rdev) 1789 rdevp = &p->rdev; 1790 else if (rdev == p->replacement) 1791 rdevp = &p->replacement; 1792 else 1793 return 0; 1794 1795 if (test_bit(In_sync, &rdev->flags) || 1796 atomic_read(&rdev->nr_pending)) { 1797 err = -EBUSY; 1798 goto abort; 1799 } 1800 /* Only remove faulty devices if recovery 1801 * is not possible. 1802 */ 1803 if (!test_bit(Faulty, &rdev->flags) && 1804 mddev->recovery_disabled != p->recovery_disabled && 1805 (!p->replacement || p->replacement == rdev) && 1806 number < conf->geo.raid_disks && 1807 enough(conf, -1)) { 1808 err = -EBUSY; 1809 goto abort; 1810 } 1811 *rdevp = NULL; 1812 synchronize_rcu(); 1813 if (atomic_read(&rdev->nr_pending)) { 1814 /* lost the race, try later */ 1815 err = -EBUSY; 1816 *rdevp = rdev; 1817 goto abort; 1818 } else if (p->replacement) { 1819 /* We must have just cleared 'rdev' */ 1820 p->rdev = p->replacement; 1821 clear_bit(Replacement, &p->replacement->flags); 1822 smp_mb(); /* Make sure other CPUs may see both as identical 1823 * but will never see neither -- if they are careful. 1824 */ 1825 p->replacement = NULL; 1826 clear_bit(WantReplacement, &rdev->flags); 1827 } else 1828 /* We might have just remove the Replacement as faulty 1829 * Clear the flag just in case 1830 */ 1831 clear_bit(WantReplacement, &rdev->flags); 1832 1833 err = md_integrity_register(mddev); 1834 1835abort: 1836 1837 print_conf(conf); 1838 return err; 1839} 1840 1841 1842static void end_sync_read(struct bio *bio, int error) 1843{ 1844 struct r10bio *r10_bio = bio->bi_private; 1845 struct r10conf *conf = r10_bio->mddev->private; 1846 int d; 1847 1848 if (bio == r10_bio->master_bio) { 1849 /* this is a reshape read */ 1850 d = r10_bio->read_slot; /* really the read dev */ 1851 } else 1852 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); 1853 1854 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 1855 set_bit(R10BIO_Uptodate, &r10_bio->state); 1856 else 1857 /* The write handler will notice the lack of 1858 * R10BIO_Uptodate and record any errors etc 1859 */ 1860 atomic_add(r10_bio->sectors, 1861 &conf->mirrors[d].rdev->corrected_errors); 1862 1863 /* for reconstruct, we always reschedule after a read. 
1864 * for resync, only after all reads 1865 */ 1866 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); 1867 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || 1868 atomic_dec_and_test(&r10_bio->remaining)) { 1869 /* we have read all the blocks, 1870 * do the comparison in process context in raid10d 1871 */ 1872 reschedule_retry(r10_bio); 1873 } 1874} 1875 1876static void end_sync_request(struct r10bio *r10_bio) 1877{ 1878 struct mddev *mddev = r10_bio->mddev; 1879 1880 while (atomic_dec_and_test(&r10_bio->remaining)) { 1881 if (r10_bio->master_bio == NULL) { 1882 /* the primary of several recovery bios */ 1883 sector_t s = r10_bio->sectors; 1884 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1885 test_bit(R10BIO_WriteError, &r10_bio->state)) 1886 reschedule_retry(r10_bio); 1887 else 1888 put_buf(r10_bio); 1889 md_done_sync(mddev, s, 1); 1890 break; 1891 } else { 1892 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; 1893 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1894 test_bit(R10BIO_WriteError, &r10_bio->state)) 1895 reschedule_retry(r10_bio); 1896 else 1897 put_buf(r10_bio); 1898 r10_bio = r10_bio2; 1899 } 1900 } 1901} 1902 1903static void end_sync_write(struct bio *bio, int error) 1904{ 1905 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1906 struct r10bio *r10_bio = bio->bi_private; 1907 struct mddev *mddev = r10_bio->mddev; 1908 struct r10conf *conf = mddev->private; 1909 int d; 1910 sector_t first_bad; 1911 int bad_sectors; 1912 int slot; 1913 int repl; 1914 struct md_rdev *rdev = NULL; 1915 1916 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 1917 if (repl) 1918 rdev = conf->mirrors[d].replacement; 1919 else 1920 rdev = conf->mirrors[d].rdev; 1921 1922 if (!uptodate) { 1923 if (repl) 1924 md_error(mddev, rdev); 1925 else { 1926 set_bit(WriteErrorSeen, &rdev->flags); 1927 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 1928 set_bit(MD_RECOVERY_NEEDED, 1929 &rdev->mddev->recovery); 1930 set_bit(R10BIO_WriteError, &r10_bio->state); 1931 } 1932 } else if (is_badblock(rdev, 1933 r10_bio->devs[slot].addr, 1934 r10_bio->sectors, 1935 &first_bad, &bad_sectors)) 1936 set_bit(R10BIO_MadeGood, &r10_bio->state); 1937 1938 rdev_dec_pending(rdev, mddev); 1939 1940 end_sync_request(r10_bio); 1941} 1942 1943/* 1944 * Note: sync and recovery are handled very differently for raid10. 1945 * This code is for resync. 1946 * For resync, we read through virtual addresses and read all blocks. 1947 * If there is any error, we schedule a write. The lowest numbered 1948 * drive is authoritative. 1949 * However requests come for physical addresses, so we need to map. 1950 * For every physical address there are raid_disks/copies virtual addresses, 1951 * which is always at least one, but is not necessarily an integer. 1952 * This means that a physical address can span multiple chunks, so we may 1953 * have to submit multiple io requests for a single sync request.
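 *
 * For example (numbers purely illustrative): with raid_disks = 5 and
 * copies = 2 there are 5/2 = 2.5 virtual addresses per physical address,
 * so a physical range on one device can straddle a chunk boundary in the
 * virtual layout and needs more than one request to cover it.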
1954 */ 1955/* 1956 * We check if all blocks are in-sync and only write to blocks that 1957 * aren't in sync 1958 */ 1959static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) 1960{ 1961 struct r10conf *conf = mddev->private; 1962 int i, first; 1963 struct bio *tbio, *fbio; 1964 int vcnt; 1965 1966 atomic_set(&r10_bio->remaining, 1); 1967 1968 /* find the first device with a block */ 1969 for (i=0; i<conf->copies; i++) 1970 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) 1971 break; 1972 1973 if (i == conf->copies) 1974 goto done; 1975 1976 first = i; 1977 fbio = r10_bio->devs[i].bio; 1978 1979 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 1980 /* now find blocks with errors */ 1981 for (i=0 ; i < conf->copies ; i++) { 1982 int j, d; 1983 1984 tbio = r10_bio->devs[i].bio; 1985 1986 if (tbio->bi_end_io != end_sync_read) 1987 continue; 1988 if (i == first) 1989 continue; 1990 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { 1991 /* We know that the bi_io_vec layout is the same for 1992 * both 'first' and 'i', so we just compare them. 1993 * All vec entries are PAGE_SIZE; 1994 */ 1995 for (j = 0; j < vcnt; j++) 1996 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), 1997 page_address(tbio->bi_io_vec[j].bv_page), 1998 fbio->bi_io_vec[j].bv_len)) 1999 break; 2000 if (j == vcnt) 2001 continue; 2002 mddev->resync_mismatches += r10_bio->sectors; 2003 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2004 /* Don't fix anything. */ 2005 continue; 2006 } 2007 /* Ok, we need to write this bio, either to correct an 2008 * inconsistency or to correct an unreadable block. 2009 * First we need to fixup bv_offset, bv_len and 2010 * bi_vecs, as the read request might have corrupted these 2011 */ 2012 tbio->bi_vcnt = vcnt; 2013 tbio->bi_size = r10_bio->sectors << 9; 2014 tbio->bi_idx = 0; 2015 tbio->bi_phys_segments = 0; 2016 tbio->bi_flags &= ~(BIO_POOL_MASK - 1); 2017 tbio->bi_flags |= 1 << BIO_UPTODATE; 2018 tbio->bi_next = NULL; 2019 tbio->bi_rw = WRITE; 2020 tbio->bi_private = r10_bio; 2021 tbio->bi_sector = r10_bio->devs[i].addr; 2022 2023 for (j=0; j < vcnt ; j++) { 2024 tbio->bi_io_vec[j].bv_offset = 0; 2025 tbio->bi_io_vec[j].bv_len = PAGE_SIZE; 2026 2027 memcpy(page_address(tbio->bi_io_vec[j].bv_page), 2028 page_address(fbio->bi_io_vec[j].bv_page), 2029 PAGE_SIZE); 2030 } 2031 tbio->bi_end_io = end_sync_write; 2032 2033 d = r10_bio->devs[i].devnum; 2034 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2035 atomic_inc(&r10_bio->remaining); 2036 md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9); 2037 2038 tbio->bi_sector += conf->mirrors[d].rdev->data_offset; 2039 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2040 generic_make_request(tbio); 2041 } 2042 2043 /* Now write out to any replacement devices 2044 * that are active 2045 */ 2046 for (i = 0; i < conf->copies; i++) { 2047 int j, d; 2048 2049 tbio = r10_bio->devs[i].repl_bio; 2050 if (!tbio || !tbio->bi_end_io) 2051 continue; 2052 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write 2053 && r10_bio->devs[i].bio != fbio) 2054 for (j = 0; j < vcnt; j++) 2055 memcpy(page_address(tbio->bi_io_vec[j].bv_page), 2056 page_address(fbio->bi_io_vec[j].bv_page), 2057 PAGE_SIZE); 2058 d = r10_bio->devs[i].devnum; 2059 atomic_inc(&r10_bio->remaining); 2060 md_sync_acct(conf->mirrors[d].replacement->bdev, 2061 tbio->bi_size >> 9); 2062 generic_make_request(tbio); 2063 } 2064 2065done: 2066 if (atomic_dec_and_test(&r10_bio->remaining)) { 2067 md_done_sync(mddev, 
r10_bio->sectors, 1); 2068 put_buf(r10_bio); 2069 } 2070} 2071 2072/* 2073 * Now for the recovery code. 2074 * Recovery happens across physical sectors. 2075 * We recover all non-is_sync drives by finding the virtual address of 2076 * each, and then choose a working drive that also has that virt address. 2077 * There is a separate r10_bio for each non-in_sync drive. 2078 * Only the first two slots are in use. The first for reading, 2079 * The second for writing. 2080 * 2081 */ 2082static void fix_recovery_read_error(struct r10bio *r10_bio) 2083{ 2084 /* We got a read error during recovery. 2085 * We repeat the read in smaller page-sized sections. 2086 * If a read succeeds, write it to the new device or record 2087 * a bad block if we cannot. 2088 * If a read fails, record a bad block on both old and 2089 * new devices. 2090 */ 2091 struct mddev *mddev = r10_bio->mddev; 2092 struct r10conf *conf = mddev->private; 2093 struct bio *bio = r10_bio->devs[0].bio; 2094 sector_t sect = 0; 2095 int sectors = r10_bio->sectors; 2096 int idx = 0; 2097 int dr = r10_bio->devs[0].devnum; 2098 int dw = r10_bio->devs[1].devnum; 2099 2100 while (sectors) { 2101 int s = sectors; 2102 struct md_rdev *rdev; 2103 sector_t addr; 2104 int ok; 2105 2106 if (s > (PAGE_SIZE>>9)) 2107 s = PAGE_SIZE >> 9; 2108 2109 rdev = conf->mirrors[dr].rdev; 2110 addr = r10_bio->devs[0].addr + sect, 2111 ok = sync_page_io(rdev, 2112 addr, 2113 s << 9, 2114 bio->bi_io_vec[idx].bv_page, 2115 READ, false); 2116 if (ok) { 2117 rdev = conf->mirrors[dw].rdev; 2118 addr = r10_bio->devs[1].addr + sect; 2119 ok = sync_page_io(rdev, 2120 addr, 2121 s << 9, 2122 bio->bi_io_vec[idx].bv_page, 2123 WRITE, false); 2124 if (!ok) { 2125 set_bit(WriteErrorSeen, &rdev->flags); 2126 if (!test_and_set_bit(WantReplacement, 2127 &rdev->flags)) 2128 set_bit(MD_RECOVERY_NEEDED, 2129 &rdev->mddev->recovery); 2130 } 2131 } 2132 if (!ok) { 2133 /* We don't worry if we cannot set a bad block - 2134 * it really is bad so there is no loss in not 2135 * recording it yet 2136 */ 2137 rdev_set_badblocks(rdev, addr, s, 0); 2138 2139 if (rdev != conf->mirrors[dw].rdev) { 2140 /* need bad block on destination too */ 2141 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; 2142 addr = r10_bio->devs[1].addr + sect; 2143 ok = rdev_set_badblocks(rdev2, addr, s, 0); 2144 if (!ok) { 2145 /* just abort the recovery */ 2146 printk(KERN_NOTICE 2147 "md/raid10:%s: recovery aborted" 2148 " due to read error\n", 2149 mdname(mddev)); 2150 2151 conf->mirrors[dw].recovery_disabled 2152 = mddev->recovery_disabled; 2153 set_bit(MD_RECOVERY_INTR, 2154 &mddev->recovery); 2155 break; 2156 } 2157 } 2158 } 2159 2160 sectors -= s; 2161 sect += s; 2162 idx++; 2163 } 2164} 2165 2166static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) 2167{ 2168 struct r10conf *conf = mddev->private; 2169 int d; 2170 struct bio *wbio, *wbio2; 2171 2172 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { 2173 fix_recovery_read_error(r10_bio); 2174 end_sync_request(r10_bio); 2175 return; 2176 } 2177 2178 /* 2179 * share the pages with the first bio 2180 * and submit the write request 2181 */ 2182 d = r10_bio->devs[1].devnum; 2183 wbio = r10_bio->devs[1].bio; 2184 wbio2 = r10_bio->devs[1].repl_bio; 2185 if (wbio->bi_end_io) { 2186 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2187 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9); 2188 generic_make_request(wbio); 2189 } 2190 if (wbio2 && wbio2->bi_end_io) { 2191 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2192 
md_sync_acct(conf->mirrors[d].replacement->bdev, 2193 wbio2->bi_size >> 9); 2194 generic_make_request(wbio2); 2195 } 2196} 2197 2198 2199/* 2200 * Used by fix_read_error() to decay the per rdev read_errors. 2201 * We halve the read error count for every hour that has elapsed 2202 * since the last recorded read error. 2203 * 2204 */ 2205static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) 2206{ 2207 struct timespec cur_time_mon; 2208 unsigned long hours_since_last; 2209 unsigned int read_errors = atomic_read(&rdev->read_errors); 2210 2211 ktime_get_ts(&cur_time_mon); 2212 2213 if (rdev->last_read_error.tv_sec == 0 && 2214 rdev->last_read_error.tv_nsec == 0) { 2215 /* first time we've seen a read error */ 2216 rdev->last_read_error = cur_time_mon; 2217 return; 2218 } 2219 2220 hours_since_last = (cur_time_mon.tv_sec - 2221 rdev->last_read_error.tv_sec) / 3600; 2222 2223 rdev->last_read_error = cur_time_mon; 2224 2225 /* 2226 * if hours_since_last is > the number of bits in read_errors 2227 * just set read errors to 0. We do this to avoid 2228 * overflowing the shift of read_errors by hours_since_last. 2229 */ 2230 if (hours_since_last >= 8 * sizeof(read_errors)) 2231 atomic_set(&rdev->read_errors, 0); 2232 else 2233 atomic_set(&rdev->read_errors, read_errors >> hours_since_last); 2234} 2235 2236static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, 2237 int sectors, struct page *page, int rw) 2238{ 2239 sector_t first_bad; 2240 int bad_sectors; 2241 2242 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) 2243 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) 2244 return -1; 2245 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) 2246 /* success */ 2247 return 1; 2248 if (rw == WRITE) { 2249 set_bit(WriteErrorSeen, &rdev->flags); 2250 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2251 set_bit(MD_RECOVERY_NEEDED, 2252 &rdev->mddev->recovery); 2253 } 2254 /* need to record an error - either for the block or the device */ 2255 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 2256 md_error(rdev->mddev, rdev); 2257 return 0; 2258} 2259 2260/* 2261 * This is a kernel thread which: 2262 * 2263 * 1. Retries failed read operations on working mirrors. 2264 * 2. Updates the raid superblock when problems encounter. 2265 * 3. Performs writes following reads for array synchronising. 2266 */ 2267 2268static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) 2269{ 2270 int sect = 0; /* Offset from r10_bio->sector */ 2271 int sectors = r10_bio->sectors; 2272 struct md_rdev*rdev; 2273 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); 2274 int d = r10_bio->devs[r10_bio->read_slot].devnum; 2275 2276 /* still own a reference to this rdev, so it cannot 2277 * have been cleared recently. 
2278 */ 2279 rdev = conf->mirrors[d].rdev; 2280 2281 if (test_bit(Faulty, &rdev->flags)) 2282 /* drive has already been failed, just ignore any 2283 more fix_read_error() attempts */ 2284 return; 2285 2286 check_decay_read_errors(mddev, rdev); 2287 atomic_inc(&rdev->read_errors); 2288 if (atomic_read(&rdev->read_errors) > max_read_errors) { 2289 char b[BDEVNAME_SIZE]; 2290 bdevname(rdev->bdev, b); 2291 2292 printk(KERN_NOTICE 2293 "md/raid10:%s: %s: Raid device exceeded " 2294 "read_error threshold [cur %d:max %d]\n", 2295 mdname(mddev), b, 2296 atomic_read(&rdev->read_errors), max_read_errors); 2297 printk(KERN_NOTICE 2298 "md/raid10:%s: %s: Failing raid device\n", 2299 mdname(mddev), b); 2300 md_error(mddev, conf->mirrors[d].rdev); 2301 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; 2302 return; 2303 } 2304 2305 while(sectors) { 2306 int s = sectors; 2307 int sl = r10_bio->read_slot; 2308 int success = 0; 2309 int start; 2310 2311 if (s > (PAGE_SIZE>>9)) 2312 s = PAGE_SIZE >> 9; 2313 2314 rcu_read_lock(); 2315 do { 2316 sector_t first_bad; 2317 int bad_sectors; 2318 2319 d = r10_bio->devs[sl].devnum; 2320 rdev = rcu_dereference(conf->mirrors[d].rdev); 2321 if (rdev && 2322 !test_bit(Unmerged, &rdev->flags) && 2323 test_bit(In_sync, &rdev->flags) && 2324 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, 2325 &first_bad, &bad_sectors) == 0) { 2326 atomic_inc(&rdev->nr_pending); 2327 rcu_read_unlock(); 2328 success = sync_page_io(rdev, 2329 r10_bio->devs[sl].addr + 2330 sect, 2331 s<<9, 2332 conf->tmppage, READ, false); 2333 rdev_dec_pending(rdev, mddev); 2334 rcu_read_lock(); 2335 if (success) 2336 break; 2337 } 2338 sl++; 2339 if (sl == conf->copies) 2340 sl = 0; 2341 } while (!success && sl != r10_bio->read_slot); 2342 rcu_read_unlock(); 2343 2344 if (!success) { 2345 /* Cannot read from anywhere, just mark the block 2346 * as bad on the first device to discourage future 2347 * reads. 
2348 */ 2349 int dn = r10_bio->devs[r10_bio->read_slot].devnum; 2350 rdev = conf->mirrors[dn].rdev; 2351 2352 if (!rdev_set_badblocks( 2353 rdev, 2354 r10_bio->devs[r10_bio->read_slot].addr 2355 + sect, 2356 s, 0)) { 2357 md_error(mddev, rdev); 2358 r10_bio->devs[r10_bio->read_slot].bio 2359 = IO_BLOCKED; 2360 } 2361 break; 2362 } 2363 2364 start = sl; 2365 /* write it back and re-read */ 2366 rcu_read_lock(); 2367 while (sl != r10_bio->read_slot) { 2368 char b[BDEVNAME_SIZE]; 2369 2370 if (sl==0) 2371 sl = conf->copies; 2372 sl--; 2373 d = r10_bio->devs[sl].devnum; 2374 rdev = rcu_dereference(conf->mirrors[d].rdev); 2375 if (!rdev || 2376 test_bit(Unmerged, &rdev->flags) || 2377 !test_bit(In_sync, &rdev->flags)) 2378 continue; 2379 2380 atomic_inc(&rdev->nr_pending); 2381 rcu_read_unlock(); 2382 if (r10_sync_page_io(rdev, 2383 r10_bio->devs[sl].addr + 2384 sect, 2385 s, conf->tmppage, WRITE) 2386 == 0) { 2387 /* Well, this device is dead */ 2388 printk(KERN_NOTICE 2389 "md/raid10:%s: read correction " 2390 "write failed" 2391 " (%d sectors at %llu on %s)\n", 2392 mdname(mddev), s, 2393 (unsigned long long)( 2394 sect + 2395 choose_data_offset(r10_bio, 2396 rdev)), 2397 bdevname(rdev->bdev, b)); 2398 printk(KERN_NOTICE "md/raid10:%s: %s: failing " 2399 "drive\n", 2400 mdname(mddev), 2401 bdevname(rdev->bdev, b)); 2402 } 2403 rdev_dec_pending(rdev, mddev); 2404 rcu_read_lock(); 2405 } 2406 sl = start; 2407 while (sl != r10_bio->read_slot) { 2408 char b[BDEVNAME_SIZE]; 2409 2410 if (sl==0) 2411 sl = conf->copies; 2412 sl--; 2413 d = r10_bio->devs[sl].devnum; 2414 rdev = rcu_dereference(conf->mirrors[d].rdev); 2415 if (!rdev || 2416 !test_bit(In_sync, &rdev->flags)) 2417 continue; 2418 2419 atomic_inc(&rdev->nr_pending); 2420 rcu_read_unlock(); 2421 switch (r10_sync_page_io(rdev, 2422 r10_bio->devs[sl].addr + 2423 sect, 2424 s, conf->tmppage, 2425 READ)) { 2426 case 0: 2427 /* Well, this device is dead */ 2428 printk(KERN_NOTICE 2429 "md/raid10:%s: unable to read back " 2430 "corrected sectors" 2431 " (%d sectors at %llu on %s)\n", 2432 mdname(mddev), s, 2433 (unsigned long long)( 2434 sect + 2435 choose_data_offset(r10_bio, rdev)), 2436 bdevname(rdev->bdev, b)); 2437 printk(KERN_NOTICE "md/raid10:%s: %s: failing " 2438 "drive\n", 2439 mdname(mddev), 2440 bdevname(rdev->bdev, b)); 2441 break; 2442 case 1: 2443 printk(KERN_INFO 2444 "md/raid10:%s: read error corrected" 2445 " (%d sectors at %llu on %s)\n", 2446 mdname(mddev), s, 2447 (unsigned long long)( 2448 sect + 2449 choose_data_offset(r10_bio, rdev)), 2450 bdevname(rdev->bdev, b)); 2451 atomic_add(s, &rdev->corrected_errors); 2452 } 2453 2454 rdev_dec_pending(rdev, mddev); 2455 rcu_read_lock(); 2456 } 2457 rcu_read_unlock(); 2458 2459 sectors -= s; 2460 sect += s; 2461 } 2462} 2463 2464static void bi_complete(struct bio *bio, int error) 2465{ 2466 complete((struct completion *)bio->bi_private); 2467} 2468 2469static int submit_bio_wait(int rw, struct bio *bio) 2470{ 2471 struct completion event; 2472 rw |= REQ_SYNC; 2473 2474 init_completion(&event); 2475 bio->bi_private = &event; 2476 bio->bi_end_io = bi_complete; 2477 submit_bio(rw, bio); 2478 wait_for_completion(&event); 2479 2480 return test_bit(BIO_UPTODATE, &bio->bi_flags); 2481} 2482 2483static int narrow_write_error(struct r10bio *r10_bio, int i) 2484{ 2485 struct bio *bio = r10_bio->master_bio; 2486 struct mddev *mddev = r10_bio->mddev; 2487 struct r10conf *conf = mddev->private; 2488 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; 2489 /* bio has the data to 
be written to slot 'i' where 2490 * we just recently had a write error. 2491 * We repeatedly clone the bio and trim down to one block, 2492 * then try the write. Where the write fails we record 2493 * a bad block. 2494 * It is conceivable that the bio doesn't exactly align with 2495 * blocks. We must handle this. 2496 * 2497 * We currently own a reference to the rdev. 2498 */ 2499 2500 int block_sectors; 2501 sector_t sector; 2502 int sectors; 2503 int sect_to_write = r10_bio->sectors; 2504 int ok = 1; 2505 2506 if (rdev->badblocks.shift < 0) 2507 return 0; 2508 2509 block_sectors = 1 << rdev->badblocks.shift; 2510 sector = r10_bio->sector; 2511 sectors = ((r10_bio->sector + block_sectors) 2512 & ~(sector_t)(block_sectors - 1)) 2513 - sector; 2514 2515 while (sect_to_write) { 2516 struct bio *wbio; 2517 if (sectors > sect_to_write) 2518 sectors = sect_to_write; 2519 /* Write at 'sector' for 'sectors' */ 2520 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 2521 md_trim_bio(wbio, sector - bio->bi_sector, sectors); 2522 wbio->bi_sector = (r10_bio->devs[i].addr+ 2523 choose_data_offset(r10_bio, rdev) + 2524 (sector - r10_bio->sector)); 2525 wbio->bi_bdev = rdev->bdev; 2526 if (submit_bio_wait(WRITE, wbio) == 0) 2527 /* Failure! */ 2528 ok = rdev_set_badblocks(rdev, sector, 2529 sectors, 0) 2530 && ok; 2531 2532 bio_put(wbio); 2533 sect_to_write -= sectors; 2534 sector += sectors; 2535 sectors = block_sectors; 2536 } 2537 return ok; 2538} 2539 2540static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) 2541{ 2542 int slot = r10_bio->read_slot; 2543 struct bio *bio; 2544 struct r10conf *conf = mddev->private; 2545 struct md_rdev *rdev = r10_bio->devs[slot].rdev; 2546 char b[BDEVNAME_SIZE]; 2547 unsigned long do_sync; 2548 int max_sectors; 2549 2550 /* we got a read error. Maybe the drive is bad. Maybe just 2551 * the block and we can fix it. 2552 * We freeze all other IO, and try reading the block from 2553 * other devices. When we find one, we re-write 2554 * and check if that fixes the read error. 2555 * This is all done synchronously while the array is 2556 * frozen.
2557 */ 2558 bio = r10_bio->devs[slot].bio; 2559 bdevname(bio->bi_bdev, b); 2560 bio_put(bio); 2561 r10_bio->devs[slot].bio = NULL; 2562 2563 if (mddev->ro == 0) { 2564 freeze_array(conf); 2565 fix_read_error(conf, mddev, r10_bio); 2566 unfreeze_array(conf); 2567 } else 2568 r10_bio->devs[slot].bio = IO_BLOCKED; 2569 2570 rdev_dec_pending(rdev, mddev); 2571 2572read_more: 2573 rdev = read_balance(conf, r10_bio, &max_sectors); 2574 if (rdev == NULL) { 2575 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" 2576 " read error for block %llu\n", 2577 mdname(mddev), b, 2578 (unsigned long long)r10_bio->sector); 2579 raid_end_bio_io(r10_bio); 2580 return; 2581 } 2582 2583 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); 2584 slot = r10_bio->read_slot; 2585 printk_ratelimited( 2586 KERN_ERR 2587 "md/raid10:%s: %s: redirecting " 2588 "sector %llu to another mirror\n", 2589 mdname(mddev), 2590 bdevname(rdev->bdev, b), 2591 (unsigned long long)r10_bio->sector); 2592 bio = bio_clone_mddev(r10_bio->master_bio, 2593 GFP_NOIO, mddev); 2594 md_trim_bio(bio, 2595 r10_bio->sector - bio->bi_sector, 2596 max_sectors); 2597 r10_bio->devs[slot].bio = bio; 2598 r10_bio->devs[slot].rdev = rdev; 2599 bio->bi_sector = r10_bio->devs[slot].addr 2600 + choose_data_offset(r10_bio, rdev); 2601 bio->bi_bdev = rdev->bdev; 2602 bio->bi_rw = READ | do_sync; 2603 bio->bi_private = r10_bio; 2604 bio->bi_end_io = raid10_end_read_request; 2605 if (max_sectors < r10_bio->sectors) { 2606 /* Drat - have to split this up more */ 2607 struct bio *mbio = r10_bio->master_bio; 2608 int sectors_handled = 2609 r10_bio->sector + max_sectors 2610 - mbio->bi_sector; 2611 r10_bio->sectors = max_sectors; 2612 spin_lock_irq(&conf->device_lock); 2613 if (mbio->bi_phys_segments == 0) 2614 mbio->bi_phys_segments = 2; 2615 else 2616 mbio->bi_phys_segments++; 2617 spin_unlock_irq(&conf->device_lock); 2618 generic_make_request(bio); 2619 2620 r10_bio = mempool_alloc(conf->r10bio_pool, 2621 GFP_NOIO); 2622 r10_bio->master_bio = mbio; 2623 r10_bio->sectors = (mbio->bi_size >> 9) 2624 - sectors_handled; 2625 r10_bio->state = 0; 2626 set_bit(R10BIO_ReadError, 2627 &r10_bio->state); 2628 r10_bio->mddev = mddev; 2629 r10_bio->sector = mbio->bi_sector 2630 + sectors_handled; 2631 2632 goto read_more; 2633 } else 2634 generic_make_request(bio); 2635} 2636 2637static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) 2638{ 2639 /* Some sort of write request has finished and it 2640 * succeeded in writing where we thought there was a 2641 * bad block. So forget the bad block. 2642 * Or possibly if failed and we need to record 2643 * a bad block. 
2644 */ 2645 int m; 2646 struct md_rdev *rdev; 2647 2648 if (test_bit(R10BIO_IsSync, &r10_bio->state) || 2649 test_bit(R10BIO_IsRecover, &r10_bio->state)) { 2650 for (m = 0; m < conf->copies; m++) { 2651 int dev = r10_bio->devs[m].devnum; 2652 rdev = conf->mirrors[dev].rdev; 2653 if (r10_bio->devs[m].bio == NULL) 2654 continue; 2655 if (test_bit(BIO_UPTODATE, 2656 &r10_bio->devs[m].bio->bi_flags)) { 2657 rdev_clear_badblocks( 2658 rdev, 2659 r10_bio->devs[m].addr, 2660 r10_bio->sectors, 0); 2661 } else { 2662 if (!rdev_set_badblocks( 2663 rdev, 2664 r10_bio->devs[m].addr, 2665 r10_bio->sectors, 0)) 2666 md_error(conf->mddev, rdev); 2667 } 2668 rdev = conf->mirrors[dev].replacement; 2669 if (r10_bio->devs[m].repl_bio == NULL) 2670 continue; 2671 if (test_bit(BIO_UPTODATE, 2672 &r10_bio->devs[m].repl_bio->bi_flags)) { 2673 rdev_clear_badblocks( 2674 rdev, 2675 r10_bio->devs[m].addr, 2676 r10_bio->sectors, 0); 2677 } else { 2678 if (!rdev_set_badblocks( 2679 rdev, 2680 r10_bio->devs[m].addr, 2681 r10_bio->sectors, 0)) 2682 md_error(conf->mddev, rdev); 2683 } 2684 } 2685 put_buf(r10_bio); 2686 } else { 2687 for (m = 0; m < conf->copies; m++) { 2688 int dev = r10_bio->devs[m].devnum; 2689 struct bio *bio = r10_bio->devs[m].bio; 2690 rdev = conf->mirrors[dev].rdev; 2691 if (bio == IO_MADE_GOOD) { 2692 rdev_clear_badblocks( 2693 rdev, 2694 r10_bio->devs[m].addr, 2695 r10_bio->sectors, 0); 2696 rdev_dec_pending(rdev, conf->mddev); 2697 } else if (bio != NULL && 2698 !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 2699 if (!narrow_write_error(r10_bio, m)) { 2700 md_error(conf->mddev, rdev); 2701 set_bit(R10BIO_Degraded, 2702 &r10_bio->state); 2703 } 2704 rdev_dec_pending(rdev, conf->mddev); 2705 } 2706 bio = r10_bio->devs[m].repl_bio; 2707 rdev = conf->mirrors[dev].replacement; 2708 if (rdev && bio == IO_MADE_GOOD) { 2709 rdev_clear_badblocks( 2710 rdev, 2711 r10_bio->devs[m].addr, 2712 r10_bio->sectors, 0); 2713 rdev_dec_pending(rdev, conf->mddev); 2714 } 2715 } 2716 if (test_bit(R10BIO_WriteError, 2717 &r10_bio->state)) 2718 close_write(r10_bio); 2719 raid_end_bio_io(r10_bio); 2720 } 2721} 2722 2723static void raid10d(struct mddev *mddev) 2724{ 2725 struct r10bio *r10_bio; 2726 unsigned long flags; 2727 struct r10conf *conf = mddev->private; 2728 struct list_head *head = &conf->retry_list; 2729 struct blk_plug plug; 2730 2731 md_check_recovery(mddev); 2732 2733 blk_start_plug(&plug); 2734 for (;;) { 2735 2736 flush_pending_writes(conf); 2737 2738 spin_lock_irqsave(&conf->device_lock, flags); 2739 if (list_empty(head)) { 2740 spin_unlock_irqrestore(&conf->device_lock, flags); 2741 break; 2742 } 2743 r10_bio = list_entry(head->prev, struct r10bio, retry_list); 2744 list_del(head->prev); 2745 conf->nr_queued--; 2746 spin_unlock_irqrestore(&conf->device_lock, flags); 2747 2748 mddev = r10_bio->mddev; 2749 conf = mddev->private; 2750 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 2751 test_bit(R10BIO_WriteError, &r10_bio->state)) 2752 handle_write_completed(conf, r10_bio); 2753 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) 2754 reshape_request_write(mddev, r10_bio); 2755 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) 2756 sync_request_write(mddev, r10_bio); 2757 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) 2758 recovery_request_write(mddev, r10_bio); 2759 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) 2760 handle_read_error(mddev, r10_bio); 2761 else { 2762 /* just a partial read to be scheduled from a 2763 * separate context 2764 */ 2765 int slot = r10_bio->read_slot; 2766 
generic_make_request(r10_bio->devs[slot].bio); 2767 } 2768 2769 cond_resched(); 2770 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) 2771 md_check_recovery(mddev); 2772 } 2773 blk_finish_plug(&plug); 2774} 2775 2776 2777static int init_resync(struct r10conf *conf) 2778{ 2779 int buffs; 2780 int i; 2781 2782 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 2783 BUG_ON(conf->r10buf_pool); 2784 conf->have_replacement = 0; 2785 for (i = 0; i < conf->geo.raid_disks; i++) 2786 if (conf->mirrors[i].replacement) 2787 conf->have_replacement = 1; 2788 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); 2789 if (!conf->r10buf_pool) 2790 return -ENOMEM; 2791 conf->next_resync = 0; 2792 return 0; 2793} 2794 2795/* 2796 * perform a "sync" on one "block" 2797 * 2798 * We need to make sure that no normal I/O request - particularly write 2799 * requests - conflict with active sync requests. 2800 * 2801 * This is achieved by tracking pending requests and a 'barrier' concept 2802 * that can be installed to exclude normal IO requests. 2803 * 2804 * Resync and recovery are handled very differently. 2805 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery. 2806 * 2807 * For resync, we iterate over virtual addresses, read all copies, 2808 * and update if there are differences. If only one copy is live, 2809 * skip it. 2810 * For recovery, we iterate over physical addresses, read a good 2811 * value for each non-in_sync drive, and over-write. 2812 * 2813 * So, for recovery we may have several outstanding complex requests for a 2814 * given address, one for each out-of-sync device. We model this by allocating 2815 * a number of r10_bio structures, one for each out-of-sync device. 2816 * As we setup these structures, we collect all bio's together into a list 2817 * which we then process collectively to add pages, and then process again 2818 * to pass to generic_make_request. 2819 * 2820 * The r10_bio structures are linked using a borrowed master_bio pointer. 2821 * This link is counted in ->remaining. When the r10_bio that points to NULL 2822 * has its remaining count decremented to 0, the whole complex operation 2823 * is complete. 2824 * 2825 */ 2826 2827static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, 2828 int *skipped, int go_faster) 2829{ 2830 struct r10conf *conf = mddev->private; 2831 struct r10bio *r10_bio; 2832 struct bio *biolist = NULL, *bio; 2833 sector_t max_sector, nr_sectors; 2834 int i; 2835 int max_sync; 2836 sector_t sync_blocks; 2837 sector_t sectors_skipped = 0; 2838 int chunks_skipped = 0; 2839 sector_t chunk_mask = conf->geo.chunk_mask; 2840 2841 if (!conf->r10buf_pool) 2842 if (init_resync(conf)) 2843 return 0; 2844 2845 skipped: 2846 max_sector = mddev->dev_sectors; 2847 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 2848 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2849 max_sector = mddev->resync_max_sectors; 2850 if (sector_nr >= max_sector) { 2851 /* If we aborted, we need to abort the 2852 * sync on the 'current' bitmap chucks (there can 2853 * be several when recovering multiple devices). 2854 * as we may have started syncing it but not finished. 2855 * We can find the current address in 2856 * mddev->curr_resync, but for recovery, 2857 * we need to convert that to several 2858 * virtual addresses. 
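 *
 * (Concretely: for recovery mddev->curr_resync is a physical device
 * address, and the loop below calls raid10_find_virt() once per disk to
 * get the matching virtual address for each copy before ending the
 * bitmap sync there.)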
2859 */ 2860 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2861 end_reshape(conf); 2862 return 0; 2863 } 2864 2865 if (mddev->curr_resync < max_sector) { /* aborted */ 2866 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2867 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2868 &sync_blocks, 1); 2869 else for (i = 0; i < conf->geo.raid_disks; i++) { 2870 sector_t sect = 2871 raid10_find_virt(conf, mddev->curr_resync, i); 2872 bitmap_end_sync(mddev->bitmap, sect, 2873 &sync_blocks, 1); 2874 } 2875 } else { 2876 /* completed sync */ 2877 if ((!mddev->bitmap || conf->fullsync) 2878 && conf->have_replacement 2879 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2880 /* Completed a full sync so the replacements 2881 * are now fully recovered. 2882 */ 2883 for (i = 0; i < conf->geo.raid_disks; i++) 2884 if (conf->mirrors[i].replacement) 2885 conf->mirrors[i].replacement 2886 ->recovery_offset 2887 = MaxSector; 2888 } 2889 conf->fullsync = 0; 2890 } 2891 bitmap_close_sync(mddev->bitmap); 2892 close_sync(conf); 2893 *skipped = 1; 2894 return sectors_skipped; 2895 } 2896 2897 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2898 return reshape_request(mddev, sector_nr, skipped); 2899 2900 if (chunks_skipped >= conf->geo.raid_disks) { 2901 /* if there has been nothing to do on any drive, 2902 * then there is nothing to do at all.. 2903 */ 2904 *skipped = 1; 2905 return (max_sector - sector_nr) + sectors_skipped; 2906 } 2907 2908 if (max_sector > mddev->resync_max) 2909 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 2910 2911 /* make sure whole request will fit in a chunk - if chunks 2912 * are meaningful 2913 */ 2914 if (conf->geo.near_copies < conf->geo.raid_disks && 2915 max_sector > (sector_nr | chunk_mask)) 2916 max_sector = (sector_nr | chunk_mask) + 1; 2917 /* 2918 * If there is non-resync activity waiting for us then 2919 * put in a delay to throttle resync. 2920 */ 2921 if (!go_faster && conf->nr_waiting) 2922 msleep_interruptible(1000); 2923 2924 /* Again, very different code for resync and recovery. 2925 * Both must result in an r10bio with a list of bios that 2926 * have bi_end_io, bi_sector, bi_bdev set, 2927 * and bi_private set to the r10bio. 2928 * For recovery, we may actually create several r10bios 2929 * with 2 bios in each, that correspond to the bios in the main one. 2930 * In this case, the subordinate r10bios link back through a 2931 * borrowed master_bio pointer, and the counter in the master 2932 * includes a ref from each subordinate. 2933 */ 2934 /* First, we decide what to do and set ->bi_end_io 2935 * To end_sync_read if we want to read, and 2936 * end_sync_write if we will want to write. 2937 */ 2938 2939 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); 2940 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2941 /* recovery... 
the complicated one */ 2942 int j; 2943 r10_bio = NULL; 2944 2945 for (i = 0 ; i < conf->geo.raid_disks; i++) { 2946 int still_degraded; 2947 struct r10bio *rb2; 2948 sector_t sect; 2949 int must_sync; 2950 int any_working; 2951 struct raid10_info *mirror = &conf->mirrors[i]; 2952 2953 if ((mirror->rdev == NULL || 2954 test_bit(In_sync, &mirror->rdev->flags)) 2955 && 2956 (mirror->replacement == NULL || 2957 test_bit(Faulty, 2958 &mirror->replacement->flags))) 2959 continue; 2960 2961 still_degraded = 0; 2962 /* want to reconstruct this device */ 2963 rb2 = r10_bio; 2964 sect = raid10_find_virt(conf, sector_nr, i); 2965 if (sect >= mddev->resync_max_sectors) { 2966 /* last stripe is not complete - don't 2967 * try to recover this sector. 2968 */ 2969 continue; 2970 } 2971 /* Unless we are doing a full sync, or a replacement 2972 * we only need to recover the block if it is set in 2973 * the bitmap 2974 */ 2975 must_sync = bitmap_start_sync(mddev->bitmap, sect, 2976 &sync_blocks, 1); 2977 if (sync_blocks < max_sync) 2978 max_sync = sync_blocks; 2979 if (!must_sync && 2980 mirror->replacement == NULL && 2981 !conf->fullsync) { 2982 /* yep, skip the sync_blocks here, but don't assume 2983 * that there will never be anything to do here 2984 */ 2985 chunks_skipped = -1; 2986 continue; 2987 } 2988 2989 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 2990 raise_barrier(conf, rb2 != NULL); 2991 atomic_set(&r10_bio->remaining, 0); 2992 2993 r10_bio->master_bio = (struct bio*)rb2; 2994 if (rb2) 2995 atomic_inc(&rb2->remaining); 2996 r10_bio->mddev = mddev; 2997 set_bit(R10BIO_IsRecover, &r10_bio->state); 2998 r10_bio->sector = sect; 2999 3000 raid10_find_phys(conf, r10_bio); 3001 3002 /* Need to check if the array will still be 3003 * degraded 3004 */ 3005 for (j = 0; j < conf->geo.raid_disks; j++) 3006 if (conf->mirrors[j].rdev == NULL || 3007 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { 3008 still_degraded = 1; 3009 break; 3010 } 3011 3012 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3013 &sync_blocks, still_degraded); 3014 3015 any_working = 0; 3016 for (j=0; j<conf->copies;j++) { 3017 int k; 3018 int d = r10_bio->devs[j].devnum; 3019 sector_t from_addr, to_addr; 3020 struct md_rdev *rdev; 3021 sector_t sector, first_bad; 3022 int bad_sectors; 3023 if (!conf->mirrors[d].rdev || 3024 !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) 3025 continue; 3026 /* This is where we read from */ 3027 any_working = 1; 3028 rdev = conf->mirrors[d].rdev; 3029 sector = r10_bio->devs[j].addr; 3030 3031 if (is_badblock(rdev, sector, max_sync, 3032 &first_bad, &bad_sectors)) { 3033 if (first_bad > sector) 3034 max_sync = first_bad - sector; 3035 else { 3036 bad_sectors -= (sector 3037 - first_bad); 3038 if (max_sync > bad_sectors) 3039 max_sync = bad_sectors; 3040 continue; 3041 } 3042 } 3043 bio = r10_bio->devs[0].bio; 3044 bio->bi_next = biolist; 3045 biolist = bio; 3046 bio->bi_private = r10_bio; 3047 bio->bi_end_io = end_sync_read; 3048 bio->bi_rw = READ; 3049 from_addr = r10_bio->devs[j].addr; 3050 bio->bi_sector = from_addr + rdev->data_offset; 3051 bio->bi_bdev = rdev->bdev; 3052 atomic_inc(&rdev->nr_pending); 3053 /* and we write to 'i' (if not in_sync) */ 3054 3055 for (k=0; k<conf->copies; k++) 3056 if (r10_bio->devs[k].devnum == i) 3057 break; 3058 BUG_ON(k == conf->copies); 3059 to_addr = r10_bio->devs[k].addr; 3060 r10_bio->devs[0].devnum = d; 3061 r10_bio->devs[0].addr = from_addr; 3062 r10_bio->devs[1].devnum = i; 3063 r10_bio->devs[1].addr = to_addr; 3064 3065 rdev = mirror->rdev; 
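/* At this point slot 0 of the r10_bio describes the read source
 * (devnum d, from_addr) and slot 1 the device being rebuilt
 * (devnum i, to_addr); a write is queued to it below unless it is
 * already in_sync, and then optionally to its replacement.
 */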
3066 if (!test_bit(In_sync, &rdev->flags)) { 3067 bio = r10_bio->devs[1].bio; 3068 bio->bi_next = biolist; 3069 biolist = bio; 3070 bio->bi_private = r10_bio; 3071 bio->bi_end_io = end_sync_write; 3072 bio->bi_rw = WRITE; 3073 bio->bi_sector = to_addr 3074 + rdev->data_offset; 3075 bio->bi_bdev = rdev->bdev; 3076 atomic_inc(&r10_bio->remaining); 3077 } else 3078 r10_bio->devs[1].bio->bi_end_io = NULL; 3079 3080 /* and maybe write to replacement */ 3081 bio = r10_bio->devs[1].repl_bio; 3082 if (bio) 3083 bio->bi_end_io = NULL; 3084 rdev = mirror->replacement; 3085 /* Note: if rdev != NULL, then bio 3086 * cannot be NULL as r10buf_pool_alloc will 3087 * have allocated it. 3088 * So the second test here is pointless. 3089 * But it keeps semantic-checkers happy, and 3090 * this comment keeps human reviewers 3091 * happy. 3092 */ 3093 if (rdev == NULL || bio == NULL || 3094 test_bit(Faulty, &rdev->flags)) 3095 break; 3096 bio->bi_next = biolist; 3097 biolist = bio; 3098 bio->bi_private = r10_bio; 3099 bio->bi_end_io = end_sync_write; 3100 bio->bi_rw = WRITE; 3101 bio->bi_sector = to_addr + rdev->data_offset; 3102 bio->bi_bdev = rdev->bdev; 3103 atomic_inc(&r10_bio->remaining); 3104 break; 3105 } 3106 if (j == conf->copies) { 3107 /* Cannot recover, so abort the recovery or 3108 * record a bad block */ 3109 put_buf(r10_bio); 3110 if (rb2) 3111 atomic_dec(&rb2->remaining); 3112 r10_bio = rb2; 3113 if (any_working) { 3114 /* problem is that there are bad blocks 3115 * on other device(s) 3116 */ 3117 int k; 3118 for (k = 0; k < conf->copies; k++) 3119 if (r10_bio->devs[k].devnum == i) 3120 break; 3121 if (!test_bit(In_sync, 3122 &mirror->rdev->flags) 3123 && !rdev_set_badblocks( 3124 mirror->rdev, 3125 r10_bio->devs[k].addr, 3126 max_sync, 0)) 3127 any_working = 0; 3128 if (mirror->replacement && 3129 !rdev_set_badblocks( 3130 mirror->replacement, 3131 r10_bio->devs[k].addr, 3132 max_sync, 0)) 3133 any_working = 0; 3134 } 3135 if (!any_working) { 3136 if (!test_and_set_bit(MD_RECOVERY_INTR, 3137 &mddev->recovery)) 3138 printk(KERN_INFO "md/raid10:%s: insufficient " 3139 "working devices for recovery.\n", 3140 mdname(mddev)); 3141 mirror->recovery_disabled 3142 = mddev->recovery_disabled; 3143 } 3144 break; 3145 } 3146 } 3147 if (biolist == NULL) { 3148 while (r10_bio) { 3149 struct r10bio *rb2 = r10_bio; 3150 r10_bio = (struct r10bio*) rb2->master_bio; 3151 rb2->master_bio = NULL; 3152 put_buf(rb2); 3153 } 3154 goto giveup; 3155 } 3156 } else { 3157 /* resync. 
Schedule a read for every block at this virt offset */ 3158 int count = 0; 3159 3160 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 3161 3162 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 3163 &sync_blocks, mddev->degraded) && 3164 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, 3165 &mddev->recovery)) { 3166 /* We can skip this block */ 3167 *skipped = 1; 3168 return sync_blocks + sectors_skipped; 3169 } 3170 if (sync_blocks < max_sync) 3171 max_sync = sync_blocks; 3172 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3173 3174 r10_bio->mddev = mddev; 3175 atomic_set(&r10_bio->remaining, 0); 3176 raise_barrier(conf, 0); 3177 conf->next_resync = sector_nr; 3178 3179 r10_bio->master_bio = NULL; 3180 r10_bio->sector = sector_nr; 3181 set_bit(R10BIO_IsSync, &r10_bio->state); 3182 raid10_find_phys(conf, r10_bio); 3183 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; 3184 3185 for (i = 0; i < conf->copies; i++) { 3186 int d = r10_bio->devs[i].devnum; 3187 sector_t first_bad, sector; 3188 int bad_sectors; 3189 3190 if (r10_bio->devs[i].repl_bio) 3191 r10_bio->devs[i].repl_bio->bi_end_io = NULL; 3192 3193 bio = r10_bio->devs[i].bio; 3194 bio->bi_end_io = NULL; 3195 clear_bit(BIO_UPTODATE, &bio->bi_flags); 3196 if (conf->mirrors[d].rdev == NULL || 3197 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) 3198 continue; 3199 sector = r10_bio->devs[i].addr; 3200 if (is_badblock(conf->mirrors[d].rdev, 3201 sector, max_sync, 3202 &first_bad, &bad_sectors)) { 3203 if (first_bad > sector) 3204 max_sync = first_bad - sector; 3205 else { 3206 bad_sectors -= (sector - first_bad); 3207 if (max_sync > bad_sectors) 3208 max_sync = bad_sectors; 3209 continue; 3210 } 3211 } 3212 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 3213 atomic_inc(&r10_bio->remaining); 3214 bio->bi_next = biolist; 3215 biolist = bio; 3216 bio->bi_private = r10_bio; 3217 bio->bi_end_io = end_sync_read; 3218 bio->bi_rw = READ; 3219 bio->bi_sector = sector + 3220 conf->mirrors[d].rdev->data_offset; 3221 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 3222 count++; 3223 3224 if (conf->mirrors[d].replacement == NULL || 3225 test_bit(Faulty, 3226 &conf->mirrors[d].replacement->flags)) 3227 continue; 3228 3229 /* Need to set up for writing to the replacement */ 3230 bio = r10_bio->devs[i].repl_bio; 3231 clear_bit(BIO_UPTODATE, &bio->bi_flags); 3232 3233 sector = r10_bio->devs[i].addr; 3234 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 3235 bio->bi_next = biolist; 3236 biolist = bio; 3237 bio->bi_private = r10_bio; 3238 bio->bi_end_io = end_sync_write; 3239 bio->bi_rw = WRITE; 3240 bio->bi_sector = sector + 3241 conf->mirrors[d].replacement->data_offset; 3242 bio->bi_bdev = conf->mirrors[d].replacement->bdev; 3243 count++; 3244 } 3245 3246 if (count < 2) { 3247 for (i=0; i<conf->copies; i++) { 3248 int d = r10_bio->devs[i].devnum; 3249 if (r10_bio->devs[i].bio->bi_end_io) 3250 rdev_dec_pending(conf->mirrors[d].rdev, 3251 mddev); 3252 if (r10_bio->devs[i].repl_bio && 3253 r10_bio->devs[i].repl_bio->bi_end_io) 3254 rdev_dec_pending( 3255 conf->mirrors[d].replacement, 3256 mddev); 3257 } 3258 put_buf(r10_bio); 3259 biolist = NULL; 3260 goto giveup; 3261 } 3262 } 3263 3264 for (bio = biolist; bio ; bio=bio->bi_next) { 3265 3266 bio->bi_flags &= ~(BIO_POOL_MASK - 1); 3267 if (bio->bi_end_io) 3268 bio->bi_flags |= 1 << BIO_UPTODATE; 3269 bio->bi_vcnt = 0; 3270 bio->bi_idx = 0; 3271 bio->bi_phys_segments = 0; 3272 bio->bi_size = 0; 3273 } 3274 3275 nr_sectors = 0; 3276 if (sector_nr + max_sync < max_sector) 3277 max_sector = 
sector_nr + max_sync; 3278 do { 3279 struct page *page; 3280 int len = PAGE_SIZE; 3281 if (sector_nr + (len>>9) > max_sector) 3282 len = (max_sector - sector_nr) << 9; 3283 if (len == 0) 3284 break; 3285 for (bio= biolist ; bio ; bio=bio->bi_next) { 3286 struct bio *bio2; 3287 page = bio->bi_io_vec[bio->bi_vcnt].bv_page; 3288 if (bio_add_page(bio, page, len, 0)) 3289 continue; 3290 3291 /* stop here */ 3292 bio->bi_io_vec[bio->bi_vcnt].bv_page = page; 3293 for (bio2 = biolist; 3294 bio2 && bio2 != bio; 3295 bio2 = bio2->bi_next) { 3296 /* remove last page from this bio */ 3297 bio2->bi_vcnt--; 3298 bio2->bi_size -= len; 3299 bio2->bi_flags &= ~(1<< BIO_SEG_VALID); 3300 } 3301 goto bio_full; 3302 } 3303 nr_sectors += len>>9; 3304 sector_nr += len>>9; 3305 } while (biolist->bi_vcnt < RESYNC_PAGES); 3306 bio_full: 3307 r10_bio->sectors = nr_sectors; 3308 3309 while (biolist) { 3310 bio = biolist; 3311 biolist = biolist->bi_next; 3312 3313 bio->bi_next = NULL; 3314 r10_bio = bio->bi_private; 3315 r10_bio->sectors = nr_sectors; 3316 3317 if (bio->bi_end_io == end_sync_read) { 3318 md_sync_acct(bio->bi_bdev, nr_sectors); 3319 generic_make_request(bio); 3320 } 3321 } 3322 3323 if (sectors_skipped) 3324 /* pretend they weren't skipped, it makes 3325 * no important difference in this case 3326 */ 3327 md_done_sync(mddev, sectors_skipped, 1); 3328 3329 return sectors_skipped + nr_sectors; 3330 giveup: 3331 /* There is nowhere to write, so all non-sync 3332 * drives must be failed or in resync, all drives 3333 * have a bad block, so try the next chunk... 3334 */ 3335 if (sector_nr + max_sync < max_sector) 3336 max_sector = sector_nr + max_sync; 3337 3338 sectors_skipped += (max_sector - sector_nr); 3339 chunks_skipped ++; 3340 sector_nr = max_sector; 3341 goto skipped; 3342} 3343 3344static sector_t 3345raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) 3346{ 3347 sector_t size; 3348 struct r10conf *conf = mddev->private; 3349 3350 if (!raid_disks) 3351 raid_disks = min(conf->geo.raid_disks, 3352 conf->prev.raid_disks); 3353 if (!sectors) 3354 sectors = conf->dev_sectors; 3355 3356 size = sectors >> conf->geo.chunk_shift; 3357 sector_div(size, conf->geo.far_copies); 3358 size = size * raid_disks; 3359 sector_div(size, conf->geo.near_copies); 3360 3361 return size << conf->geo.chunk_shift; 3362} 3363 3364static void calc_sectors(struct r10conf *conf, sector_t size) 3365{ 3366 /* Calculate the number of sectors-per-device that will 3367 * actually be used, and set conf->dev_sectors and 3368 * conf->stride 3369 */ 3370 3371 size = size >> conf->geo.chunk_shift; 3372 sector_div(size, conf->geo.far_copies); 3373 size = size * conf->geo.raid_disks; 3374 sector_div(size, conf->geo.near_copies); 3375 /* 'size' is now the number of chunks in the array */ 3376 /* calculate "used chunks per device" */ 3377 size = size * conf->copies; 3378 3379 /* We need to round up when dividing by raid_disks to 3380 * get the stride size. 
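 *
 * A worked example (values purely illustrative): raid_disks = 4,
 * near_copies = 1, far_copies = 2 (so copies = 2) and 1000 chunks per
 * device: 1000 / 2 * 4 / 1 = 2000 chunks in the array; used chunks per
 * device = DIV_ROUND_UP(2000 * 2, 4) = 1000, so dev_sectors is 1000
 * chunks, and with far_offset == 0 the stride is 1000 / 2 = 500 chunks,
 * i.e. each device holds far_copies sections of 500 chunks each.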
3381 */ 3382 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); 3383 3384 conf->dev_sectors = size << conf->geo.chunk_shift; 3385 3386 if (conf->geo.far_offset) 3387 conf->geo.stride = 1 << conf->geo.chunk_shift; 3388 else { 3389 sector_div(size, conf->geo.far_copies); 3390 conf->geo.stride = size << conf->geo.chunk_shift; 3391 } 3392} 3393 3394enum geo_type {geo_new, geo_old, geo_start}; 3395static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) 3396{ 3397 int nc, fc, fo; 3398 int layout, chunk, disks; 3399 switch (new) { 3400 case geo_old: 3401 layout = mddev->layout; 3402 chunk = mddev->chunk_sectors; 3403 disks = mddev->raid_disks - mddev->delta_disks; 3404 break; 3405 case geo_new: 3406 layout = mddev->new_layout; 3407 chunk = mddev->new_chunk_sectors; 3408 disks = mddev->raid_disks; 3409 break; 3410 default: /* avoid 'may be unused' warnings */ 3411 case geo_start: /* new when starting reshape - raid_disks not 3412 * updated yet. */ 3413 layout = mddev->new_layout; 3414 chunk = mddev->new_chunk_sectors; 3415 disks = mddev->raid_disks + mddev->delta_disks; 3416 break; 3417 } 3418 if (layout >> 17) 3419 return -1; 3420 if (chunk < (PAGE_SIZE >> 9) || 3421 !is_power_of_2(chunk)) 3422 return -2; 3423 nc = layout & 255; 3424 fc = (layout >> 8) & 255; 3425 fo = layout & (1<<16); 3426 geo->raid_disks = disks; 3427 geo->near_copies = nc; 3428 geo->far_copies = fc; 3429 geo->far_offset = fo; 3430 geo->chunk_mask = chunk - 1; 3431 geo->chunk_shift = ffz(~chunk); 3432 return nc*fc; 3433} 3434 3435static struct r10conf *setup_conf(struct mddev *mddev) 3436{ 3437 struct r10conf *conf = NULL; 3438 int err = -EINVAL; 3439 struct geom geo; 3440 int copies; 3441 3442 copies = setup_geo(&geo, mddev, geo_new); 3443 3444 if (copies == -2) { 3445 printk(KERN_ERR "md/raid10:%s: chunk size must be " 3446 "at least PAGE_SIZE(%ld) and be a power of 2.\n", 3447 mdname(mddev), PAGE_SIZE); 3448 goto out; 3449 } 3450 3451 if (copies < 2 || copies > mddev->raid_disks) { 3452 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n", 3453 mdname(mddev), mddev->new_layout); 3454 goto out; 3455 } 3456 3457 err = -ENOMEM; 3458 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); 3459 if (!conf) 3460 goto out; 3461 3462 /* FIXME calc properly */ 3463 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + 3464 max(0,mddev->delta_disks)), 3465 GFP_KERNEL); 3466 if (!conf->mirrors) 3467 goto out; 3468 3469 conf->tmppage = alloc_page(GFP_KERNEL); 3470 if (!conf->tmppage) 3471 goto out; 3472 3473 conf->geo = geo; 3474 conf->copies = copies; 3475 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, 3476 r10bio_pool_free, conf); 3477 if (!conf->r10bio_pool) 3478 goto out; 3479 3480 calc_sectors(conf, mddev->dev_sectors); 3481 if (mddev->reshape_position == MaxSector) { 3482 conf->prev = conf->geo; 3483 conf->reshape_progress = MaxSector; 3484 } else { 3485 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { 3486 err = -EINVAL; 3487 goto out; 3488 } 3489 conf->reshape_progress = mddev->reshape_position; 3490 if (conf->prev.far_offset) 3491 conf->prev.stride = 1 << conf->prev.chunk_shift; 3492 else 3493 /* far_copies must be 1 */ 3494 conf->prev.stride = conf->dev_sectors; 3495 } 3496 spin_lock_init(&conf->device_lock); 3497 INIT_LIST_HEAD(&conf->retry_list); 3498 3499 spin_lock_init(&conf->resync_lock); 3500 init_waitqueue_head(&conf->wait_barrier); 3501 3502 conf->thread = md_register_thread(raid10d, mddev, "raid10"); 3503 if (!conf->thread) 
3504 goto out; 3505 3506 conf->mddev = mddev; 3507 return conf; 3508 3509 out: 3510 if (err == -ENOMEM) 3511 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", 3512 mdname(mddev)); 3513 if (conf) { 3514 if (conf->r10bio_pool) 3515 mempool_destroy(conf->r10bio_pool); 3516 kfree(conf->mirrors); 3517 safe_put_page(conf->tmppage); 3518 kfree(conf); 3519 } 3520 return ERR_PTR(err); 3521} 3522 3523static int run(struct mddev *mddev) 3524{ 3525 struct r10conf *conf; 3526 int i, disk_idx, chunk_size; 3527 struct raid10_info *disk; 3528 struct md_rdev *rdev; 3529 sector_t size; 3530 sector_t min_offset_diff = 0; 3531 int first = 1; 3532 3533 if (mddev->private == NULL) { 3534 conf = setup_conf(mddev); 3535 if (IS_ERR(conf)) 3536 return PTR_ERR(conf); 3537 mddev->private = conf; 3538 } 3539 conf = mddev->private; 3540 if (!conf) 3541 goto out; 3542 3543 mddev->thread = conf->thread; 3544 conf->thread = NULL; 3545 3546 chunk_size = mddev->chunk_sectors << 9; 3547 if (mddev->queue) { 3548 blk_queue_io_min(mddev->queue, chunk_size); 3549 if (conf->geo.raid_disks % conf->geo.near_copies) 3550 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3551 else 3552 blk_queue_io_opt(mddev->queue, chunk_size * 3553 (conf->geo.raid_disks / conf->geo.near_copies)); 3554 } 3555 3556 rdev_for_each(rdev, mddev) { 3557 long long diff; 3558 struct request_queue *q; 3559 3560 disk_idx = rdev->raid_disk; 3561 if (disk_idx < 0) 3562 continue; 3563 if (disk_idx >= conf->geo.raid_disks && 3564 disk_idx >= conf->prev.raid_disks) 3565 continue; 3566 disk = conf->mirrors + disk_idx; 3567 3568 if (test_bit(Replacement, &rdev->flags)) { 3569 if (disk->replacement) 3570 goto out_free_conf; 3571 disk->replacement = rdev; 3572 } else { 3573 if (disk->rdev) 3574 goto out_free_conf; 3575 disk->rdev = rdev; 3576 } 3577 q = bdev_get_queue(rdev->bdev); 3578 if (q->merge_bvec_fn) 3579 mddev->merge_check_needed = 1; 3580 diff = (rdev->new_data_offset - rdev->data_offset); 3581 if (!mddev->reshape_backwards) 3582 diff = -diff; 3583 if (diff < 0) 3584 diff = 0; 3585 if (first || diff < min_offset_diff) 3586 min_offset_diff = diff; 3587 3588 if (mddev->gendisk) 3589 disk_stack_limits(mddev->gendisk, rdev->bdev, 3590 rdev->data_offset << 9); 3591 3592 disk->head_position = 0; 3593 } 3594 3595 /* need to check that every block has at least one working mirror */ 3596 if (!enough(conf, -1)) { 3597 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", 3598 mdname(mddev)); 3599 goto out_free_conf; 3600 } 3601 3602 if (conf->reshape_progress != MaxSector) { 3603 /* must ensure that shape change is supported */ 3604 if (conf->geo.far_copies != 1 && 3605 conf->geo.far_offset == 0) 3606 goto out_free_conf; 3607 if (conf->prev.far_copies != 1 && 3608 conf->geo.far_offset == 0) 3609 goto out_free_conf; 3610 } 3611 3612 mddev->degraded = 0; 3613 for (i = 0; 3614 i < conf->geo.raid_disks 3615 || i < conf->prev.raid_disks; 3616 i++) { 3617 3618 disk = conf->mirrors + i; 3619 3620 if (!disk->rdev && disk->replacement) { 3621 /* The replacement is all we have - use it */ 3622 disk->rdev = disk->replacement; 3623 disk->replacement = NULL; 3624 clear_bit(Replacement, &disk->rdev->flags); 3625 } 3626 3627 if (!disk->rdev || 3628 !test_bit(In_sync, &disk->rdev->flags)) { 3629 disk->head_position = 0; 3630 mddev->degraded++; 3631 if (disk->rdev) 3632 conf->fullsync = 1; 3633 } 3634 disk->recovery_disabled = mddev->recovery_disabled - 1; 3635 } 3636 3637 if (mddev->recovery_cp != MaxSector) 3638 printk(KERN_NOTICE 
"md/raid10:%s: not clean" 3639 " -- starting background reconstruction\n", 3640 mdname(mddev)); 3641 printk(KERN_INFO 3642 "md/raid10:%s: active with %d out of %d devices\n", 3643 mdname(mddev), conf->geo.raid_disks - mddev->degraded, 3644 conf->geo.raid_disks); 3645 /* 3646 * Ok, everything is just fine now 3647 */ 3648 mddev->dev_sectors = conf->dev_sectors; 3649 size = raid10_size(mddev, 0, 0); 3650 md_set_array_sectors(mddev, size); 3651 mddev->resync_max_sectors = size; 3652 3653 if (mddev->queue) { 3654 int stripe = conf->geo.raid_disks * 3655 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 3656 mddev->queue->backing_dev_info.congested_fn = raid10_congested; 3657 mddev->queue->backing_dev_info.congested_data = mddev; 3658 3659 /* Calculate max read-ahead size. 3660 * We need to readahead at least twice a whole stripe.... 3661 * maybe... 3662 */ 3663 stripe /= conf->geo.near_copies; 3664 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 3665 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 3666 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); 3667 } 3668 3669 3670 if (md_integrity_register(mddev)) 3671 goto out_free_conf; 3672 3673 if (conf->reshape_progress != MaxSector) { 3674 unsigned long before_length, after_length; 3675 3676 before_length = ((1 << conf->prev.chunk_shift) * 3677 conf->prev.far_copies); 3678 after_length = ((1 << conf->geo.chunk_shift) * 3679 conf->geo.far_copies); 3680 3681 if (max(before_length, after_length) > min_offset_diff) { 3682 /* This cannot work */ 3683 printk("md/raid10: offset difference not enough to continue reshape\n"); 3684 goto out_free_conf; 3685 } 3686 conf->offset_diff = min_offset_diff; 3687 3688 conf->reshape_safe = conf->reshape_progress; 3689 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3690 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3691 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3692 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3693 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3694 "reshape"); 3695 } 3696 3697 return 0; 3698 3699out_free_conf: 3700 md_unregister_thread(&mddev->thread); 3701 if (conf->r10bio_pool) 3702 mempool_destroy(conf->r10bio_pool); 3703 safe_put_page(conf->tmppage); 3704 kfree(conf->mirrors); 3705 kfree(conf); 3706 mddev->private = NULL; 3707out: 3708 return -EIO; 3709} 3710 3711static int stop(struct mddev *mddev) 3712{ 3713 struct r10conf *conf = mddev->private; 3714 3715 raise_barrier(conf, 0); 3716 lower_barrier(conf); 3717 3718 md_unregister_thread(&mddev->thread); 3719 if (mddev->queue) 3720 /* the unplug fn references 'conf'*/ 3721 blk_sync_queue(mddev->queue); 3722 3723 if (conf->r10bio_pool) 3724 mempool_destroy(conf->r10bio_pool); 3725 kfree(conf->mirrors); 3726 kfree(conf); 3727 mddev->private = NULL; 3728 return 0; 3729} 3730 3731static void raid10_quiesce(struct mddev *mddev, int state) 3732{ 3733 struct r10conf *conf = mddev->private; 3734 3735 switch(state) { 3736 case 1: 3737 raise_barrier(conf, 0); 3738 break; 3739 case 0: 3740 lower_barrier(conf); 3741 break; 3742 } 3743} 3744 3745static int raid10_resize(struct mddev *mddev, sector_t sectors) 3746{ 3747 /* Resize of 'far' arrays is not supported. 3748 * For 'near' and 'offset' arrays we can set the 3749 * number of sectors used to be an appropriate multiple 3750 * of the chunk size. 3751 * For 'offset', this is far_copies*chunksize. 3752 * For 'near' the multiplier is the LCM of 3753 * near_copies and raid_disks. 3754 * So if far_copies > 1 && !far_offset, fail. 
3755 * Else find LCM(raid_disks, near_copy)*far_copies and 3756 * multiply by chunk_size. Then round to this number. 3757 * This is mostly done by raid10_size() 3758 */ 3759 struct r10conf *conf = mddev->private; 3760 sector_t oldsize, size; 3761 3762 if (mddev->reshape_position != MaxSector) 3763 return -EBUSY; 3764 3765 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) 3766 return -EINVAL; 3767 3768 oldsize = raid10_size(mddev, 0, 0); 3769 size = raid10_size(mddev, sectors, 0); 3770 if (mddev->external_size && 3771 mddev->array_sectors > size) 3772 return -EINVAL; 3773 if (mddev->bitmap) { 3774 int ret = bitmap_resize(mddev->bitmap, size, 0, 0); 3775 if (ret) 3776 return ret; 3777 } 3778 md_set_array_sectors(mddev, size); 3779 set_capacity(mddev->gendisk, mddev->array_sectors); 3780 revalidate_disk(mddev->gendisk); 3781 if (sectors > mddev->dev_sectors && 3782 mddev->recovery_cp > oldsize) { 3783 mddev->recovery_cp = oldsize; 3784 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3785 } 3786 calc_sectors(conf, sectors); 3787 mddev->dev_sectors = conf->dev_sectors; 3788 mddev->resync_max_sectors = size; 3789 return 0; 3790} 3791 3792static void *raid10_takeover_raid0(struct mddev *mddev) 3793{ 3794 struct md_rdev *rdev; 3795 struct r10conf *conf; 3796 3797 if (mddev->degraded > 0) { 3798 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n", 3799 mdname(mddev)); 3800 return ERR_PTR(-EINVAL); 3801 } 3802 3803 /* Set new parameters */ 3804 mddev->new_level = 10; 3805 /* new layout: far_copies = 1, near_copies = 2 */ 3806 mddev->new_layout = (1<<8) + 2; 3807 mddev->new_chunk_sectors = mddev->chunk_sectors; 3808 mddev->delta_disks = mddev->raid_disks; 3809 mddev->raid_disks *= 2; 3810 /* make sure it will be not marked as dirty */ 3811 mddev->recovery_cp = MaxSector; 3812 3813 conf = setup_conf(mddev); 3814 if (!IS_ERR(conf)) { 3815 rdev_for_each(rdev, mddev) 3816 if (rdev->raid_disk >= 0) 3817 rdev->new_raid_disk = rdev->raid_disk * 2; 3818 conf->barrier = 1; 3819 } 3820 3821 return conf; 3822} 3823 3824static void *raid10_takeover(struct mddev *mddev) 3825{ 3826 struct r0conf *raid0_conf; 3827 3828 /* raid10 can take over: 3829 * raid0 - providing it has only two drives 3830 */ 3831 if (mddev->level == 0) { 3832 /* for raid0 takeover only one zone is supported */ 3833 raid0_conf = mddev->private; 3834 if (raid0_conf->nr_strip_zones > 1) { 3835 printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0" 3836 " with more than one zone.\n", 3837 mdname(mddev)); 3838 return ERR_PTR(-EINVAL); 3839 } 3840 return raid10_takeover_raid0(mddev); 3841 } 3842 return ERR_PTR(-EINVAL); 3843} 3844 3845static int raid10_check_reshape(struct mddev *mddev) 3846{ 3847 /* Called when there is a request to change 3848 * - layout (to ->new_layout) 3849 * - chunk size (to ->new_chunk_sectors) 3850 * - raid_disks (by delta_disks) 3851 * or when trying to restart a reshape that was ongoing. 3852 * 3853 * We need to validate the request and possibly allocate 3854 * space if that might be an issue later. 3855 * 3856 * Currently we reject any reshape of a 'far' mode array, 3857 * allow chunk size to change if new is generally acceptable, 3858 * allow raid_disks to increase, and allow 3859 * a switch between 'near' mode and 'offset' mode. 
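 *
 * For instance (layouts illustrative): a 2-copy 'near' array
 * (layout 0x102: near_copies = 2, far_copies = 1) may become a 2-copy
 * 'offset' array (layout 0x10201: near_copies = 1, far_copies = 2,
 * far_offset set) because the total number of copies stays at 2, while
 * a plain 'far' target (far_copies > 1, far_offset clear) is rejected.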
3860 */ 3861 struct r10conf *conf = mddev->private; 3862 struct geom geo; 3863 3864 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) 3865 return -EINVAL; 3866 3867 if (setup_geo(&geo, mddev, geo_start) != conf->copies) 3868 /* mustn't change number of copies */ 3869 return -EINVAL; 3870 if (geo.far_copies > 1 && !geo.far_offset) 3871 /* Cannot switch to 'far' mode */ 3872 return -EINVAL; 3873 3874 if (mddev->array_sectors & geo.chunk_mask) 3875 /* not factor of array size */ 3876 return -EINVAL; 3877 3878 if (!enough(conf, -1)) 3879 return -EINVAL; 3880 3881 kfree(conf->mirrors_new); 3882 conf->mirrors_new = NULL; 3883 if (mddev->delta_disks > 0) { 3884 /* allocate new 'mirrors' list */ 3885 conf->mirrors_new = kzalloc( 3886 sizeof(struct raid10_info) 3887 *(mddev->raid_disks + 3888 mddev->delta_disks), 3889 GFP_KERNEL); 3890 if (!conf->mirrors_new) 3891 return -ENOMEM; 3892 } 3893 return 0; 3894} 3895 3896/* 3897 * Need to check if array has failed when deciding whether to: 3898 * - start an array 3899 * - remove non-faulty devices 3900 * - add a spare 3901 * - allow a reshape 3902 * This determination is simple when no reshape is happening. 3903 * However if there is a reshape, we need to carefully check 3904 * both the before and after sections. 3905 * This is because some failed devices may only affect one 3906 * of the two sections, and some non-in_sync devices may 3907 * be insync in the section most affected by failed devices. 3908 */ 3909static int calc_degraded(struct r10conf *conf) 3910{ 3911 int degraded, degraded2; 3912 int i; 3913 3914 rcu_read_lock(); 3915 degraded = 0; 3916 /* 'prev' section first */ 3917 for (i = 0; i < conf->prev.raid_disks; i++) { 3918 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 3919 if (!rdev || test_bit(Faulty, &rdev->flags)) 3920 degraded++; 3921 else if (!test_bit(In_sync, &rdev->flags)) 3922 /* When we can reduce the number of devices in 3923 * an array, this might not contribute to 3924 * 'degraded'. It does now. 3925 */ 3926 degraded++; 3927 } 3928 rcu_read_unlock(); 3929 if (conf->geo.raid_disks == conf->prev.raid_disks) 3930 return degraded; 3931 rcu_read_lock(); 3932 degraded2 = 0; 3933 for (i = 0; i < conf->geo.raid_disks; i++) { 3934 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 3935 if (!rdev || test_bit(Faulty, &rdev->flags)) 3936 degraded2++; 3937 else if (!test_bit(In_sync, &rdev->flags)) { 3938 /* If reshape is increasing the number of devices, 3939 * this section has already been recovered, so 3940 * it doesn't contribute to degraded. 3941 * else it does. 3942 */ 3943 if (conf->geo.raid_disks <= conf->prev.raid_disks) 3944 degraded2++; 3945 } 3946 } 3947 rcu_read_unlock(); 3948 if (degraded2 > degraded) 3949 return degraded2; 3950 return degraded; 3951} 3952 3953static int raid10_start_reshape(struct mddev *mddev) 3954{ 3955 /* A 'reshape' has been requested. This commits 3956 * the various 'new' fields and sets MD_RECOVER_RESHAPE 3957 * This also checks if there are enough spares and adds them 3958 * to the array. 3959 * We currently require enough spares to make the final 3960 * array non-degraded. We also require that the difference 3961 * between old and new data_offset - on each device - is 3962 * enough that we never risk over-writing. 
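	 *
	 * As a rough illustration (added by this edit, not part of the
	 * original comment): with 512K (1024-sector) chunks and
	 * far_copies == 1 in both geometries, before_length and
	 * after_length below are each 1024 sectors, so every member
	 * device must have its data_offset and new_data_offset at least
	 * 1024 sectors apart in the direction of the reshape, otherwise
	 * this function returns -EINVAL.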
3963 */ 3964 3965 unsigned long before_length, after_length; 3966 sector_t min_offset_diff = 0; 3967 int first = 1; 3968 struct geom new; 3969 struct r10conf *conf = mddev->private; 3970 struct md_rdev *rdev; 3971 int spares = 0; 3972 int ret; 3973 3974 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3975 return -EBUSY; 3976 3977 if (setup_geo(&new, mddev, geo_start) != conf->copies) 3978 return -EINVAL; 3979 3980 before_length = ((1 << conf->prev.chunk_shift) * 3981 conf->prev.far_copies); 3982 after_length = ((1 << conf->geo.chunk_shift) * 3983 conf->geo.far_copies); 3984 3985 rdev_for_each(rdev, mddev) { 3986 if (!test_bit(In_sync, &rdev->flags) 3987 && !test_bit(Faulty, &rdev->flags)) 3988 spares++; 3989 if (rdev->raid_disk >= 0) { 3990 long long diff = (rdev->new_data_offset 3991 - rdev->data_offset); 3992 if (!mddev->reshape_backwards) 3993 diff = -diff; 3994 if (diff < 0) 3995 diff = 0; 3996 if (first || diff < min_offset_diff) 3997 min_offset_diff = diff; 3998 } 3999 } 4000 4001 if (max(before_length, after_length) > min_offset_diff) 4002 return -EINVAL; 4003 4004 if (spares < mddev->delta_disks) 4005 return -EINVAL; 4006 4007 conf->offset_diff = min_offset_diff; 4008 spin_lock_irq(&conf->device_lock); 4009 if (conf->mirrors_new) { 4010 memcpy(conf->mirrors_new, conf->mirrors, 4011 sizeof(struct raid10_info)*conf->prev.raid_disks); 4012 smp_mb(); 4013 kfree(conf->mirrors_old); /* FIXME and elsewhere */ 4014 conf->mirrors_old = conf->mirrors; 4015 conf->mirrors = conf->mirrors_new; 4016 conf->mirrors_new = NULL; 4017 } 4018 setup_geo(&conf->geo, mddev, geo_start); 4019 smp_mb(); 4020 if (mddev->reshape_backwards) { 4021 sector_t size = raid10_size(mddev, 0, 0); 4022 if (size < mddev->array_sectors) { 4023 spin_unlock_irq(&conf->device_lock); 4024 printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n", 4025 mdname(mddev)); 4026 return -EINVAL; 4027 } 4028 mddev->resync_max_sectors = size; 4029 conf->reshape_progress = size; 4030 } else 4031 conf->reshape_progress = 0; 4032 spin_unlock_irq(&conf->device_lock); 4033 4034 if (mddev->delta_disks && mddev->bitmap) { 4035 ret = bitmap_resize(mddev->bitmap, 4036 raid10_size(mddev, 0, 4037 conf->geo.raid_disks), 4038 0, 0); 4039 if (ret) 4040 goto abort; 4041 } 4042 if (mddev->delta_disks > 0) { 4043 rdev_for_each(rdev, mddev) 4044 if (rdev->raid_disk < 0 && 4045 !test_bit(Faulty, &rdev->flags)) { 4046 if (raid10_add_disk(mddev, rdev) == 0) { 4047 if (rdev->raid_disk >= 4048 conf->prev.raid_disks) 4049 set_bit(In_sync, &rdev->flags); 4050 else 4051 rdev->recovery_offset = 0; 4052 4053 if (sysfs_link_rdev(mddev, rdev)) 4054 /* Failure here is OK */; 4055 } 4056 } else if (rdev->raid_disk >= conf->prev.raid_disks 4057 && !test_bit(Faulty, &rdev->flags)) { 4058 /* This is a spare that was manually added */ 4059 set_bit(In_sync, &rdev->flags); 4060 } 4061 } 4062 /* When a reshape changes the number of devices, 4063 * ->degraded is measured against the larger of the 4064 * pre and post numbers. 
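	 *
	 * For example (editor's illustration): when growing from 4 to 6
	 * devices, calc_degraded() makes one pass over the old 4-device
	 * geometry and one over the new 6-device geometry.  Missing or
	 * Faulty devices count in both passes; a device that is present
	 * but not yet in_sync counts in the 'prev' pass, but not in the
	 * new pass while the device count is increasing.  The larger of
	 * the two totals becomes ->degraded.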
	 */
	spin_lock_irq(&conf->device_lock);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irq(&conf->device_lock);
	mddev->raid_disks = conf->geo.raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);

	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		ret = -EAGAIN;
		goto abort;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;

abort:
	mddev->recovery = 0;
	spin_lock_irq(&conf->device_lock);
	conf->geo = conf->prev;
	mddev->raid_disks = conf->geo.raid_disks;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	smp_wmb();
	conf->reshape_progress = MaxSector;
	mddev->reshape_position = MaxSector;
	spin_unlock_irq(&conf->device_lock);
	return ret;
}

/* Calculate the last device-address that could contain
 * any block from the chunk that includes the array-address 's'
 * and report the next address.
 * i.e. the address returned will be chunk-aligned and after
 * any data that is in the chunk containing 's'.
 */
static sector_t last_dev_address(sector_t s, struct geom *geo)
{
	s = (s | geo->chunk_mask) + 1;
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

/* Calculate the first device-address that could contain
 * any block from the chunk that includes the array-address 's'.
 * This too will be the start of a chunk.
 */
static sector_t first_dev_address(sector_t s, struct geom *geo)
{
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	sector_div(s, geo->raid_disks);
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped)
{
	/* We simply copy at most one chunk (smallest of old and new)
	 * at a time, possibly less if that exceeds RESYNC_PAGES,
	 * or we hit a bad block or something.
	 * This might mean we pause for normal IO in the middle of
	 * a chunk, but that is not a problem as mddev->reshape_position
	 * can record any location.
	 *
	 * If we will want to write to a location that isn't
	 * yet recorded as 'safe' (i.e. in metadata on disk) then
	 * we need to flush all reshape requests and update the metadata.
	 *
	 * When reshaping forwards (e.g. to more devices), we interpret
	 * 'safe' as the earliest block which might not have been copied
	 * down yet. We divide this by previous stripe size and multiply
	 * by previous stripe length to get lowest device offset that we
	 * cannot write to yet.
	 * We interpret 'sector_nr' as an address that we want to write to.
	 * From this we use last_dev_address() to find where we might
	 * write to, and first_dev_address() on the 'safe' position.
	 * If this 'next' write position is after the 'safe' position,
	 * we must update the metadata to increase the 'safe' position.
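	 *
	 * A small worked example (added by this edit, purely illustrative;
	 * the numbers are invented): suppose both layouts are 'near-2' with
	 * 1024-sector chunks and we are growing from 4 to 6 disks, with
	 * reshape_progress == 24576 and reshape_safe == 20480.  Then
	 *   next = last_dev_address(24576, &conf->geo)   ==  9216
	 *   safe = first_dev_address(20480, &conf->prev) == 10240
	 * so 'next' is still below 'safe' plus offset_diff and the copy can
	 * proceed without updating the metadata first.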
	 *
	 * When reshaping backwards, we round in the opposite direction
	 * and perform the reverse test: next write position must not be
	 * less than current safe position.
	 *
	 * In all this the minimum difference in data offsets
	 * (conf->offset_diff - always positive) allows a bit of slack,
	 * so next can be after 'safe', but not by more than offset_diff.
	 *
	 * We need to prepare all the bios here before we start any IO
	 * to ensure the size we choose is acceptable to all devices.
	 * That means one for each copy for write-out and an extra one for
	 * read-in.
	 * We store the read-in bio in ->master_bio and the others in
	 * ->devs[x].bio and ->devs[x].repl_bio.
	 */
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	sector_t next, safe, last;
	int max_sectors;
	int nr_sectors;
	int s;
	struct md_rdev *rdev;
	int need_flush = 0;
	struct bio *blist;
	struct bio *bio, *read_bio;
	int sectors_done = 0;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->reshape_backwards &&
		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
			sector_nr = (raid10_size(mddev, 0, 0)
				     - conf->reshape_progress);
		} else if (!mddev->reshape_backwards &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		if (sector_nr) {
			mddev->curr_resync_completed = sector_nr;
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			*skipped = 1;
			return sector_nr;
		}
	}

	/* We don't use sector_nr to track where we are up to
	 * as that doesn't work well for ->reshape_backwards.
	 * So just use ->reshape_progress.
	 */
	if (mddev->reshape_backwards) {
		/* 'next' is the earliest device address that we might
		 * write to for this chunk in the new layout
		 */
		next = first_dev_address(conf->reshape_progress - 1,
					 &conf->geo);

		/* 'safe' is the last device address that we might read from
		 * in the old layout after a restart
		 */
		safe = last_dev_address(conf->reshape_safe - 1,
					&conf->prev);

		if (next + conf->offset_diff < safe)
			need_flush = 1;

		last = conf->reshape_progress - 1;
		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
					       & conf->prev.chunk_mask);
		if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
			sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
	} else {
		/* 'next' is after the last device address that we
		 * might write to for this chunk in the new layout
		 */
		next = last_dev_address(conf->reshape_progress, &conf->geo);

		/* 'safe' is the earliest device address that we might
		 * read from in the old layout after a restart
		 */
		safe = first_dev_address(conf->reshape_safe, &conf->prev);

		/* Need to update metadata if 'next' might be beyond 'safe'
		 * as that would possibly corrupt data
		 */
		if (next > safe + conf->offset_diff)
			need_flush = 1;

		sector_nr = conf->reshape_progress;
		last = sector_nr | (conf->geo.chunk_mask
				    & conf->prev.chunk_mask);

		if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
			last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
	}

	if (need_flush ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Need to update reshape_position in metadata */
		wait_barrier(conf);
		mddev->reshape_position = conf->reshape_progress;
		if
(mddev->reshape_backwards) 4259 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) 4260 - conf->reshape_progress; 4261 else 4262 mddev->curr_resync_completed = conf->reshape_progress; 4263 conf->reshape_checkpoint = jiffies; 4264 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4265 md_wakeup_thread(mddev->thread); 4266 wait_event(mddev->sb_wait, mddev->flags == 0 || 4267 kthread_should_stop()); 4268 conf->reshape_safe = mddev->reshape_position; 4269 allow_barrier(conf); 4270 } 4271 4272read_more: 4273 /* Now schedule reads for blocks from sector_nr to last */ 4274 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 4275 raise_barrier(conf, sectors_done != 0); 4276 atomic_set(&r10_bio->remaining, 0); 4277 r10_bio->mddev = mddev; 4278 r10_bio->sector = sector_nr; 4279 set_bit(R10BIO_IsReshape, &r10_bio->state); 4280 r10_bio->sectors = last - sector_nr + 1; 4281 rdev = read_balance(conf, r10_bio, &max_sectors); 4282 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); 4283 4284 if (!rdev) { 4285 /* Cannot read from here, so need to record bad blocks 4286 * on all the target devices. 4287 */ 4288 // FIXME 4289 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4290 return sectors_done; 4291 } 4292 4293 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4294 4295 read_bio->bi_bdev = rdev->bdev; 4296 read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4297 + rdev->data_offset); 4298 read_bio->bi_private = r10_bio; 4299 read_bio->bi_end_io = end_sync_read; 4300 read_bio->bi_rw = READ; 4301 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); 4302 read_bio->bi_flags |= 1 << BIO_UPTODATE; 4303 read_bio->bi_vcnt = 0; 4304 read_bio->bi_idx = 0; 4305 read_bio->bi_size = 0; 4306 r10_bio->master_bio = read_bio; 4307 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; 4308 4309 /* Now find the locations in the new layout */ 4310 __raid10_find_phys(&conf->geo, r10_bio); 4311 4312 blist = read_bio; 4313 read_bio->bi_next = NULL; 4314 4315 for (s = 0; s < conf->copies*2; s++) { 4316 struct bio *b; 4317 int d = r10_bio->devs[s/2].devnum; 4318 struct md_rdev *rdev2; 4319 if (s&1) { 4320 rdev2 = conf->mirrors[d].replacement; 4321 b = r10_bio->devs[s/2].repl_bio; 4322 } else { 4323 rdev2 = conf->mirrors[d].rdev; 4324 b = r10_bio->devs[s/2].bio; 4325 } 4326 if (!rdev2 || test_bit(Faulty, &rdev2->flags)) 4327 continue; 4328 b->bi_bdev = rdev2->bdev; 4329 b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; 4330 b->bi_private = r10_bio; 4331 b->bi_end_io = end_reshape_write; 4332 b->bi_rw = WRITE; 4333 b->bi_flags &= ~(BIO_POOL_MASK - 1); 4334 b->bi_flags |= 1 << BIO_UPTODATE; 4335 b->bi_next = blist; 4336 b->bi_vcnt = 0; 4337 b->bi_idx = 0; 4338 b->bi_size = 0; 4339 blist = b; 4340 } 4341 4342 /* Now add as many pages as possible to all of these bios. 
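	 * The pages used are the resync pages already attached to
	 * devs[0].bio when the r10_bio was allocated from r10buf_pool,
	 * and the same page is added to every bio on 'blist', so the
	 * single read fills the one buffer that all the copy-out writes
	 * then share.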
*/ 4343 4344 nr_sectors = 0; 4345 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) { 4346 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page; 4347 int len = (max_sectors - s) << 9; 4348 if (len > PAGE_SIZE) 4349 len = PAGE_SIZE; 4350 for (bio = blist; bio ; bio = bio->bi_next) { 4351 struct bio *bio2; 4352 if (bio_add_page(bio, page, len, 0)) 4353 continue; 4354 4355 /* Didn't fit, must stop */ 4356 for (bio2 = blist; 4357 bio2 && bio2 != bio; 4358 bio2 = bio2->bi_next) { 4359 /* Remove last page from this bio */ 4360 bio2->bi_vcnt--; 4361 bio2->bi_size -= len; 4362 bio2->bi_flags &= ~(1<<BIO_SEG_VALID); 4363 } 4364 goto bio_full; 4365 } 4366 sector_nr += len >> 9; 4367 nr_sectors += len >> 9; 4368 } 4369bio_full: 4370 r10_bio->sectors = nr_sectors; 4371 4372 /* Now submit the read */ 4373 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); 4374 atomic_inc(&r10_bio->remaining); 4375 read_bio->bi_next = NULL; 4376 generic_make_request(read_bio); 4377 sector_nr += nr_sectors; 4378 sectors_done += nr_sectors; 4379 if (sector_nr <= last) 4380 goto read_more; 4381 4382 /* Now that we have done the whole section we can 4383 * update reshape_progress 4384 */ 4385 if (mddev->reshape_backwards) 4386 conf->reshape_progress -= sectors_done; 4387 else 4388 conf->reshape_progress += sectors_done; 4389 4390 return sectors_done; 4391} 4392 4393static void end_reshape_request(struct r10bio *r10_bio); 4394static int handle_reshape_read_error(struct mddev *mddev, 4395 struct r10bio *r10_bio); 4396static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) 4397{ 4398 /* Reshape read completed. Hopefully we have a block 4399 * to write out. 4400 * If we got a read error then we do sync 1-page reads from 4401 * elsewhere until we find the data - or give up. 4402 */ 4403 struct r10conf *conf = mddev->private; 4404 int s; 4405 4406 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 4407 if (handle_reshape_read_error(mddev, r10_bio) < 0) { 4408 /* Reshape has been aborted */ 4409 md_done_sync(mddev, r10_bio->sectors, 0); 4410 return; 4411 } 4412 4413 /* We definitely have the data in the pages, schedule the 4414 * writes. 
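	 * One write is issued for each copy in the new layout, plus one
	 * to any replacement device that is present, which is why the
	 * loop below walks conf->copies*2 slots and skips slots whose
	 * rdev is absent or Faulty.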
4415 */ 4416 atomic_set(&r10_bio->remaining, 1); 4417 for (s = 0; s < conf->copies*2; s++) { 4418 struct bio *b; 4419 int d = r10_bio->devs[s/2].devnum; 4420 struct md_rdev *rdev; 4421 if (s&1) { 4422 rdev = conf->mirrors[d].replacement; 4423 b = r10_bio->devs[s/2].repl_bio; 4424 } else { 4425 rdev = conf->mirrors[d].rdev; 4426 b = r10_bio->devs[s/2].bio; 4427 } 4428 if (!rdev || test_bit(Faulty, &rdev->flags)) 4429 continue; 4430 atomic_inc(&rdev->nr_pending); 4431 md_sync_acct(b->bi_bdev, r10_bio->sectors); 4432 atomic_inc(&r10_bio->remaining); 4433 b->bi_next = NULL; 4434 generic_make_request(b); 4435 } 4436 end_reshape_request(r10_bio); 4437} 4438 4439static void end_reshape(struct r10conf *conf) 4440{ 4441 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) 4442 return; 4443 4444 spin_lock_irq(&conf->device_lock); 4445 conf->prev = conf->geo; 4446 md_finish_reshape(conf->mddev); 4447 smp_wmb(); 4448 conf->reshape_progress = MaxSector; 4449 spin_unlock_irq(&conf->device_lock); 4450 4451 /* read-ahead size must cover two whole stripes, which is 4452 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4453 */ 4454 if (conf->mddev->queue) { 4455 int stripe = conf->geo.raid_disks * 4456 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); 4457 stripe /= conf->geo.near_copies; 4458 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4459 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4460 } 4461 conf->fullsync = 0; 4462} 4463 4464 4465static int handle_reshape_read_error(struct mddev *mddev, 4466 struct r10bio *r10_bio) 4467{ 4468 /* Use sync reads to get the blocks from somewhere else */ 4469 int sectors = r10_bio->sectors; 4470 struct r10conf *conf = mddev->private; 4471 struct { 4472 struct r10bio r10_bio; 4473 struct r10dev devs[conf->copies]; 4474 } on_stack; 4475 struct r10bio *r10b = &on_stack.r10_bio; 4476 int slot = 0; 4477 int idx = 0; 4478 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; 4479 4480 r10b->sector = r10_bio->sector; 4481 __raid10_find_phys(&conf->prev, r10b); 4482 4483 while (sectors) { 4484 int s = sectors; 4485 int success = 0; 4486 int first_slot = slot; 4487 4488 if (s > (PAGE_SIZE >> 9)) 4489 s = PAGE_SIZE >> 9; 4490 4491 while (!success) { 4492 int d = r10b->devs[slot].devnum; 4493 struct md_rdev *rdev = conf->mirrors[d].rdev; 4494 sector_t addr; 4495 if (rdev == NULL || 4496 test_bit(Faulty, &rdev->flags) || 4497 !test_bit(In_sync, &rdev->flags)) 4498 goto failed; 4499 4500 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; 4501 success = sync_page_io(rdev, 4502 addr, 4503 s << 9, 4504 bvec[idx].bv_page, 4505 READ, false); 4506 if (success) 4507 break; 4508 failed: 4509 slot++; 4510 if (slot >= conf->copies) 4511 slot = 0; 4512 if (slot == first_slot) 4513 break; 4514 } 4515 if (!success) { 4516 /* couldn't read this block, must give up */ 4517 set_bit(MD_RECOVERY_INTR, 4518 &mddev->recovery); 4519 return -EIO; 4520 } 4521 sectors -= s; 4522 idx++; 4523 } 4524 return 0; 4525} 4526 4527static void end_reshape_write(struct bio *bio, int error) 4528{ 4529 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 4530 struct r10bio *r10_bio = bio->bi_private; 4531 struct mddev *mddev = r10_bio->mddev; 4532 struct r10conf *conf = mddev->private; 4533 int d; 4534 int slot; 4535 int repl; 4536 struct md_rdev *rdev = NULL; 4537 4538 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 4539 if (repl) 4540 rdev = conf->mirrors[d].replacement; 4541 if (!rdev) { 4542 smp_mb(); 4543 rdev = conf->mirrors[d].rdev; 4544 } 4545 4546 
if (!uptodate) { 4547 /* FIXME should record badblock */ 4548 md_error(mddev, rdev); 4549 } 4550 4551 rdev_dec_pending(rdev, mddev); 4552 end_reshape_request(r10_bio); 4553} 4554 4555static void end_reshape_request(struct r10bio *r10_bio) 4556{ 4557 if (!atomic_dec_and_test(&r10_bio->remaining)) 4558 return; 4559 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); 4560 bio_put(r10_bio->master_bio); 4561 put_buf(r10_bio); 4562} 4563 4564static void raid10_finish_reshape(struct mddev *mddev) 4565{ 4566 struct r10conf *conf = mddev->private; 4567 4568 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 4569 return; 4570 4571 if (mddev->delta_disks > 0) { 4572 sector_t size = raid10_size(mddev, 0, 0); 4573 md_set_array_sectors(mddev, size); 4574 if (mddev->recovery_cp > mddev->resync_max_sectors) { 4575 mddev->recovery_cp = mddev->resync_max_sectors; 4576 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4577 } 4578 mddev->resync_max_sectors = size; 4579 set_capacity(mddev->gendisk, mddev->array_sectors); 4580 revalidate_disk(mddev->gendisk); 4581 } else { 4582 int d; 4583 for (d = conf->geo.raid_disks ; 4584 d < conf->geo.raid_disks - mddev->delta_disks; 4585 d++) { 4586 struct md_rdev *rdev = conf->mirrors[d].rdev; 4587 if (rdev) 4588 clear_bit(In_sync, &rdev->flags); 4589 rdev = conf->mirrors[d].replacement; 4590 if (rdev) 4591 clear_bit(In_sync, &rdev->flags); 4592 } 4593 } 4594 mddev->layout = mddev->new_layout; 4595 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; 4596 mddev->reshape_position = MaxSector; 4597 mddev->delta_disks = 0; 4598 mddev->reshape_backwards = 0; 4599} 4600 4601static struct md_personality raid10_personality = 4602{ 4603 .name = "raid10", 4604 .level = 10, 4605 .owner = THIS_MODULE, 4606 .make_request = make_request, 4607 .run = run, 4608 .stop = stop, 4609 .status = status, 4610 .error_handler = error, 4611 .hot_add_disk = raid10_add_disk, 4612 .hot_remove_disk= raid10_remove_disk, 4613 .spare_active = raid10_spare_active, 4614 .sync_request = sync_request, 4615 .quiesce = raid10_quiesce, 4616 .size = raid10_size, 4617 .resize = raid10_resize, 4618 .takeover = raid10_takeover, 4619 .check_reshape = raid10_check_reshape, 4620 .start_reshape = raid10_start_reshape, 4621 .finish_reshape = raid10_finish_reshape, 4622}; 4623 4624static int __init raid_init(void) 4625{ 4626 return register_md_personality(&raid10_personality); 4627} 4628 4629static void raid_exit(void) 4630{ 4631 unregister_md_personality(&raid10_personality); 4632} 4633 4634module_init(raid_init); 4635module_exit(raid_exit); 4636MODULE_LICENSE("GPL"); 4637MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD"); 4638MODULE_ALIAS("md-personality-9"); /* RAID10 */ 4639MODULE_ALIAS("md-raid10"); 4640MODULE_ALIAS("md-level-10"); 4641 4642module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); 4643
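/*
 * Editor's note: the following is an illustrative, userspace-only sketch
 * (kept under "#if 0" so it is never built) of the chunk-range address
 * arithmetic performed by first_dev_address()/last_dev_address() above.
 * The "struct geom_example" type, the example_* helpers and the numbers
 * in main() are assumptions invented for the example, not values taken
 * from this driver; main() reproduces the worked example quoted in the
 * comment at the top of reshape_request().
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct geom_example {
	int	 raid_disks;
	int	 near_copies;
	int	 far_copies;
	int	 chunk_shift;	/* chunk size in sectors == 1 << chunk_shift */
	uint64_t chunk_mask;	/* (1 << chunk_shift) - 1 */
};

/* Mirrors last_dev_address(): first chunk-aligned device address past
 * any data belonging to the chunk that contains array address 's'. */
static uint64_t example_last_dev_address(uint64_t s,
					 const struct geom_example *geo)
{
	s = (s | geo->chunk_mask) + 1;
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	s = (s + geo->raid_disks - 1) / geo->raid_disks;	/* DIV_ROUND_UP */
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

/* Mirrors first_dev_address(): first device address that could hold
 * data from the chunk containing array address 's'. */
static uint64_t example_first_dev_address(uint64_t s,
					  const struct geom_example *geo)
{
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	s /= geo->raid_disks;
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

int main(void)
{
	/* Growing a near-2 array from 4 to 6 disks, 1024-sector chunks. */
	struct geom_example prev = { 4, 2, 1, 10, 1023 };
	struct geom_example geo  = { 6, 2, 1, 10, 1023 };

	/* Same numbers as the worked example in reshape_request()'s comment. */
	printf("next = %llu\n",
	       (unsigned long long)example_last_dev_address(24576, &geo));	/* 9216 */
	printf("safe = %llu\n",
	       (unsigned long long)example_first_dev_address(20480, &prev));	/* 10240 */
	return 0;
}
#endif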