// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

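/*
 * default_layout chooses the zone layout for multi-zone arrays whose
 * superblock does not record one: 1 selects RAID0_ORIG_LAYOUT and 2
 * selects RAID0_ALT_MULTIZONE_LAYOUT (see the r0layout enum in raid0.h).
 * It can be set at module load time or via raid0.default_layout on the
 * kernel command line.
 */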
static int default_layout = 0;
module_param(default_layout, int, 0644);

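/*
 * Superblock feature flags that raid0 cannot honour; takeover from
 * levels that set them (e.g. raid4/5 journals or PPL) clears them via
 * mddev_clear_unsupported_flags().
 */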
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += scnprintf(line + len, sizeof(line) - len, "%s%pg",
					 k ? "/" : "",
					 conf->devlist[j * raid_disks + k]->bdev);
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start >> 1,
			(unsigned long long)conf->strip_zone[j].dev_start >> 1,
			(unsigned long long)zone_size >> 1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

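/*
 * Worked example: given members of 100, 200 and 200 chunks, zone 0
 * stripes the first 100 chunks of all three devices (300 chunks of
 * data) and zone 1 stripes the remaining 100 chunks of the two larger
 * devices (200 chunks).  nr_strip_zones therefore equals the number of
 * distinct device sizes, and zone 0 always spans every member.
 */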
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %pg\n",
			 mdname(mddev),
			 rdev1->bdev);
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %pg(%llu) with %pg(%llu)\n",
				 mdname(mddev),
				 rdev1->bdev,
				 (unsigned long long)rdev1->sectors,
				 rdev2->bdev,
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %pg ... nope\n",
					 mdname(mddev),
					 rdev->bdev);
				continue;
			}
			pr_debug("md/raid0:%s: checking %pg ... contained as device %d\n",
				 mdname(mddev),
				 rdev->bdev, c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: next zone starts at device offset %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -EOPNOTSUPP;
		goto abort;
	}

	if (conf->layout == RAID0_ORIG_LAYOUT) {
		for (i = 1; i < conf->nr_strip_zones; i++) {
			sector_t first_sector = conf->strip_zone[i-1].zone_end;

			sector_div(first_sector, mddev->chunk_sectors);
			zone = conf->strip_zone + i;
			/* disk_shift is first disk index used in the zone */
			zone->disk_shift = sector_div(first_sector,
						      zone->nb_dev);
		}
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device. We separate two flows, a
 * power-of-2 flow and a general flow, for the sake of performance.
 */
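/*
 * Worked example, taking both inputs as the zone-relative offset (as
 * in the alternate multi-zone layout): with 3 devices and 128-sector
 * chunks, sector 1000 lies in stripe 1000 / (3 * 128) = 2 and chunk
 * 1000 >> 7 = 7, so it maps to device 7 % 3 = 1 at zone offset
 * 2 * 128 + (1000 & 127) = 360.
 */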
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone) * raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

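/*
 * Each member contributes its size rounded down to a whole number of
 * chunks, e.g. with 128-sector chunks a 1000-sector device contributes
 * 1000 & ~127 = 896 sectors to the array.
 */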
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors - 1));

	return array_sectors;
}

static void free_conf(struct mddev *mddev, struct r0conf *conf)
{
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	free_conf(mddev, conf);
	acct_bioset_exit(mddev);
}

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (acct_bioset_init(mddev)) {
		pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
		return -ENOMEM;
	}

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			goto exit_acct_set;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
		}
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		goto free;

	return ret;

free:
	free_conf(mddev, conf);
exit_acct_set:
	acct_bioset_exit(mddev);
	return ret;
}

/*
 * Convert disk_index to the disk order in which it is read/written.
 *  For example, if we have 4 disks, they are numbered 0,1,2,3. If we
 *  write the disks starting at disk 3, then the read/write order would
 *  be disk 3, then 0, then 1, and then disk 2 and we want map_disk_shift()
 *  to map the disks as follows 0,1,2,3 => 1,2,3,0. So disk 0 would map
 *  to 1, 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in
 *  that 'output' space to understand the read/write disk ordering.
 */
static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
	return ((disk_index + num_disks - disk_shift) % num_disks);
}

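/*
 * Split off any part of the discard that lies beyond the current zone
 * and resubmit it, then turn the zone-relative [start, end) range into
 * a per-device [dev_start, dev_end) range and submit one discard bio
 * per member that owns part of it.
 */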
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;
	sector_t orig_start, orig_end;

	orig_start = start;
	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else {
		end = bio_end_sector(bio);
	}

	orig_end = end;
	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	/* In the first zone the original and alternate layouts are the same */
	if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
		sector_div(orig_start, mddev->chunk_sectors);
		start_disk_index = sector_div(orig_start, zone->nb_dev);
		start_disk_index = map_disk_shift(start_disk_index,
						  zone->nb_dev,
						  zone->disk_shift);
		sector_div(orig_end, mddev->chunk_sectors);
		end_disk_index = sector_div(orig_end, zone->nb_dev);
		end_disk_index = map_disk_shift(end_disk_index,
						zone->nb_dev, zone->disk_shift);
	} else {
		start_disk_index = (int)(start - first_stripe_index * stripe_size) /
			mddev->chunk_sectors;
		end_disk_index = (int)(end - last_stripe_index * stripe_size) /
			mddev->chunk_sectors;
	}
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct md_rdev *rdev;
		int compare_disk;

		compare_disk = map_disk_shift(disk, zone->nb_dev,
					      zone->disk_shift);

		if (compare_disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (compare_disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (compare_disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (compare_disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		md_submit_discard_bio(mddev, rdev, bio,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start);
	}
	bio_endio(bio);
}

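/*
 * Bios are split so that none spans a chunk boundary, e.g. with
 * 128-sector chunks a 60-sector bio starting at sector 100 becomes a
 * 28-sector bio ending at the boundary plus a 32-sector remainder,
 * each mapped independently.
 */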
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH) &&
	    md_flush_request(mddev, bio))
		return true;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects - 1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	if (bio->bi_pool != &mddev->bio_set)
		md_account_bio(mddev, &bio);

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_rdev_broken(tmp_dev))) {
		bio_io_error(bio);
		md_error(mddev, tmp_dev);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
				      bio_sector);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}

static void raid0_error(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
		char *md_name = mdname(mddev);

		pr_crit("md/raid0:%s: Disk failure on %pg detected, failing array.\n",
			md_name, rdev->bdev);
	}
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks - 1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - number of disks must be even
	 *  - all mirrors must already be degraded
	 */
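	/*
	 * 0x102 encodes near_copies = 2 in the low byte and
	 * far_copies = 1 in the next byte, i.e. a plain raid10-n2
	 * layout.
	 */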
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks >> 1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = -mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

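/*
 * Chunk selection example: for a 500000-sector raid1 array, 128- and
 * 64-sector chunks both fail the divisibility test (500000 mod 128 =
 * 500000 mod 64 = 32), but 32 sectors (16 KiB) divides the array
 * evenly, so chunksect ends up as 32.
 */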
730 
731 static void *raid0_takeover_raid1(struct mddev *mddev)
732 {
733 	struct r0conf *priv_conf;
734 	int chunksect;
735 
736 	/* Check layout:
737 	 *  - (N - 1) mirror drives must be already faulty
738 	 */
739 	if ((mddev->raid_disks - 1) != mddev->degraded) {
740 		pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
741 		       mdname(mddev));
742 		return ERR_PTR(-EINVAL);
743 	}
744 
745 	/*
746 	 * a raid1 doesn't have the notion of chunk size, so
747 	 * figure out the largest suitable size we can use.
748 	 */
749 	chunksect = 64 * 2; /* 64K by default */
750 
751 	/* The array must be an exact multiple of chunksize */
752 	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
753 		chunksect >>= 1;
754 
755 	if ((chunksect << 9) < PAGE_SIZE)
756 		/* array size does not allow a suitable chunk size */
757 		return ERR_PTR(-EINVAL);
758 
759 	/* Set new parameters */
760 	mddev->new_level = 0;
761 	mddev->new_layout = 0;
762 	mddev->new_chunk_sectors = chunksect;
763 	mddev->chunk_sectors = chunksect;
764 	mddev->delta_disks = 1 - mddev->raid_disks;
765 	mddev->raid_disks = 1;
766 	/* make sure it will be not marked as dirty */
767 	mddev->recovery_cp = MaxSector;
768 	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
769 
770 	create_strip_zones(mddev, &priv_conf);
771 	return priv_conf;
772 }
773 
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.error_handler	= raid0_error,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");
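
/*
 * Typical assembly, e.g. with mdadm:
 *   mdadm --create /dev/md0 --level=0 --raid-devices=2 /dev/sda1 /dev/sdb1
 * The aliases above let md autoload this personality for level-0 arrays.
 */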