// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

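/*
 * default_layout picks how a multi-zone array maps sectors when the
 * superblock does not record a layout: 1 selects RAID0_ORIG_LAYOUT and
 * 2 selects RAID0_ALT_MULTIZONE_LAYOUT (enum r0layout in raid0.h).
 * With the default of 0, create_strip_zones() refuses to assemble a
 * multi-zone array rather than guess.
 */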
static int default_layout = 0;
module_param(default_layout, int, 0644);

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
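	/*
	 * Count the strip zones: each distinct device size (after rounding
	 * down to a whole number of chunks) starts a new zone, so an array
	 * of equally sized devices has exactly one zone.
	 */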
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	if (conf->nr_strip_zones == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -ENOTSUPP;
		goto abort;
	}
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
			       GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device. We separate two flows: a
 * power-of-2 flow and a general flow, for the sake of performance.
 */
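/*
 * Worked example (illustrative numbers, single-zone case where the
 * zone-relative offset equals the array sector): with chunk_sects = 128
 * and zone->nb_dev = 4, array sector 1000 lies in array chunk
 * 1000 >> 7 = 7, so it maps to device 7 % 4 = 3, device chunk
 * 1000 / (4 * 128) = 1, at offset 1000 & 127 = 104 within the chunk,
 * i.e. device sector 1 * 128 + 104 = 232 (before the caller adds
 * zone->dev_start and the rdev data_offset).
 */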
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				  sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void free_conf(struct mddev *mddev, struct r0conf *conf)
{
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	free_conf(mddev, conf);
	acct_bioset_exit(mddev);
}

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (acct_bioset_init(mddev)) {
		pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
		return -ENOMEM;
	}

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			goto exit_acct_set;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		goto free;

	return ret;

free:
	free_conf(mddev, conf);
exit_acct_set:
	acct_bioset_exit(mddev);
	return ret;
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end is the offset in zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

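	/*
	 * Issue one contiguous discard per member device: disks before the
	 * starting chunk begin at the next stripe, disks after it at the
	 * current stripe, and the boundary disks use the partial offsets
	 * computed above; empty ranges are skipped.
	 */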
	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		md_submit_discard_bio(mddev, rdev, bio,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start);
	}
	bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

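	/*
	 * Compute how many sectors remain before the next chunk boundary so
	 * the bio can be split there; a single submitted bio therefore never
	 * straddles two chunks (and hence never two devices).
	 */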
	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	if (bio->bi_pool != &mddev->bio_set)
		md_account_bio(mddev, &bio);

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
				      bio_sector);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N -1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");