// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))
static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(q->backing_dev_info, bits);
	}
	return ret;
}
/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned short blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}
		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}
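
/*
 * Example of the zone layout built above (illustrative numbers only,
 * assuming two members of 1 GiB and 2 GiB after chunk rounding):
 *
 *   zone 0: both devices, zone_end = 2 * 1 GiB   (striped over 2 disks)
 *   zone 1: the 2 GiB device only, dev_start = 1 GiB,
 *           zone_end = 2 GiB + 1 GiB = 3 GiB     (linear tail)
 *
 * Offsets below zone 0's zone_end are striped over both disks; the
 * remainder maps to the tail of the larger disk alone.
 */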
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}
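
/*
 * Example: with z[0].zone_end == 1000 and z[1].zone_end == 1800, a lookup
 * for sector 1200 matches zone 1 and rewrites *sectorp to
 * 1200 - 1000 = 200, i.e. the offset within that zone.
 */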
/*
 * remaps the bio to the target device. we separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 *  position the bio over the real device
	 *  real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}
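
/*
 * Worked example for the power-of-2 path, assuming chunk_sects == 128
 * (64KiB) and zone->nb_dev == 4, with a zone offset of 1000 sectors:
 *
 *   sect_in_chunk  = 1000 & 127       = 104
 *   chunk          = 1000 / (4 << 7)  =   1   (stripe row on each disk)
 *   device index   = (1000 >> 7) % 4  =   3
 *   *sector_offset = 1 * 128 + 104    = 232
 *
 * So the bio lands on the 4th device at offset 232 within the zone; the
 * caller then adds zone->dev_start and the rdev's data_offset.
 */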
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}
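
/*
 * Example of the masking above: with chunk_sectors == 128 and two members
 * of 1000 and 2000 raw sectors, each is masked down to a chunk multiple
 * (896 and 1920), so the exported array size would be 896 + 1920 = 2816
 * sectors.  (In practice create_strip_zones() has already rounded the
 * member sizes, so the mask is a safety net here.)
 */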
static void raid0_free(struct mddev *mddev, void *priv);
static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe, i.e. the number of
		 * devices multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants.  We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
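		/*
		 * Example: 4 members with 64KiB chunks and 4KiB pages give
		 * stripe = 4 * 65536 / 4096 = 64 pages, so ra_pages is
		 * raised to at least 128 pages (512KiB), i.e. two full
		 * 256KiB stripes.
		 */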
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	return ret;
}
static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}
/*
 * Is the io distributed over 1 or more chunks ?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}
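
/*
 * Example: with chunk_sects == 128, a bio starting at sector 100 with
 * 50 sectors has 100 + 50 = 150 > 128, so it crosses a chunk boundary
 * and the helper returns 0; the same bio with 20 sectors fits
 * (120 <= 128) and returns 1.
 */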
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	/* a discard crossing a zone boundary is split at the boundary */
	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end is the offset in zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;
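
	/*
	 * Example: a zone with 2 devices and 128-sector chunks has a
	 * stripe_size of 256.  A discard of zone offsets 300..900 gives
	 * first_stripe_index = 1, last_stripe_index = 3, and per-device
	 * ranges of 172..512 on disk 0 and 128..388 on disk 1, which the
	 * loop below issues as one discard per member device.
	 */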
	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		generic_make_request(discard_bio);
	}
	bio_endio(bio);
}
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return true;
	}

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	/* sectors left in the chunk that contains bio_sector */
	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;
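
	/*
	 * Example: with chunk_sects == 128 and bio_sector == 1000, the
	 * chunk has 128 - (1000 & 127) = 24 sectors left, so any bio
	 * longer than 24 sectors is split below at the chunk boundary.
	 */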
	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
	}

	zone = find_zone(mddev->private, &sector);
	tmp_dev = map_sector(mddev, zone, sector, &sector);
	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	generic_make_request(bio);
	return true;
}
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}
static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}
*mddev
)
647 struct r0conf
*priv_conf
;
650 * - far_copies must be 1
651 * - near_copies must be 2
652 * - disks number must be even
653 * - all mirrors must be already degraded
655 if (mddev
->layout
!= ((1 << 8) + 2)) {
656 pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
659 return ERR_PTR(-EINVAL
);
661 if (mddev
->raid_disks
& 1) {
662 pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
664 return ERR_PTR(-EINVAL
);
666 if (mddev
->degraded
!= (mddev
->raid_disks
>>1)) {
667 pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
669 return ERR_PTR(-EINVAL
);
672 /* Set new parameters */
673 mddev
->new_level
= 0;
674 mddev
->new_layout
= 0;
675 mddev
->new_chunk_sectors
= mddev
->chunk_sectors
;
676 mddev
->delta_disks
= - mddev
->raid_disks
/ 2;
677 mddev
->raid_disks
+= mddev
->delta_disks
;
679 /* make sure it will be not marked as dirty */
680 mddev
->recovery_cp
= MaxSector
;
681 mddev_clear_unsupported_flags(mddev
, UNSUPPORTED_MDDEV_FLAGS
);
683 create_strip_zones(mddev
, &priv_conf
);
687 static void *raid0_takeover_raid1(struct mddev
*mddev
)
689 struct r0conf
*priv_conf
;
693 * - (N - 1) mirror drives must be already faulty
695 if ((mddev
->raid_disks
- 1) != mddev
->degraded
) {
696 pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
698 return ERR_PTR(-EINVAL
);
702 * a raid1 doesn't have the notion of chunk size, so
703 * figure out the largest suitable size we can use.
705 chunksect
= 64 * 2; /* 64K by default */
707 /* The array must be an exact multiple of chunksize */
708 while (chunksect
&& (mddev
->array_sectors
& (chunksect
- 1)))
711 if ((chunksect
<< 9) < PAGE_SIZE
)
712 /* array size does not allow a suitable chunk size */
713 return ERR_PTR(-EINVAL
);
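
	/*
	 * Example: an array of 200004 sectors shrinks chunksect from 128
	 * down to 4 (the largest power of two dividing the size), and
	 * 4 << 9 = 2048 bytes is below a 4KiB PAGE_SIZE, so the takeover
	 * is rejected; a 409600-sector array keeps the full 64KiB chunk.
	 */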
	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}
static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}
static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};
static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");