/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"	/* for ALGORITHM_PARITY_N, used in raid0_takeover() */
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED))
static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(q->backing_dev_info, bits);
	}
	return ret;
}
/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k ? "/" : "",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}
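/*
 * Worked example (illustrative, not from the original source): members
 * of 100 GiB, 100 GiB and 50 GiB produce two strip zones.  Zone 0
 * stripes the first 50 GiB of all three disks (zone_end = 150 GiB of
 * array space, 3-way striping); zone 1 stripes the remaining 50 GiB of
 * the two larger disks (dev_start = 50 GiB, zone_end = 250 GiB, 2-way
 * striping).  zone_end is cumulative over the whole array.
 */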
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned short blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;
	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}
/*
 * remaps the bio to the target device. we separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}
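/*
 * Worked example (illustrative, not from the original source): take
 * chunk_sects = 128 and a 4-disk zone.  For zone-relative sector 1000:
 * sect_in_chunk = 1000 & 127 = 104 and chunk index = 1000 >> 7 = 7, so
 * the bio goes to device 7 % 4 = 3; the chunk on that device is
 * 1000 / (4 * 128) = 1, giving a device offset of 1 * 128 + 104 = 232
 * sectors (before adding zone->dev_start and the rdev's data_offset).
 */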
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}
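/*
 * Example (illustrative): with chunk_sectors = 128, a member of 1000
 * sectors contributes 1000 & ~127 = 896 sectors.  create_strip_zones()
 * already rounded each rdev's size down to a chunk multiple, so the
 * masking here is a safety net rather than the primary rounding.
 */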
static void raid0_free(struct mddev *mddev, void *priv);
static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe. i.e. number of devices
		 * multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants.  We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}
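	/*
	 * Worked example (illustrative): 4 disks with 512 KiB chunks give
	 * stripe = 4 * (1024 << 9) / PAGE_SIZE = 512 pages on a 4 KiB page
	 * system, so ra_pages is raised to at least 1024 pages (4 MiB),
	 * i.e. two full stripes of read-ahead.
	 */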
	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}
static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}
/*
 * Is the IO distributed over 1 or more chunks?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}
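/*
 * Example (illustrative): with chunk_sects = 128, a bio starting at
 * sector 100 fits in one chunk only if it has at most 28 sectors
 * (100 + 28 == 128); a 40-sector bio would span two chunks and gets
 * split by raid0_make_request() below.
 */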
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	struct bio *split;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	do {
		sector_t bio_sector = bio->bi_iter.bi_sector;
		sector_t sector = bio_sector;
		unsigned chunk_sects = mddev->chunk_sectors;

		unsigned sectors = chunk_sects -
			(likely(is_power_of_2(chunk_sects))
			 ? (sector & (chunk_sects-1))
			 : sector_div(sector, chunk_sects));

		/* Restore due to sector_div */
		sector = bio_sector;

		if (sectors < bio_sectors(bio)) {
			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		zone = find_zone(mddev->private, &sector);
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		split->bi_bdev = tmp_dev->bdev;
		split->bi_iter.bi_sector = sector + zone->dev_start +
			tmp_dev->data_offset;

		if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
			/* Just ignore it */
			bio_endio(split);
		} else {
			if (mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
						      split, disk_devt(mddev->gendisk),
						      bio_sector);
			mddev_check_writesame(mddev, split);
			generic_make_request(split);
		}
	} while (split != bio);
}
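/*
 * Splitting example (illustrative, not from the original source): with
 * 128-sector chunks, a 300-sector bio starting at array sector 100 is
 * issued as four chained bios of 28, 128, 128 and 16 sectors, each
 * remapped to its own chunk; the parent bio completes only after every
 * chained piece has completed.
 */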
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}
static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}
static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
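	/*
	 * Note (editorial): the RAID10 layout word stores near_copies in
	 * its low byte and far_copies in the next byte, so the required
	 * value (1 << 8) + 2 == 0x102 is exactly far_copies == 1,
	 * near_copies == 2, i.e. the "n2" layout listed above.
	 */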
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}
static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);
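	/*
	 * Worked example (illustrative): for an array of 1000000 sectors,
	 * chunksect starts at 128 (64K); 1000000 & 127 == 64, so it is
	 * halved to 64, and 1000000 & 63 == 0, so 64 sectors (32K) becomes
	 * the chunk size.  64 << 9 == 32768 >= PAGE_SIZE, so the takeover
	 * proceeds.
	 */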
	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}
static void raid0_quiesce(struct mddev *mddev, int state)
{
}
static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};
static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");