// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int default_layout = 0;
module_param(default_layout, int, 0644);

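/*
 * Feature flags that may be set on a source array but have no meaning on
 * raid0; takeover clears them via mddev_clear_unsupported_flags() below.
 * (Comment added for clarity; not in the original source.)
 */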
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

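/*
 * Build the strip zones for this array. Members of unequal size are grouped
 * into "zones": e.g. (illustrative, not from the original source) with
 * members of 100 GiB, 200 GiB and 200 GiB, zone 0 stripes across the first
 * 100 GiB of all three devices, while zone 1 stripes across the remaining
 * 100 GiB of the two larger devices.
 */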
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

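	/*
	 * Multi-zone arrays have had two incompatible chunk orderings over
	 * the kernel's history (RAID0_ORIG_LAYOUT vs
	 * RAID0_ALT_MULTIZONE_LAYOUT); reading with the wrong one returns
	 * the wrong data, so a multi-zone array must state its layout
	 * explicitly, in the superblock or via the default_layout
	 * module parameter. (Comment added for clarity.)
	 */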
	if (conf->nr_strip_zones == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		conf->layout = RAID0_ALT_MULTIZONE_LAYOUT;
		pr_warn("md/raid0:%s: !!! DEFAULTING TO ALTERNATE LAYOUT !!!\n",
			mdname(mddev));
		pr_warn("md/raid0: Please set raid0.default_layout to 1 or 2\n");
		pr_warn("md/raid0: Read the following page for more information:\n");
		pr_warn("md/raid0: https://wiki.ubuntu.com/Kernel/Raid0LayoutMigration\n");
	}
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
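/*
 * E.g. (illustrative): with zone_end = {1000, 1800}, sector 1200 falls in
 * zone 1 and *sectorp becomes 1200 - 1000 = 200.
 */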
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. We separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance.
 */
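/*
 * Worked example (illustrative, power-of-2 path): on a single-zone 2-disk
 * array with chunk_sects = 8, array sector 50 gives
 * sect_in_chunk = 50 & 7 = 2 and chunk number 50 >> 3 = 6, so disk
 * 6 % 2 = 0 holds it as its device chunk 50 / 16 = 3, i.e.
 * *sector_offset = 3 * 8 + 2 = 26.
 */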
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				  sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

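	/*
	 * Each member contributes its size rounded down to a whole number
	 * of chunks: e.g. (illustrative) 1000 sectors with 64-sector
	 * chunks contributes 1000 & ~63 = 960 sectors.
	 */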
	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

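		/*
		 * Advertise one chunk as the minimum and one full stripe
		 * (chunk size times the number of members) as the optimal
		 * I/O size, so upper layers can align and size requests
		 * accordingly. (Comment added for clarity.)
		 */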
		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe. i.e. number of devices
		 * multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants. We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Is the I/O contained within a single chunk's boundary?
 */
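/*
 * E.g. (illustrative): with chunk_sects = 128, a 16-sector bio starting at
 * sector 120 spills 8 sectors into the next chunk, so this returns false
 * and the bio must be split.
 */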
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
					  unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

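	/*
	 * If the discard crosses the end of this zone, split it: keep the
	 * head for this pass and resubmit the tail, which will be mapped
	 * with the next zone's geometry. (Comment added for clarity.)
	 */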
	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

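	/*
	 * Worked example (illustrative): 2 disks, chunk_sectors = 8, so
	 * stripe_size = 16. A discard of zone sectors [4, 28) gives
	 * first_stripe_index = 0, last_stripe_index = 1,
	 * start_disk_index = 0 with start_disk_offset = 4, and
	 * end_disk_index = 1 with end_disk_offset = 4 + 8 = 12; the loop
	 * below then discards disk 0 sectors [4, 16) and disk 1 [0, 12).
	 */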
	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		generic_make_request(discard_bio);
	}
	bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

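	/*
	 * "sectors" is the room left in the chunk holding bi_sector:
	 * e.g. (illustrative) chunk_sects = 128 and bi_sector = 300 leave
	 * 128 - (300 & 127) = 84 sectors, so a longer bio is split below
	 * and the tail resubmitted.
	 */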
	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
	}

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);

	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	generic_make_request(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

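	/*
	 * E.g. (illustrative): a 20971520-sector (10 GiB) array is a
	 * multiple of 128 sectors, so chunksect stays at the 64K default;
	 * an odd sector count would shrink it below one page and fail the
	 * check below.
	 */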
	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");