/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * Inform the user of the RAID configuration.
 */
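/*
 * For illustration only, with hypothetical values (not from any real
 * device), a two-zone array prints roughly:
 *
 *   md: RAID0 configuration for md0 - 2 zones
 *   md: zone0=[sda/sdb]
 *         zone-offset=         0KB, device-offset=         0KB, size=    204800KB
 *   md: zone1=[sdb]
 *         zone-offset=    204800KB, device-offset=    102400KB, size=     51200KB
 */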
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
	       mdname(mddev),
	       conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "md: zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s%s", k?"/":"",
			       bdevname(conf->devlist[j*raid_disks
						      + k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "      zone-offset=%10lluKB, "
		       "device-offset=%10lluKB, size=%10lluKB\n",
		       (unsigned long long)zone_start>>1,
		       (unsigned long long)conf->strip_zone[j].dev_start>>1,
		       (unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "\n");
}

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	bool discard_supported = false;

	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				   conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
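	/*
	 * Illustration with hypothetical sizes (not from the code above):
	 * two members, sda of 400 and sdb of 600 chunk-aligned sectors,
	 * give nr_strip_zones == 2.  Zone 0 stripes over both devices up
	 * to the size of the smallest one (zone_end = 400 * 2 = 800);
	 * zone 1 covers the remaining 200 sectors of sdb alone
	 * (dev_start = 400, zone_end = 800 + 200 = 1000).
	 */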
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			printk(KERN_ERR
			       "md/raid0:%s: remove inactive devices before converting to RAID0\n",
			       mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (mddev->queue)
			disk_stack_limits(mddev->gendisk, rdev1->bdev,
					  rdev1->data_offset << 9);

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
			conf->has_merge_bvec = 1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;

		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
			discard_supported = true;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}

	if (mddev->queue) {
		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		if (!discard_supported)
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}


/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
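/*
 * Hypothetical example: with zone_end[] = { 800, 1000 }, a sector of
 * 850 falls in zone 1 and *sectorp becomes 850 - 800 = 50.
 */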
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. we separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance
 */
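/*
 * A sketch of the power-of-2 arithmetic with hypothetical values (not
 * from any real array): chunk_sects = 128, a zone with nb_dev = 2, and
 * a zone-relative offset of 300 sectors (as in raid0_make_request,
 * where both arguments hold the zone-relative offset after find_zone):
 *
 *   sect_in_chunk  = 300 & 127      =  44  (offset inside the chunk)
 *   chunk in zone  = 300 >> 7       =   2  (third chunk of the zone)
 *   chunk          = 300 / (2 << 7) =   1  (chunk index on the device)
 *   device index   = 2 % 2          =   0
 *   *sector_offset = 1 * 128 + 44   = 172  (sector on that device)
 *
 * i.e. zone chunk 2 lands on device 0 as its second chunk, 44 sectors in.
 */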
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				  sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@mddev: the md device
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct mddev *mddev,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct r0conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	sector_t sector_offset = sector;
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct strip_zone *zone;
	struct md_rdev *rdev;
	struct request_queue *subq;

	if (is_power_of_2(chunk_sectors))
		max = (chunk_sectors - ((sector & (chunk_sectors-1))
					+ bio_sectors)) << 9;
	else
		max = (chunk_sectors - (sector_div(sector, chunk_sectors)
					+ bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	if (max < biovec->bv_len)
		/* too small already, no need to check further */
		return max;
	if (!conf->has_merge_bvec)
		return max;

	/* May need to check subordinate device */
	sector = sector_offset;
	zone = find_zone(mddev->private, &sector_offset);
	rdev = map_sector(mddev, zone, sector, &sector_offset);
	subq = bdev_get_queue(rdev->bdev);
	if (subq->merge_bvec_fn) {
		bvm->bi_bdev = rdev->bdev;
		bvm->bi_sector = sector_offset + zone->dev_start +
			rdev->data_offset;
		return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
	} else
		return max;
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (mddev->queue) {
		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
	}

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe. i.e. number of devices
		 * multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants. We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
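		/*
		 * Worked example with hypothetical numbers: 4 disks with
		 * 64KiB chunks (chunk_sectors = 128) and 4KiB pages give
		 * stripe = 4 * 65536 / 4096 = 64 pages, so ra_pages is
		 * raised to at least 128 pages (512KiB, two full stripes).
		 */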
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Is the I/O distributed over one or more chunks?
 */
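/*
 * E.g. with chunk_sects = 128: a bio at sector 100 of 20 sectors stays
 * inside one chunk (100 + 20 <= 128), while the same bio at sector 120
 * crosses into the next chunk ((120 & 127) + 20 > 128).
 */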
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
					  unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
			 + bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
				       + bio_sectors(bio));
	}
}

static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	struct bio *split;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

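	/*
	 * Split illustration (hypothetical values): with chunk_sects = 128,
	 * a 200-sector bio starting at sector 100 is cut at the first chunk
	 * boundary into a 28-sector piece (128 - 100), which is remapped
	 * and submitted; the loop then repeats on the chained remainder.
	 */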
	do {
		sector_t sector = bio->bi_iter.bi_sector;
		unsigned chunk_sects = mddev->chunk_sectors;

		unsigned sectors = chunk_sects -
			(likely(is_power_of_2(chunk_sects))
			 ? (sector & (chunk_sects-1))
			 : sector_div(sector, chunk_sects));

		/* Restore due to sector_div; must happen before bio_split
		 * advances bio, or the split piece is mapped at the
		 * remainder's offset.
		 */
		sector = bio->bi_iter.bi_sector;

		if (sectors < bio_sectors(bio)) {
			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		zone = find_zone(mddev->private, &sector);
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		split->bi_bdev = tmp_dev->bdev;
		split->bi_iter.bi_sector = sector + zone->dev_start +
			tmp_dev->data_offset;

		if (unlikely((split->bi_rw & REQ_DISCARD) &&
			     !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
			/* Just ignore it */
			bio_endio(split, 0);
		} else
			generic_make_request(split);
	} while (split != bio);
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
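	/*
	 * The raid10 layout word packs near_copies in bits 0-7 and
	 * far_copies in bits 8-15, so (1 << 8) + 2 == 0x102 is exactly
	 * far_copies == 1 with near_copies == 2 (a raid10-n2 array).
	 */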
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
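	/*
	 * The loop below halves the candidate until it divides the array
	 * size exactly. Hypothetical example: array_sectors = 976 shrinks
	 * chunksect from 128 to 16 (976 & 15 == 0), i.e. an 8KiB chunk.
	 */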
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
		       mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
	       mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
	.mergeable_bvec	= raid0_mergeable_bvec,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");