drivers/md/raid0.c
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/raid/raid0.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY

static void raid0_unplug(request_queue_t *q)
{
        mddev_t *mddev = q->queuedata;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
        int i;

        for (i=0; i<mddev->raid_disks; i++) {
                request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);

                if (r_queue->unplug_fn)
                        r_queue->unplug_fn(r_queue);
        }
}

static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
                             sector_t *error_sector)
{
        mddev_t *mddev = q->queuedata;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
        int i, ret = 0;

        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
                struct block_device *bdev = devlist[i]->bdev;
                request_queue_t *r_queue = bdev_get_queue(bdev);

                if (!r_queue->issue_flush_fn)
                        ret = -EOPNOTSUPP;
                else
                        ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
        }
        return ret;
}


static int create_strip_zones (mddev_t *mddev)
{
        int i, c, j;
        sector_t current_offset, curr_zone_offset;
        sector_t min_spacing;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
        struct list_head *tmp1, *tmp2;
        struct strip_zone *zone;
        int cnt;
        char b[BDEVNAME_SIZE];

        /*
         * The number of 'same size groups'
         */
        conf->nr_strip_zones = 0;

        ITERATE_RDEV(mddev,rdev1,tmp1) {
                printk("raid0: looking at %s\n",
                       bdevname(rdev1->bdev,b));
                c = 0;
                ITERATE_RDEV(mddev,rdev2,tmp2) {
                        printk("raid0: comparing %s(%llu)",
                               bdevname(rdev1->bdev,b),
                               (unsigned long long)rdev1->size);
                        printk(" with %s(%llu)\n",
                               bdevname(rdev2->bdev,b),
                               (unsigned long long)rdev2->size);
                        if (rdev2 == rdev1) {
                                printk("raid0: END\n");
                                break;
                        }
                        if (rdev2->size == rdev1->size)
                        {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
                                printk("raid0: EQUAL\n");
                                c = 1;
                                break;
                        }
                        printk("raid0: NOT EQUAL\n");
                }
                if (!c) {
                        printk("raid0: ==> UNIQUE\n");
                        conf->nr_strip_zones++;
                        printk("raid0: %d zones\n", conf->nr_strip_zones);
                }
        }
        printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);

        conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
                                   conf->nr_strip_zones, GFP_KERNEL);
        if (!conf->strip_zone)
                return 1;
        conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
                                conf->nr_strip_zones*mddev->raid_disks,
                                GFP_KERNEL);
        if (!conf->devlist)
                return 1;

        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
         */
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
        zone->dev = conf->devlist;
        ITERATE_RDEV(mddev, rdev1, tmp1) {
                int j = rdev1->raid_disk;

                if (j < 0 || j >= mddev->raid_disks) {
                        printk("raid0: bad disk number %d - aborting!\n", j);
                        goto abort;
                }
                if (zone->dev[j]) {
                        printk("raid0: multiple devices for %d - aborting!\n",
                               j);
                        goto abort;
                }
                zone->dev[j] = rdev1;

                blk_queue_stack_limits(mddev->queue,
                                       rdev1->bdev->bd_disk->queue);
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, so limit ->max_sectors to one PAGE, as
                 * a one page request is never in violation.
                 */

                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
                    mddev->queue->max_sectors > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
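                /* Sketch of the cap above, assuming 4KiB pages:
                 * PAGE_SIZE>>9 == 8, so requests are limited to 8
                 * sectors (one page) whenever a member device has a
                 * merge_bvec_fn we cannot consult.
                 */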

                if (!smallest || (rdev1->size < smallest->size))
                        smallest = rdev1;
                cnt++;
        }
        if (cnt != mddev->raid_disks) {
                printk("raid0: too few disks (%d of %d) - aborting!\n",
                       cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
        zone->size = smallest->size * cnt;
        zone->zone_offset = 0;

        current_offset = smallest->size;
        curr_zone_offset = zone->size;

        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++)
        {
                zone = conf->strip_zone + i;
                zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

                printk("raid0: zone %d\n", i);
                zone->dev_offset = current_offset;
                smallest = NULL;
                c = 0;

                for (j=0; j<cnt; j++) {
                        char b[BDEVNAME_SIZE];
                        rdev = conf->strip_zone[0].dev[j];
                        printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
                        if (rdev->size > current_offset)
                        {
                                printk(" contained as device %d\n", c);
                                zone->dev[c] = rdev;
                                c++;
                                if (!smallest || (rdev->size < smallest->size)) {
                                        smallest = rdev;
                                        printk(" (%llu) is smallest!\n",
                                               (unsigned long long)rdev->size);
                                }
                        } else
                                printk(" nope.\n");
                }

                zone->nb_dev = c;
                zone->size = (smallest->size - current_offset) * c;
                printk("raid0: zone->nb_dev: %d, size: %llu\n",
                       zone->nb_dev, (unsigned long long)zone->size);

                zone->zone_offset = curr_zone_offset;
                curr_zone_offset += zone->size;

                current_offset = smallest->size;
                printk("raid0: current zone offset: %llu\n",
                       (unsigned long long)current_offset);
        }
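        /* Worked example with hypothetical member sizes 100, 100 and 200
         * (in the same units as rdev->size): zone 0 stripes across all
         * three devices, size = 100 * 3 = 300; zone 1 holds only the
         * remainder of the 200 device, size = (200 - 100) * 1 = 100.
         */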

        /* Now find appropriate hash spacing.
         * We want a number which causes most hash entries to cover
         * at most two strips, but the hash table must be at most
         * 1 PAGE. We choose the smallest strip, or contiguous collection
         * of strips, that has big enough size. We never consider the last
         * strip though as its size has no bearing on the efficacy of the hash
         * table.
         */
        conf->hash_spacing = curr_zone_offset;
        min_spacing = curr_zone_offset;
        sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
        for (i=0; i < conf->nr_strip_zones-1; i++) {
                sector_t sz = 0;
                for (j=i; j<conf->nr_strip_zones-1 &&
                             sz < min_spacing ; j++)
                        sz += conf->strip_zone[j].size;
                if (sz >= min_spacing && sz < conf->hash_spacing)
                        conf->hash_spacing = sz;
        }
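        /* e.g. on a 64-bit machine with 4KiB pages,
         * PAGE_SIZE/sizeof(struct strip_zone *) == 512, so min_spacing
         * is about 1/512th of the array and the table fits in one page.
         */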

        mddev->queue->unplug_fn = raid0_unplug;

        mddev->queue->issue_flush_fn = raid0_issue_flush;

        printk("raid0: done.\n");
        return 0;
abort:
        return 1;
}

/**
 * raid0_mergeable_bvec -- tell the bio layer whether two requests can be merged
 * @q: request queue
 * @bio: the bio that's been built up so far
 * @biovec: the request that could be merged to it.
 *
 * Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
{
        mddev_t *mddev = q->queuedata;
        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
        int max;
        unsigned int chunk_sectors = mddev->chunk_size >> 9;
        unsigned int bio_sectors = bio->bi_size >> 9;

        max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
        if (max < 0) max = 0; /* bio_add cannot handle a negative return */
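        /* Example, assuming 64KiB chunks (chunk_sectors == 128): a bio
         * of 20 sectors starting 100 sectors into a chunk can still take
         * (128 - (100 + 20)) << 9 == 4096 bytes before the boundary.
         */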
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
        else
                return max;
}

static int raid0_run (mddev_t *mddev)
{
        unsigned cur=0, i=0, nb_zone;
        s64 size;
        raid0_conf_t *conf;
        mdk_rdev_t *rdev;
        struct list_head *tmp;

        if (mddev->chunk_size == 0) {
                printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
                return -EINVAL;
        }
        printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
               mdname(mddev),
               mddev->chunk_size >> 9,
               (mddev->chunk_size>>1)-1);
        blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
        blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
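        /* chunk_size is held in bytes: >>9 gives 512-byte sectors for
         * max_sectors, while (chunk_size>>1)-1 is a byte mask keeping any
         * single segment within half a chunk. E.g. 64KiB chunks give
         * max_sectors == 128 and a boundary mask of 0x7fff.
         */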

        conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
        if (!conf)
                goto out;
        mddev->private = (void *)conf;

        conf->strip_zone = NULL;
        conf->devlist = NULL;
        if (create_strip_zones (mddev))
                goto out_free_conf;

        /* calculate array device size */
        mddev->array_size = 0;
        ITERATE_RDEV(mddev,rdev,tmp)
                mddev->array_size += rdev->size;

        printk("raid0 : md_size is %llu blocks.\n",
               (unsigned long long)mddev->array_size);
        printk("raid0 : conf->hash_spacing is %llu blocks.\n",
               (unsigned long long)conf->hash_spacing);
        {
                sector_t s = mddev->array_size;
                sector_t space = conf->hash_spacing;
                int round;
                conf->preshift = 0;
                if (sizeof(sector_t) > sizeof(u32)) {
                        /*shift down space and s so that sector_div will work */
                        while (space > (sector_t) (~(u32)0)) {
                                s >>= 1;
                                space >>= 1;
                                s += 1; /* force round-up */
                                conf->preshift++;
                        }
                }
                round = sector_div(s, (u32)space) ? 1 : 0;
                nb_zone = s + round;
        }
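        /* Worked example with hypothetical values: array_size == 1000 and
         * hash_spacing == 300 leave s == 3 with remainder 100 after
         * sector_div(), so round == 1 and nb_zone == 4 == ceil(1000/300).
         */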
        printk("raid0 : nb_zone is %d.\n", nb_zone);

        printk("raid0 : Allocating %Zd bytes for hash.\n",
               nb_zone*sizeof(struct strip_zone*));
        conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
        if (!conf->hash_table)
                goto out_free_conf;
        size = conf->strip_zone[cur].size;

        for (i=0; i< nb_zone; i++) {
                conf->hash_table[i] = conf->strip_zone + cur;
                while (size <= conf->hash_spacing) {
                        cur++;
                        size += conf->strip_zone[cur].size;
                }
                size -= conf->hash_spacing;
        }
        if (conf->preshift) {
                conf->hash_spacing >>= conf->preshift;
                /* round hash_spacing up so when we divide by it, we
                 * err on the side of too-low, which is safest
                 */
                conf->hash_spacing++;
        }

        /* calculate the max read-ahead size.
         * For read-ahead of large files to be effective, we need to
         * readahead at least twice a whole stripe. i.e. number of devices
         * multiplied by chunk size times 2.
         * If an individual device has an ra_pages greater than the
         * chunk size, then we will not drive that device as hard as it
         * wants. We consider this a configuration error: a larger
         * chunksize should be used in that case.
         */
        {
                int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2* stripe;
        }
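        /* e.g. 4 disks with 64KiB chunks on 4KiB pages: stripe ==
         * 4 * 65536 / 4096 == 64 pages, so ra_pages is raised to at
         * least 128 pages (512KiB), i.e. two full stripes.
         */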


        blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
        return 0;

out_free_conf:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        mddev->private = NULL;
out:
        return -ENOMEM;
}

static int raid0_stop (mddev_t *mddev)
{
        raid0_conf_t *conf = mddev_to_conf(mddev);

        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
        kfree(conf->hash_table);
        conf->hash_table = NULL;
        kfree(conf->strip_zone);
        conf->strip_zone = NULL;
        kfree(conf);
        mddev->private = NULL;

        return 0;
}

static int raid0_make_request (request_queue_t *q, struct bio *bio)
{
        mddev_t *mddev = q->queuedata;
        unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
        raid0_conf_t *conf = mddev_to_conf(mddev);
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
        unsigned long chunk;
        sector_t block, rsect;
        const int rw = bio_data_dir(bio);

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
                return 0;
        }

        disk_stat_inc(mddev->gendisk, ios[rw]);
        disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

        chunk_size = mddev->chunk_size >> 10;
        chunk_sects = mddev->chunk_size >> 9;
        chunksize_bits = ffz(~chunk_size);
        block = bio->bi_sector >> 1;
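        /* chunk_size is in KiB here (bytes >> 10), so ffz(~chunk_size)
         * is log2 of the chunk size in KiB: e.g. 64KiB chunks give
         * chunk_size == 64 and chunksize_bits == 6. 'block' is the
         * bio's start expressed in 1KiB blocks.
         */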


        if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
                if (bio->bi_vcnt != 1 ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
                bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
                if (raid0_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (raid0_make_request(q, &bp->bio2))
                        generic_make_request(&bp->bio2);

                bio_pair_release(bp);
                return 0;
        }
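        /* Example split, assuming chunk_sects == 128: a 16-sector bio at
         * sector 120 has (120 & 127) + 16 == 136 > 128, so bio_split()
         * carves off the first 128 - (120 & 127) == 8 sectors and the
         * remainder begins exactly on the next chunk boundary.
         */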


        {
                sector_t x = block >> conf->preshift;
                sector_div(x, (u32)conf->hash_spacing);
                zone = conf->hash_table[x];
        }

        while (block >= (zone->zone_offset + zone->size))
                zone++;

        sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1);


        {
                sector_t x = (block - zone->zone_offset) >> chunksize_bits;

                sector_div(x, zone->nb_dev);
                chunk = x;
                BUG_ON(x != (sector_t)chunk);

                x = block >> chunksize_bits;
                tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
        }
        rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
                + sect_in_chunk;
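        /* Worked example, assuming 2 disks, 64KiB chunks
         * (chunksize_bits == 6) and a bio at sector 300 in zone 0
         * (zone_offset == dev_offset == 0): block == 150, so the chunk
         * index is 150 >> 6 == 2, which sector_div() splits into chunk 1
         * on device 0; sect_in_chunk == 300 & 127 == 44, giving
         * rsect == ((1 << 6) + 0) * 2 + 44 == 172 on device 0.
         */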

        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = rsect + tmp_dev->data_offset;

        /*
         * Let the main block layer submit the IO and resolve recursion:
         */
        return 1;

bad_map:
        printk("raid0_make_request bug: can't convert block across chunks"
               " or bigger than %dk %llu %d\n", chunk_size,
               (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

        bio_io_error(bio, bio->bi_size);
        return 0;
}

static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
        int j, k, h;
        char b[BDEVNAME_SIZE];
        raid0_conf_t *conf = mddev_to_conf(mddev);

        h = 0;
        for (j = 0; j < conf->nr_strip_zones; j++) {
                seq_printf(seq, " z%d", j);
                if (conf->hash_table[h] == conf->strip_zone+j)
                        seq_printf(seq, "(h%d)", h++);
                seq_printf(seq, "=[");
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        seq_printf (seq, "%s/", bdevname(
                                conf->strip_zone[j].dev[k]->bdev,b));

                seq_printf (seq, "] zo=%d do=%d s=%d\n",
                            conf->strip_zone[j].zone_offset,
                            conf->strip_zone[j].dev_offset,
                            conf->strip_zone[j].size);
        }
#endif
        seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
        return;
}

static struct mdk_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .stop           = raid0_stop,
        .status         = raid0_status,
};

static int __init raid0_init (void)
{
        return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
        unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");