/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
#include <linux/raid/md_k.h>
#include <linux/seq_file.h>
#include <linux/raid/raid0.h>	/* raid0_conf_t, struct strip_zone, mddev_to_conf() */
static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	/* Propagate the unplug to every member device's queue. */
	for (i = 0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}
static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	/* The array is congested as soon as any member device is. */
	for (i = 0; i < mddev->raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}
static int create_strip_zones (mddev_t *mddev)
{
	int i, c, j;
	sector_t current_start, curr_zone_start;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * The number of 'same size groups'
	 */
	conf->nr_strip_zones = 0;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "raid0: comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->size);
			printk(KERN_INFO " with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->size);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "raid0: END\n");
				break;
			}
			if (rdev2->size == rdev1->size) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "raid0: EQUAL\n");
				c = 1;
				break;
			}
			printk(KERN_INFO "raid0: NOT EQUAL\n");
		}
		if (!c) {
			printk(KERN_INFO "raid0: ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk(KERN_INFO "raid0: %d zones\n",
				conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;
	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "raid0: bad disk number %d - "
				"aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk(KERN_ERR "raid0: multiple devices for %d - "
				"aborting!\n", j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sector to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->size < smallest->size))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
			"aborting!\n", cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	/* rdev->size is in KiB; '* 2' converts it to 512-byte sectors */
	zone->sectors = smallest->size * cnt * 2;
	zone->zone_start = 0;

	current_start = smallest->size * 2;
	curr_zone_start = zone->sectors;
	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk(KERN_INFO "raid0: zone %d\n", i);
		zone->dev_start = current_start;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk(KERN_INFO "raid0: checking %s ...",
				bdevname(rdev->bdev, b));
			if (rdev->size > current_start / 2) {
				printk(KERN_INFO " contained as device %d\n",
					c);
				zone->dev[c] = rdev;
				c++;
				if (!smallest || (rdev->size < smallest->size)) {
					smallest = rdev;
					printk(KERN_INFO " (%llu) is smallest!.\n",
						(unsigned long long)rdev->size);
				}
			} else
				printk(KERN_INFO " nope.\n");
		}

		zone->nb_dev = c;
		zone->sectors = (smallest->size * 2 - current_start) * c;
		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
			zone->nb_dev, (unsigned long long)zone->sectors);

		zone->zone_start = curr_zone_start;
		curr_zone_start += zone->sectors;

		current_start = smallest->size * 2;
		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)current_start);
	}
	/* Now find appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that has big enough size.  We never consider the last
	 * strip though as its size has no bearing on the efficacy of the hash
	 * table.
	 */
	conf->spacing = curr_zone_start;
	min_spacing = curr_zone_start;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i=0; i < conf->nr_strip_zones-1; i++) {
		sector_t s = 0;
		for (j = i; j < conf->nr_strip_zones - 1 &&
				s < min_spacing; j++)
			s += conf->strip_zone[j].sectors;
		if (s >= min_spacing && s < conf->spacing)
			conf->spacing = s;
	}
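	/*
	 * Illustrative figures (assumed, not from the original source):
	 * with 4KiB pages and 8-byte pointers the hash table can hold at
	 * most 4096 / 8 = 512 entries, so a 1TiB array gets min_spacing =
	 * 1TiB / 512 = 2GiB.  The loop above then picks the smallest run
	 * of consecutive zones (the last zone excluded) covering at least
	 * 2GiB and uses its combined size as conf->spacing.
	 */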
	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk(KERN_INFO "raid0: done.\n");
	return 0;

 abort:
	return 1;
}
/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
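/*
 * Worked example for raid0_mergeable_bvec(), with assumed numbers: a
 * 64KiB chunk gives chunk_sectors = 128.  For a bio starting at array
 * sector 1000 with nothing in it yet (bio_sectors == 0), the offset
 * within the chunk is 1000 & 127 = 104, so max = (128 - 104) << 9 =
 * 12KiB may be added before the chunk boundary.  An empty bio is always
 * granted its first biovec even when max is smaller, because a
 * single-page bio that straddles a chunk boundary is split later in
 * raid0_make_request().
 */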
static int raid0_run (mddev_t *mddev)
{
	unsigned cur=0, i=0, nb_zone;
	s64 sectors;
	raid0_conf_t *conf;
	mdk_rdev_t *rdev;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones (mddev))
		goto out_free_conf;
	/* calculate array device size */
	mddev->array_sectors = 0;
	list_for_each_entry(rdev, &mddev->disks, same_set)
		mddev->array_sectors += rdev->size * 2;

	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
		(unsigned long long)mddev->array_sectors);
	printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
		(unsigned long long)conf->spacing);
	{
		sector_t s = mddev->array_sectors;
		sector_t space = conf->spacing;
		int round;

		conf->sector_shift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/* shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->sector_shift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}
	printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
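	/*
	 * nb_zone is now effectively DIV_ROUND_UP(array size, spacing).
	 * Illustrative numbers (assumed): a 1TiB array with 2GiB spacing
	 * gives nb_zone = 512, which at 8 bytes per pointer is exactly one
	 * 4KiB page for the hash table allocated below.
	 */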
	printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
				nb_zone*sizeof(struct strip_zone*));
	conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
	sectors = conf->strip_zone[cur].sectors;

	conf->hash_table[0] = conf->strip_zone + cur;
	for (i=1; i< nb_zone; i++) {
		while (sectors <= conf->spacing) {
			cur++;
			sectors += conf->strip_zone[cur].sectors;
		}
		sectors -= conf->spacing;
		conf->hash_table[i] = conf->strip_zone + cur;
	}
	if (conf->sector_shift) {
		conf->spacing >>= conf->sector_shift;
		/* round spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->spacing++;
	}
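	/*
	 * Invariant of the loop above: hash_table[i] points at the zone
	 * containing array sector i * spacing.  A lookup divides a sector
	 * by 'spacing' and lands on or before the zone holding it, so the
	 * forward walk in raid0_make_request() stays short (spacing was
	 * chosen so most hash entries span at most two zones).
	 */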
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}
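	/*
	 * Illustrative figures (assumed, not from the original source):
	 * four disks with 64KiB chunks and 4KiB pages give stripe =
	 * 4 * 65536 / 4096 = 64 pages, so ra_pages is raised to at least
	 * 128 pages, i.e. 512KiB of read-ahead -- two full stripes.
	 */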
	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}
static int raid0_stop (mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}
static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t sector, rsect;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}
	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();
	chunk_sects = mddev->chunk_size >> 9;
	chunksect_bits = ffz(~chunk_sects);
	sector = bio->bi_sector;
	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}
	{
		sector_t x = sector >> conf->sector_shift;
		sector_div(x, (u32)conf->spacing);
		zone = conf->hash_table[x];
	}

	/* the hash may point one or two zones early; walk forward */
	while (sector >= zone->zone_start + zone->sectors)
		zone++;
	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);

	{
		sector_t x = (sector - zone->zone_start) >> chunksect_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = sector >> chunksect_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;
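	/*
	 * Worked example with assumed numbers: two equal disks and 64KiB
	 * chunks give chunk_sects = 128, chunksect_bits = 7.  Array sector
	 * 1000 in the first zone (zone_start = dev_start = 0) yields
	 * sect_in_chunk = 1000 & 127 = 104, stripe unit 1000 >> 7 = 7,
	 * hence chunk = 7 / 2 = 3 on device 7 % 2 = 1, and rsect =
	 * (3 << 7) + 0 + 104 = 488: the bio is redirected to sector 488
	 * (plus data_offset) of the second disk.
	 */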
	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}
static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, " z%d", j);
		if (conf->hash_table[h] == conf->strip_zone+j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev,b));

		/* zone_start/dev_start/sectors are sector_t; print as %llu */
		seq_printf(seq, "] zs=%llu ds=%llu s=%llu\n",
			(unsigned long long)conf->strip_zone[j].zone_start,
			(unsigned long long)conf->strip_zone[j].dev_start,
			(unsigned long long)conf->strip_zone[j].sectors);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}
static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};
static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}
module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");