drivers/md/dm-stripe.c

/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/log2.h>

#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15

struct stripe {
	struct dm_dev *dev;
	sector_t physical_start;

	atomic_t error_count;
};

struct stripe_c {
	uint32_t stripes;

	/* The size of this target / num. stripes */
	sector_t stripe_width;

	/* stripe chunk size */
	uint32_t chunk_shift;
	sector_t chunk_mask;

	/* Needed for handling events */
	struct dm_target *ti;

	/* Work struct used for triggering events */
	struct work_struct kstriped_ws;

	struct stripe stripe[0];
};

static struct workqueue_struct *kstriped;

/*
 * An event is triggered whenever a drive
 * drops out of a stripe volume.
 */
static void trigger_event(struct work_struct *work)
{
	struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws);

	dm_table_event(sc->ti->table);
}

static inline struct stripe_c *alloc_context(unsigned int stripes)
{
	size_t len;

	if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
			     stripes))
		return NULL;

	len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);

	return kmalloc(len, GFP_KERNEL);
}
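
/*
 * Sizing example (illustrative, values assumed): for 4 stripes this is a
 * single kmalloc() of sizeof(struct stripe_c) + 4 * sizeof(struct stripe)
 * bytes, with the trailing stripe[] flexible array holding the per-device
 * state, so sc->stripe[0..3] can be indexed directly.
 */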

/*
 * Parse a single <dev> <sector> pair
 */
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
		      unsigned int stripe, char **argv)
{
	unsigned long long start;

	if (sscanf(argv[1], "%llu", &start) != 1)
		return -EINVAL;

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &sc->stripe[stripe].dev))
		return -ENXIO;

	sc->stripe[stripe].physical_start = start;

	return 0;
}

/*
 * Construct a striped mapping.
 * <number of stripes> <chunk size (2^n)> [<dev_path> <offset>]+
 */
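/*
 * Example (illustrative, devices and sizes assumed): a two-way stripe with
 * a 256-sector (128 KiB) chunk could be loaded with a table line such as
 *
 *   0 1024000 striped 2 256 /dev/sdb1 0 /dev/sdc1 0
 *
 * e.g. via "dmsetup create vol --table '0 1024000 striped 2 256 ...'",
 * where each <dev_path> <offset> pair names a member device and the
 * starting sector on it.
 */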
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct stripe_c *sc;
	sector_t width;
	uint32_t stripes;
	uint32_t chunk_size;
	char *end;
	int r;
	unsigned int i;

	if (argc < 2) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	stripes = simple_strtoul(argv[0], &end, 10);
	if (!stripes || *end) {
		ti->error = "Invalid stripe count";
		return -EINVAL;
	}

	chunk_size = simple_strtoul(argv[1], &end, 10);
	if (*end) {
		ti->error = "Invalid chunk_size";
		return -EINVAL;
	}

	/*
	 * chunk_size is a power of two
	 */
	if (!is_power_of_2(chunk_size) ||
	    (chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) {
		ti->error = "Invalid chunk size";
		return -EINVAL;
	}

	if (ti->len & (chunk_size - 1)) {
		ti->error = "Target length not divisible by "
		    "chunk size";
		return -EINVAL;
	}

	width = ti->len;
	if (sector_div(width, stripes)) {
		ti->error = "Target length not divisible by "
		    "number of stripes";
		return -EINVAL;
	}

	/*
	 * Do we have enough arguments for that many stripes ?
	 */
	if (argc != (2 + 2 * stripes)) {
		ti->error = "Not enough destinations "
			"specified";
		return -EINVAL;
	}

	sc = alloc_context(stripes);
	if (!sc) {
		ti->error = "Memory allocation for striped context "
		    "failed";
		return -ENOMEM;
	}

	INIT_WORK(&sc->kstriped_ws, trigger_event);

	/* Set pointer to dm target; used in trigger_event */
	sc->ti = ti;

	sc->stripes = stripes;
	sc->stripe_width = width;
	ti->split_io = chunk_size;
	ti->num_flush_requests = stripes;

	sc->chunk_mask = ((sector_t) chunk_size) - 1;
	for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
		chunk_size >>= 1;
	sc->chunk_shift--;
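
	/*
	 * At this point chunk_shift == log2 of the original chunk_size and
	 * chunk_mask covers the offset within a chunk.  Worked example
	 * (values assumed): chunk_size = 256 sectors gives chunk_mask = 0xff
	 * and chunk_shift = 8, so "sector >> 8" selects the chunk and
	 * "sector & 0xff" the offset inside it.
	 */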

	/*
	 * Get the stripe destinations.
	 */
	for (i = 0; i < stripes; i++) {
		argv += 2;

		r = get_stripe(ti, sc, i, argv);
		if (r < 0) {
			ti->error = "Couldn't parse stripe destination";
			while (i--)
				dm_put_device(ti, sc->stripe[i].dev);
			kfree(sc);
			return r;
		}
		atomic_set(&(sc->stripe[i].error_count), 0);
	}

	ti->private = sc;

	return 0;
}

static void stripe_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct stripe_c *sc = (struct stripe_c *) ti->private;

	for (i = 0; i < sc->stripes; i++)
		dm_put_device(ti, sc->stripe[i].dev);

	flush_workqueue(kstriped);
	kfree(sc);
}

static int stripe_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct stripe_c *sc = (struct stripe_c *) ti->private;
	sector_t offset, chunk;
	uint32_t stripe;

	if (unlikely(bio_empty_barrier(bio))) {
		BUG_ON(map_context->flush_request >= sc->stripes);
		bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	offset = bio->bi_sector - ti->begin;
	chunk = offset >> sc->chunk_shift;
	stripe = sector_div(chunk, sc->stripes);

	bio->bi_bdev = sc->stripe[stripe].dev->bdev;
	bio->bi_sector = sc->stripe[stripe].physical_start +
	    (chunk << sc->chunk_shift) + (offset & sc->chunk_mask);
	return DM_MAPIO_REMAPPED;
}
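
/*
 * Worked example for the mapping above (illustrative, values assumed):
 * with 3 stripes and a 256-sector chunk (chunk_shift = 8, chunk_mask = 0xff),
 * a target-relative sector 1000 gives chunk = 1000 >> 8 = 3; sector_div()
 * then yields stripe = 3 % 3 = 0 and chunk = 3 / 3 = 1, so the bio is sent
 * to stripe 0 at physical_start + (1 << 8) + (1000 & 0xff)
 * = physical_start + 488.
 */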

/*
 * Stripe status:
 *
 * INFO
 * #stripes [stripe_name <stripe_name>] [group word count]
 * [error count 'A|D' <error count 'A|D'>]
 *
 * TABLE
 * #stripes [stripe chunk size]
 * [stripe_name physical_start <stripe_name physical_start>]
 *
 */
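/*
 * Example output (illustrative, device numbers assumed) for a two-stripe
 * volume with a 256-sector chunk:
 *
 *   INFO:  "2 8:16 8:32 1 AA"   (both member devices alive)
 *   TABLE: "2 256 8:16 0 8:32 0"
 */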

static int stripe_status(struct dm_target *ti,
			 status_type_t type, char *result, unsigned int maxlen)
{
	struct stripe_c *sc = (struct stripe_c *) ti->private;
	char buffer[sc->stripes + 1];
	unsigned int sz = 0;
	unsigned int i;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", sc->stripes);
		for (i = 0; i < sc->stripes; i++) {
			DMEMIT("%s ", sc->stripe[i].dev->name);
			buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
				'D' : 'A';
		}
		buffer[i] = '\0';
		DMEMIT("1 %s", buffer);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%d %llu", sc->stripes,
			(unsigned long long)sc->chunk_mask + 1);
		for (i = 0; i < sc->stripes; i++)
			DMEMIT(" %s %llu", sc->stripe[i].dev->name,
			    (unsigned long long)sc->stripe[i].physical_start);
		break;
	}
	return 0;
}

static int stripe_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	unsigned i;
	char major_minor[16];
	struct stripe_c *sc = ti->private;

	if (!error)
		return 0; /* I/O complete */

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		return error;

	if (error == -EOPNOTSUPP)
		return error;

	memset(major_minor, 0, sizeof(major_minor));
	sprintf(major_minor, "%d:%d",
		MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
		MINOR(disk_devt(bio->bi_bdev->bd_disk)));

	/*
	 * Test to see which stripe drive triggered the event
	 * and increment error count for all stripes on that device.
	 * If the error count for a given device exceeds the threshold
	 * value we will no longer trigger any further events.
	 */
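	/*
	 * For example (device numbers assumed), an error on the member
	 * backed by 8:32 increments error_count of every stripe whose
	 * dev->name is "8:32"; once that count reaches
	 * DM_IO_ERROR_THRESHOLD, no further events are queued for it.
	 */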
	for (i = 0; i < sc->stripes; i++)
		if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
			atomic_inc(&(sc->stripe[i].error_count));
			if (atomic_read(&(sc->stripe[i].error_count)) <
			    DM_IO_ERROR_THRESHOLD)
				queue_work(kstriped, &sc->kstriped_ws);
		}

	return error;
}

static int stripe_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct stripe_c *sc = ti->private;
	int ret = 0;
	unsigned i = 0;

	do {
		ret = fn(ti, sc->stripe[i].dev,
			 sc->stripe[i].physical_start,
			 sc->stripe_width, data);
	} while (!ret && ++i < sc->stripes);

	return ret;
}

static void stripe_io_hints(struct dm_target *ti,
			    struct queue_limits *limits)
{
	struct stripe_c *sc = ti->private;
	unsigned chunk_size = (sc->chunk_mask + 1) << 9;

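	/*
	 * chunk_mask + 1 is the chunk size in 512-byte sectors; << 9
	 * converts it to bytes.  Example (values assumed): a 256-sector
	 * chunk gives io_min = 128 KiB and, with 4 stripes, io_opt =
	 * 512 KiB, hinting the full-stride I/O size to upper layers.
	 */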
	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * sc->stripes);
}

static struct target_type stripe_target = {
	.name   = "striped",
	.version = {1, 3, 0},
	.module = THIS_MODULE,
	.ctr    = stripe_ctr,
	.dtr    = stripe_dtr,
	.map    = stripe_map,
	.end_io = stripe_end_io,
	.status = stripe_status,
	.iterate_devices = stripe_iterate_devices,
	.io_hints = stripe_io_hints,
};

int __init dm_stripe_init(void)
{
	int r;

	r = dm_register_target(&stripe_target);
	if (r < 0) {
		DMWARN("target registration failed");
		return r;
	}

	kstriped = create_singlethread_workqueue("kstriped");
	if (!kstriped) {
		DMERR("failed to create workqueue kstriped");
		dm_unregister_target(&stripe_target);
		return -ENOMEM;
	}

	return r;
}

void dm_stripe_exit(void)
{
	dm_unregister_target(&stripe_target);
	destroy_workqueue(kstriped);

	return;
}