/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
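
/*
 * With 64-byte cache lines and an 8-byte sector_t (a common but
 * arch-dependent configuration), each btree node holds
 * 64 / 8 = 8 keys and fans out to 9 children, so a MAX_DEPTH of
 * 16 is far deeper than any realistic table needs.
 */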

struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets; some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
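
/*
 * e.g. int_log(1, 9) == 0, int_log(9, 9) == 1, int_log(10, 9) == 2:
 * the number of base-way divisions needed to reduce n to 1.
 */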

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
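
/*
 * e.g. min_not_zero(0, 5) == 5, min_not_zero(4, 0) == 4 and
 * min_not_zero(4, 5) == 4; zero means "no limit set" here.
 */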

/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->max_hw_sectors =
		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);

	lhs->no_cluster |= rhs->no_cluster;
}

/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}
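
/*
 * Each btree level is packed into one flat array, giving the usual
 * implicit-tree layout: node n's children occupy slots
 * n * CHILDREN_PER_NODE through n * CHILDREN_PER_NODE + KEYS_PER_NODE.
 */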

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could look up from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}
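
/*
 * Note the overflow check assumes elem_size is non-zero (every caller
 * passes a sizeof), otherwise the division itself would be undefined.
 * On success the block is zero-filled, calloc-style.
 */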

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
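
/*
 * Unused highs slots are memset to all-ones bytes, i.e. (sector_t) -1:
 * the same sentinel high() returns past the end of the tree, so a
 * lookup beyond the last target resolves deterministically instead of
 * reading junk.
 */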

int dm_table_create(struct dm_table **result, int mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
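
/*
 * Typical table life-cycle: dm_table_create(), one or more
 * dm_table_add_target() calls, then dm_table_complete() to build the
 * index; dm_create_error_table() below is a minimal worked example.
 */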

int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
{
	struct dm_table *t;
	sector_t dev_size = 1;
	int r;

	/*
	 * Find current size of device.
	 * Default to 1 sector if inactive.
	 */
	t = dm_get_table(md);
	if (t) {
		dev_size = dm_table_get_size(t);
		dm_table_put(t);
	}

	r = dm_table_create(&t, FMODE_READ, 1, md);
	if (r)
		return r;

	r = dm_table_add_target(t, "error", 0, dev_size, NULL);
	if (r)
		goto out;

	r = dm_table_complete(t);
	if (r)
		goto out;

	*result = t;

out:
	if (r)
		dm_table_put(t);

	return r;
}
EXPORT_SYMBOL_GPL(dm_create_error_table);

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		kfree(dd);
	}
}

static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}
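
/*
 * A table starts life with holders == 1 (set in dm_table_create), so
 * the creator's reference is dropped with dm_table_put() like any
 * other; table_destroy() runs when the last holder lets go.
 */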

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

out:
	path_release(&nd);
	return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->bdev);

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d, struct mapped_device *md)
{
	if (!d->bdev)
		return;

	bd_release_from_disk(d->bdev, dm_disk(md));
	blkdev_put(d->bdev);
	d->bdev = NULL;
}

/*
 * If possible, this checks that an area of the destination device
 * is valid.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 1;

	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev, being careful
 * to leave things as they were if we fail to reopen the device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}

void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 *        into q this would just be a call to
	 *        combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	/*
	 * FIXME: Device-Mapper on top of RAID-0 breaks because DM
	 *        currently doesn't honor MD's merge_bvec_fn routine.
	 *        In this case, we'll force DM to use PAGE_SIZE or
	 *        smaller I/O, just to be safe. A better fix is in the
	 *        works, but add this for the time being so it will at
	 *        least operate correctly.
	 */
	if (q->merge_bvec_fn)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->max_hw_sectors =
		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}
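
/*
 * The array starts at 64 slots and doubles on each growth (64, 128,
 * 256, ...), so splitting n arguments costs O(log n) allocations.
 * The old array is always freed, even when the new allocation fails.
 */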

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any backslash escapes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
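
/*
 * e.g. the string "0 1024 linear" splits into argc == 3 with argv of
 * { "0", "1024", "linear" }, while "a\ b" yields the single argument
 * "a b" because the backslash quotes the space.
 */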

static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_hw_sectors)
		rs->max_hw_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
	if (!rs->bounce_pfn)
		rs->bounce_pfn = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* work out how much space *all* the indexes need */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	/* allocate it in one go */
	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
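
/*
 * e.g. assuming KEYS_PER_NODE == 8 (64-byte cache lines, 8-byte
 * sector_t), 1000 targets need 125 leaf nodes, giving
 * depth = 1 + int_log(125, 9) = 4 levels in total.
 */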

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
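
/*
 * highs[] stores each target's last sector (inclusive), so the table
 * size is simply the last high plus one.
 */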

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
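
/*
 * At each level the walk picks the first key >= sector and descends
 * into that child, so a lookup touches one node per level, each node
 * a single cache line (NODE_SIZE == L1_CACHE_BYTES).
 */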

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->max_hw_sectors = t->limits.max_hw_sectors;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	q->bounce_pfn = t->limits.bounce_pfn;
	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}
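
/*
 * Note the two passes above: every target's preresume hook runs first
 * and may abort the resume with an error; the resume hooks only run
 * once all preresume calls have succeeded.
 */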

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev *dd;
	struct list_head *devices = dm_table_get_devices(t);
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->bdev);
		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct dm_dev *dd;
	struct list_head *devices = dm_table_get_devices(t);

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->bdev);

		blk_unplug(q);
	}
}
1018 | ||
1134e5ae MA |
1019 | struct mapped_device *dm_table_get_md(struct dm_table *t) |
1020 | { | |
1021 | dm_get(t->md); | |
1022 | ||
1023 | return t->md; | |
1024 | } | |
1025 | ||

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);