Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (C) 2001 Sistina Software (UK) Limited. | |
d5816876 | 3 | * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. |
1da177e4 LT |
4 | * |
5 | * This file is released under the GPL. | |
6 | */ | |
7 | ||
8 | #include "dm.h" | |
9 | ||
10 | #include <linux/module.h> | |
11 | #include <linux/vmalloc.h> | |
12 | #include <linux/blkdev.h> | |
13 | #include <linux/namei.h> | |
14 | #include <linux/ctype.h> | |
15 | #include <linux/slab.h> | |
16 | #include <linux/interrupt.h> | |
48c9c27b | 17 | #include <linux/mutex.h> |
d5816876 | 18 | #include <linux/delay.h> |
1da177e4 LT |
19 | #include <asm/atomic.h> |
20 | ||
72d94861 AK |
21 | #define DM_MSG_PREFIX "table" |
22 | ||
1da177e4 LT |
23 | #define MAX_DEPTH 16 |
24 | #define NODE_SIZE L1_CACHE_BYTES | |
25 | #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) | |
26 | #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) | |
27 | ||
d5816876 MP |
28 | /* |
29 | * The table has always exactly one reference from either mapped_device->map | |
30 | * or hash_cell->new_map. This reference is not counted in table->holders. | |
31 | * A pair of dm_create_table/dm_destroy_table functions is used for table | |
32 | * creation/destruction. | |
33 | * | |
34 | * Temporary references from the other code increase table->holders. A pair | |
35 | * of dm_table_get/dm_table_put functions is used to manipulate it. | |
36 | * | |
37 | * When the table is about to be destroyed, we wait for table->holders to | |
38 | * drop to zero. | |
39 | */ | |
40 | ||
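To make the holders contract above concrete, here is a minimal sketch of a temporary reference (hypothetical caller code, not part of this file): any transient user brackets its access with dm_table_get()/dm_table_put(), and dm_table_destroy() below waits for holders to reach zero before freeing.

```c
/* Hypothetical caller illustrating the holders protocol described above. */
static void inspect_table(struct dm_table *t)
{
	dm_table_get(t);	/* holders++: the table cannot be destroyed now */

	/* ... read whatever is needed from t ... */

	dm_table_put(t);	/* holders--: dm_table_destroy() may proceed */
}
```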
1da177e4 | 41 | struct dm_table { |
1134e5ae | 42 | struct mapped_device *md; |
1da177e4 LT |
43 | atomic_t holders; |
44 | ||
45 | /* btree table */ | |
46 | unsigned int depth; | |
47 | unsigned int counts[MAX_DEPTH]; /* in nodes */ | |
48 | sector_t *index[MAX_DEPTH]; | |
49 | ||
50 | unsigned int num_targets; | |
51 | unsigned int num_allocated; | |
52 | sector_t *highs; | |
53 | struct dm_target *targets; | |
54 | ||
55 | /* | |
56 | * Indicates the rw permissions for the new logical | |
57 | * device. This should be a combination of FMODE_READ | |
58 | * and FMODE_WRITE. | |
59 | */ | |
aeb5d727 | 60 | fmode_t mode; |
1da177e4 LT |
61 | |
62 | /* a list of devices used by this table */ | |
63 | struct list_head devices; | |
64 | ||
65 | /* | |
66 | * These are optimistic limits taken from all the | |
67 | * targets, some targets will need smaller limits. | |
68 | */ | |
69 | struct io_restrictions limits; | |
70 | ||
71 | /* events get handed up using this callback */ | |
72 | void (*event_fn)(void *); | |
73 | void *event_context; | |
74 | }; | |
75 | ||
76 | /* | |
77 | * Similar to ceiling(log_base(n)); e.g. int_log(1000, 10) == 3. |
78 | */ | |
79 | static unsigned int int_log(unsigned int n, unsigned int base) | |
80 | { | |
81 | int result = 0; | |
82 | ||
83 | while (n > 1) { | |
84 | n = dm_div_up(n, base); | |
85 | result++; | |
86 | } | |
87 | ||
88 | return result; | |
89 | } | |
90 | ||
91 | /* | |
92 | * Returns the minimum that is _not_ zero, unless both are zero. | |
93 | */ | |
94 | #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r))) |
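The parentheses around the whole expansion matter: without them the ternary binds badly inside larger expressions. A userspace sketch of the intended semantics (a plain min() stands in for the kernel macro); zero means "no limit set", so it never wins unless both sides are unset:

```c
/* Userspace illustration of min_not_zero(); not kernel code. */
#include <assert.h>

#define min(a, b) ((a) < (b) ? (a) : (b))	/* stand-in for the kernel's min() */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

int main(void)
{
	assert(min_not_zero(0, 8) == 8);	/* 0 means "unset", so 8 wins */
	assert(min_not_zero(8, 0) == 8);
	assert(min_not_zero(4, 8) == 4);	/* both set: ordinary minimum */
	assert(min_not_zero(0, 0) == 0);	/* both unset: stays unset */
	return 0;
}
```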
95 | ||
96 | /* | |
97 | * Combine two io_restrictions, always taking the lower value. | |
98 | */ | |
99 | static void combine_restrictions_low(struct io_restrictions *lhs, | |
100 | struct io_restrictions *rhs) | |
101 | { | |
102 | lhs->max_sectors = | |
103 | min_not_zero(lhs->max_sectors, rhs->max_sectors); | |
104 | ||
105 | lhs->max_phys_segments = | |
106 | min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments); | |
107 | ||
108 | lhs->max_hw_segments = | |
109 | min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); | |
110 | ||
111 | lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size); | |
112 | ||
113 | lhs->max_segment_size = | |
114 | min_not_zero(lhs->max_segment_size, rhs->max_segment_size); | |
115 | ||
91212507 NB |
116 | lhs->max_hw_sectors = |
117 | min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors); | |
118 | ||
1da177e4 LT |
119 | lhs->seg_boundary_mask = |
120 | min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); | |
969429b5 | 121 | |
5ec140e6 VA |
122 | lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn); |
123 | ||
969429b5 | 124 | lhs->no_cluster |= rhs->no_cluster; |
1da177e4 LT |
125 | } |
126 | ||
127 | /* | |
128 | * Calculate the index of the child node of the n'th node's k'th key. |
129 | */ | |
130 | static inline unsigned int get_child(unsigned int n, unsigned int k) | |
131 | { | |
132 | return (n * CHILDREN_PER_NODE) + k; | |
133 | } | |
134 | ||
135 | /* | |
136 | * Return the n'th node of level l from table t. | |
137 | */ | |
138 | static inline sector_t *get_node(struct dm_table *t, | |
139 | unsigned int l, unsigned int n) | |
140 | { | |
141 | return t->index[l] + (n * KEYS_PER_NODE); | |
142 | } | |
143 | ||
144 | /* | |
145 | * Return the highest key that you could look up from the n'th |
146 | * node on level l of the btree. | |
147 | */ | |
148 | static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) | |
149 | { | |
150 | for (; l < t->depth - 1; l++) | |
151 | n = get_child(n, CHILDREN_PER_NODE - 1); | |
152 | ||
153 | if (n >= t->counts[l]) | |
154 | return (sector_t) - 1; | |
155 | ||
156 | return get_node(t, l, n)[KEYS_PER_NODE - 1]; | |
157 | } | |
158 | ||
159 | /* | |
160 | * Fills in a level of the btree based on the highs of the level | |
161 | * below it. | |
162 | */ | |
163 | static int setup_btree_index(unsigned int l, struct dm_table *t) | |
164 | { | |
165 | unsigned int n, k; | |
166 | sector_t *node; | |
167 | ||
168 | for (n = 0U; n < t->counts[l]; n++) { | |
169 | node = get_node(t, l, n); | |
170 | ||
171 | for (k = 0U; k < KEYS_PER_NODE; k++) | |
172 | node[k] = high(t, l + 1, get_child(n, k)); | |
173 | } | |
174 | ||
175 | return 0; | |
176 | } | |
177 | ||
178 | void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size) | |
179 | { | |
180 | unsigned long size; | |
181 | void *addr; | |
182 | ||
183 | /* | |
184 | * Check that we're not going to overflow. | |
185 | */ | |
186 | if (nmemb > (ULONG_MAX / elem_size)) | |
187 | return NULL; | |
188 | ||
189 | size = nmemb * elem_size; | |
190 | addr = vmalloc(size); | |
191 | if (addr) | |
192 | memset(addr, 0, size); | |
193 | ||
194 | return addr; | |
195 | } | |
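The guard above rejects any nmemb * elem_size product that would wrap around an unsigned long (it assumes elem_size is non-zero, which holds for every caller here). A standalone sketch of the same check:

```c
/* Userspace sketch of dm_vcalloc()'s overflow guard; illustrative only. */
#include <limits.h>
#include <stdio.h>

static int would_overflow(unsigned long nmemb, unsigned long elem_size)
{
	return nmemb > (ULONG_MAX / elem_size);	/* elem_size must be non-zero */
}

int main(void)
{
	printf("%d\n", would_overflow(ULONG_MAX / 8, 16));	/* 1: product wraps */
	printf("%d\n", would_overflow(1024, 16));		/* 0: safe to multiply */
	return 0;
}
```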
196 | ||
197 | /* | |
198 | * highs and targets are managed as dynamic arrays during a |
199 | * table load. | |
200 | */ | |
201 | static int alloc_targets(struct dm_table *t, unsigned int num) | |
202 | { | |
203 | sector_t *n_highs; | |
204 | struct dm_target *n_targets; | |
205 | int n = t->num_targets; | |
206 | ||
207 | /* | |
208 | * Allocate both the target array and offset array at once. | |
512875bd JN |
209 | * Append an empty entry to catch sectors beyond the end of |
210 | * the device. | |
1da177e4 | 211 | */ |
512875bd | 212 | n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) + |
1da177e4 LT |
213 | sizeof(sector_t)); |
214 | if (!n_highs) | |
215 | return -ENOMEM; | |
216 | ||
217 | n_targets = (struct dm_target *) (n_highs + num); | |
218 | ||
219 | if (n) { | |
220 | memcpy(n_highs, t->highs, sizeof(*n_highs) * n); | |
221 | memcpy(n_targets, t->targets, sizeof(*n_targets) * n); | |
222 | } | |
223 | ||
224 | memset(n_highs + n, -1, sizeof(*n_highs) * (num - n)); | |
225 | vfree(t->highs); | |
226 | ||
227 | t->num_allocated = num; | |
228 | t->highs = n_highs; | |
229 | t->targets = n_targets; | |
230 | ||
231 | return 0; | |
232 | } | |
233 | ||
aeb5d727 | 234 | int dm_table_create(struct dm_table **result, fmode_t mode, |
1134e5ae | 235 | unsigned num_targets, struct mapped_device *md) |
1da177e4 | 236 | { |
094262db | 237 | struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); |
1da177e4 LT |
238 | |
239 | if (!t) | |
240 | return -ENOMEM; | |
241 | ||
1da177e4 | 242 | INIT_LIST_HEAD(&t->devices); |
d5816876 | 243 | atomic_set(&t->holders, 0); |
1da177e4 LT |
244 | |
245 | if (!num_targets) | |
246 | num_targets = KEYS_PER_NODE; | |
247 | ||
248 | num_targets = dm_round_up(num_targets, KEYS_PER_NODE); | |
249 | ||
250 | if (alloc_targets(t, num_targets)) { | |
251 | kfree(t); | |
252 | t = NULL; | |
253 | return -ENOMEM; | |
254 | } | |
255 | ||
256 | t->mode = mode; | |
1134e5ae | 257 | t->md = md; |
1da177e4 LT |
258 | *result = t; |
259 | return 0; | |
260 | } | |
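For orientation, the ioctl layer drives these entry points in roughly the following order (a condensed, hypothetical sketch with error paths trimmed, not the actual dm-ioctl code):

```c
/* Hedged sketch of a table's life cycle; load_one_table() is hypothetical. */
static int load_one_table(struct mapped_device *md, fmode_t mode)
{
	struct dm_table *t;
	char params[] = "/dev/sda 0";	/* writable: the ctr splits it in place */
	int r;

	r = dm_table_create(&t, mode, 1 /* a hint; the array grows on demand */, md);
	if (r)
		return r;

	/* one dm_table_add_target() call per row of the incoming table */
	r = dm_table_add_target(t, "linear", 0, 1024, params);
	if (!r)
		r = dm_table_complete(t);	/* builds the lookup btree */

	if (r)
		dm_table_destroy(t);
	return r;
}
```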
261 | ||
262 | static void free_devices(struct list_head *devices) | |
263 | { | |
264 | struct list_head *tmp, *next; | |
265 | ||
afb24528 | 266 | list_for_each_safe(tmp, next, devices) { |
82b1519b MP |
267 | struct dm_dev_internal *dd = |
268 | list_entry(tmp, struct dm_dev_internal, list); | |
1da177e4 LT |
269 | kfree(dd); |
270 | } | |
271 | } | |
272 | ||
d5816876 | 273 | void dm_table_destroy(struct dm_table *t) |
1da177e4 LT |
274 | { |
275 | unsigned int i; | |
276 | ||
d5816876 MP |
277 | while (atomic_read(&t->holders)) |
278 | msleep(1); | |
279 | smp_mb(); | |
280 | ||
1da177e4 LT |
281 | /* free the indexes (see dm_table_complete) */ |
282 | if (t->depth >= 2) | |
283 | vfree(t->index[t->depth - 2]); | |
284 | ||
285 | /* free the targets */ | |
286 | for (i = 0; i < t->num_targets; i++) { | |
287 | struct dm_target *tgt = t->targets + i; | |
288 | ||
289 | if (tgt->type->dtr) | |
290 | tgt->type->dtr(tgt); | |
291 | ||
292 | dm_put_target_type(tgt->type); | |
293 | } | |
294 | ||
295 | vfree(t->highs); | |
296 | ||
297 | /* free the device list */ | |
298 | if (t->devices.next != &t->devices) { | |
299 | DMWARN("devices still present during destroy: " | |
300 | "dm_table_remove_device calls missing"); | |
301 | ||
302 | free_devices(&t->devices); | |
303 | } | |
304 | ||
305 | kfree(t); | |
306 | } | |
307 | ||
308 | void dm_table_get(struct dm_table *t) | |
309 | { | |
310 | atomic_inc(&t->holders); | |
311 | } | |
312 | ||
313 | void dm_table_put(struct dm_table *t) | |
314 | { | |
315 | if (!t) | |
316 | return; | |
317 | ||
d5816876 MP |
318 | smp_mb__before_atomic_dec(); |
319 | atomic_dec(&t->holders); | |
1da177e4 LT |
320 | } |
321 | ||
322 | /* | |
323 | * Checks to see if we need to extend highs or targets. | |
324 | */ | |
325 | static inline int check_space(struct dm_table *t) | |
326 | { | |
327 | if (t->num_targets >= t->num_allocated) | |
328 | return alloc_targets(t, t->num_allocated * 2); | |
329 | ||
330 | return 0; | |
331 | } | |
332 | ||
1da177e4 LT |
333 | /* |
334 | * See if we've already got a device in the list. | |
335 | */ | |
82b1519b | 336 | static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) |
1da177e4 | 337 | { |
82b1519b | 338 | struct dm_dev_internal *dd; |
1da177e4 LT |
339 | |
340 | list_for_each_entry (dd, l, list) | |
82b1519b | 341 | if (dd->dm_dev.bdev->bd_dev == dev) |
1da177e4 LT |
342 | return dd; |
343 | ||
344 | return NULL; | |
345 | } | |
346 | ||
347 | /* | |
348 | * Open a device so we can use it as a map destination. | |
349 | */ | |
82b1519b MP |
350 | static int open_dev(struct dm_dev_internal *d, dev_t dev, |
351 | struct mapped_device *md) | |
1da177e4 LT |
352 | { |
353 | static char *_claim_ptr = "I belong to device-mapper"; | |
354 | struct block_device *bdev; | |
355 | ||
356 | int r; | |
357 | ||
82b1519b | 358 | BUG_ON(d->dm_dev.bdev); |
1da177e4 | 359 | |
82b1519b | 360 | bdev = open_by_devnum(dev, d->dm_dev.mode); |
1da177e4 LT |
361 | if (IS_ERR(bdev)) |
362 | return PTR_ERR(bdev); | |
f165921d | 363 | r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md)); |
1da177e4 | 364 | if (r) |
9a1c3542 | 365 | blkdev_put(bdev, d->dm_dev.mode); |
1da177e4 | 366 | else |
82b1519b | 367 | d->dm_dev.bdev = bdev; |
1da177e4 LT |
368 | return r; |
369 | } | |
370 | ||
371 | /* | |
372 | * Close a device that we've been using. | |
373 | */ | |
82b1519b | 374 | static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) |
1da177e4 | 375 | { |
82b1519b | 376 | if (!d->dm_dev.bdev) |
1da177e4 LT |
377 | return; |
378 | ||
82b1519b | 379 | bd_release_from_disk(d->dm_dev.bdev, dm_disk(md)); |
9a1c3542 | 380 | blkdev_put(d->dm_dev.bdev, d->dm_dev.mode); |
82b1519b | 381 | d->dm_dev.bdev = NULL; |
1da177e4 LT |
382 | } |
383 | ||
384 | /* | |
2cd54d9b | 385 | * If possible, this checks that an area of the destination device is valid. |
1da177e4 | 386 | */ |
82b1519b MP |
387 | static int check_device_area(struct dm_dev_internal *dd, sector_t start, |
388 | sector_t len) | |
1da177e4 | 389 | { |
82b1519b | 390 | sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT; |
2cd54d9b MA |
391 | |
392 | if (!dev_size) | |
393 | return 1; | |
394 | ||
1da177e4 LT |
395 | return ((start < dev_size) && (len <= (dev_size - start))); |
396 | } | |
397 | ||
398 | /* | |
570b9d96 | 399 | * This upgrades the mode on an already open dm_dev, being |
1da177e4 | 400 | * careful to leave things as they were if we fail to reopen the |
570b9d96 AK |
401 | * device and not to touch the existing bdev field in case |
402 | * it is accessed concurrently inside dm_table_any_congested(). | |
1da177e4 | 403 | */ |
aeb5d727 | 404 | static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, |
82b1519b | 405 | struct mapped_device *md) |
1da177e4 LT |
406 | { |
407 | int r; | |
570b9d96 | 408 | struct dm_dev_internal dd_new, dd_old; |
1da177e4 | 409 | |
570b9d96 AK |
410 | dd_new = dd_old = *dd; |
411 | ||
412 | dd_new.dm_dev.mode |= new_mode; | |
413 | dd_new.dm_dev.bdev = NULL; | |
414 | ||
415 | r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md); | |
416 | if (r) | |
417 | return r; | |
1da177e4 | 418 | |
82b1519b | 419 | dd->dm_dev.mode |= new_mode; |
570b9d96 | 420 | close_dev(&dd_old, md); |
1da177e4 | 421 | |
570b9d96 | 422 | return 0; |
1da177e4 LT |
423 | } |
424 | ||
425 | /* | |
426 | * Add a device to the list, or just increment the usage count if | |
427 | * it's already present. | |
428 | */ | |
429 | static int __table_get_device(struct dm_table *t, struct dm_target *ti, | |
430 | const char *path, sector_t start, sector_t len, | |
aeb5d727 | 431 | fmode_t mode, struct dm_dev **result) |
1da177e4 LT |
432 | { |
433 | int r; | |
69a2ce72 | 434 | dev_t uninitialized_var(dev); |
82b1519b | 435 | struct dm_dev_internal *dd; |
1da177e4 LT |
436 | unsigned int major, minor; |
437 | ||
547bc926 | 438 | BUG_ON(!t); |
1da177e4 LT |
439 | |
440 | if (sscanf(path, "%u:%u", &major, &minor) == 2) { | |
441 | /* Extract the major/minor numbers */ | |
442 | dev = MKDEV(major, minor); | |
443 | if (MAJOR(dev) != major || MINOR(dev) != minor) | |
444 | return -EOVERFLOW; | |
445 | } else { | |
446 | /* convert the path to a device */ | |
72e8264e CH |
447 | struct block_device *bdev = lookup_bdev(path); |
448 | ||
449 | if (IS_ERR(bdev)) | |
450 | return PTR_ERR(bdev); | |
451 | dev = bdev->bd_dev; | |
452 | bdput(bdev); | |
1da177e4 LT |
453 | } |
454 | ||
455 | dd = find_device(&t->devices, dev); | |
456 | if (!dd) { | |
457 | dd = kmalloc(sizeof(*dd), GFP_KERNEL); | |
458 | if (!dd) | |
459 | return -ENOMEM; | |
460 | ||
82b1519b MP |
461 | dd->dm_dev.mode = mode; |
462 | dd->dm_dev.bdev = NULL; | |
1da177e4 | 463 | |
f165921d | 464 | if ((r = open_dev(dd, dev, t->md))) { |
1da177e4 LT |
465 | kfree(dd); |
466 | return r; | |
467 | } | |
468 | ||
82b1519b | 469 | format_dev_t(dd->dm_dev.name, dev); |
1da177e4 LT |
470 | |
471 | atomic_set(&dd->count, 0); | |
472 | list_add(&dd->list, &t->devices); | |
473 | ||
82b1519b | 474 | } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) { |
f165921d | 475 | r = upgrade_mode(dd, mode, t->md); |
1da177e4 LT |
476 | if (r) |
477 | return r; | |
478 | } | |
479 | atomic_inc(&dd->count); | |
480 | ||
481 | if (!check_device_area(dd, start, len)) { | |
482 | DMWARN("device %s too small for target", path); | |
82b1519b | 483 | dm_put_device(ti, &dd->dm_dev); |
1da177e4 LT |
484 | return -EINVAL; |
485 | } | |
486 | ||
82b1519b | 487 | *result = &dd->dm_dev; |
1da177e4 LT |
488 | |
489 | return 0; | |
490 | } | |
491 | ||
3cb40214 | 492 | void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) |
1da177e4 | 493 | { |
165125e1 | 494 | struct request_queue *q = bdev_get_queue(bdev); |
3cb40214 | 495 | struct io_restrictions *rs = &ti->limits; |
0c2322e4 AK |
496 | char b[BDEVNAME_SIZE]; |
497 | ||
498 | if (unlikely(!q)) { | |
499 | DMWARN("%s: Cannot set limits for nonexistent device %s", | |
500 | dm_device_name(ti->table->md), bdevname(bdev, b)); | |
501 | return; | |
502 | } | |
3cb40214 BR |
503 | |
504 | /* | |
505 | * Combine the device limits low. | |
506 | * | |
507 | * FIXME: if we move an io_restriction struct | |
508 | * into q this would just be a call to | |
509 | * combine_restrictions_low() | |
510 | */ | |
511 | rs->max_sectors = | |
512 | min_not_zero(rs->max_sectors, q->max_sectors); | |
513 | ||
9980c638 MB |
514 | /* |
515 | * Check if merge fn is supported. | |
516 | * If not we'll force DM to use PAGE_SIZE or | |
517 | * smaller I/O, just to be safe. | |
3cb40214 | 518 | */ |
9980c638 MB |
519 | |
520 | if (q->merge_bvec_fn && !ti->type->merge) | |
1da177e4 | 521 | rs->max_sectors = |
3cb40214 BR |
522 | min_not_zero(rs->max_sectors, |
523 | (unsigned int) (PAGE_SIZE >> 9)); | |
1da177e4 | 524 | |
3cb40214 BR |
525 | rs->max_phys_segments = |
526 | min_not_zero(rs->max_phys_segments, | |
527 | q->max_phys_segments); | |
1da177e4 | 528 | |
3cb40214 BR |
529 | rs->max_hw_segments = |
530 | min_not_zero(rs->max_hw_segments, q->max_hw_segments); | |
1da177e4 | 531 | |
3cb40214 | 532 | rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size); |
1da177e4 | 533 | |
3cb40214 BR |
534 | rs->max_segment_size = |
535 | min_not_zero(rs->max_segment_size, q->max_segment_size); | |
1da177e4 | 536 | |
91212507 NB |
537 | rs->max_hw_sectors = |
538 | min_not_zero(rs->max_hw_sectors, q->max_hw_sectors); | |
539 | ||
3cb40214 BR |
540 | rs->seg_boundary_mask = |
541 | min_not_zero(rs->seg_boundary_mask, | |
542 | q->seg_boundary_mask); | |
1da177e4 | 543 | |
5ec140e6 VA |
544 | rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn); |
545 | ||
3cb40214 BR |
546 | rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); |
547 | } | |
548 | EXPORT_SYMBOL_GPL(dm_set_device_limits); | |
969429b5 | 549 | |
3cb40214 | 550 | int dm_get_device(struct dm_target *ti, const char *path, sector_t start, |
aeb5d727 | 551 | sector_t len, fmode_t mode, struct dm_dev **result) |
3cb40214 BR |
552 | { |
553 | int r = __table_get_device(ti->table, ti, path, | |
554 | start, len, mode, result); | |
555 | ||
556 | if (!r) | |
557 | dm_set_device_limits(ti, (*result)->bdev); | |
1da177e4 LT |
558 | |
559 | return r; | |
560 | } | |
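A typical caller is a target constructor; the sketch below is modeled on (but is not literally) the dm-linear ctr, claiming one underlying device for the target's whole length:

```c
/* Hypothetical target constructor showing the usual dm_get_device() call. */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_dev *dev;

	if (dm_get_device(ti, argv[0], 0 /* start */, ti->len,
			  dm_table_get_mode(ti->table), &dev)) {
		ti->error = "device lookup failed";
		return -EINVAL;
	}

	ti->private = dev;	/* the matching dm_put_device() goes in ->dtr */
	return 0;
}
```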
561 | ||
562 | /* | |
563 | * Decrement a device's use count and remove it if necessary. |
564 | */ | |
82b1519b | 565 | void dm_put_device(struct dm_target *ti, struct dm_dev *d) |
1da177e4 | 566 | { |
82b1519b MP |
567 | struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal, |
568 | dm_dev); | |
569 | ||
1da177e4 | 570 | if (atomic_dec_and_test(&dd->count)) { |
f165921d | 571 | close_dev(dd, ti->table->md); |
1da177e4 LT |
572 | list_del(&dd->list); |
573 | kfree(dd); | |
574 | } | |
575 | } | |
576 | ||
577 | /* | |
578 | * Checks to see if the target joins onto the end of the table. | |
579 | */ | |
580 | static int adjoin(struct dm_table *table, struct dm_target *ti) | |
581 | { | |
582 | struct dm_target *prev; | |
583 | ||
584 | if (!table->num_targets) | |
585 | return !ti->begin; | |
586 | ||
587 | prev = &table->targets[table->num_targets - 1]; | |
588 | return (ti->begin == (prev->begin + prev->len)); | |
589 | } | |
590 | ||
591 | /* | |
592 | * Used to dynamically allocate the arg array. | |
593 | */ | |
594 | static char **realloc_argv(unsigned *array_size, char **old_argv) | |
595 | { | |
596 | char **argv; | |
597 | unsigned new_size; | |
598 | ||
599 | new_size = *array_size ? *array_size * 2 : 64; | |
600 | argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL); | |
601 | if (argv) { | |
602 | memcpy(argv, old_argv, *array_size * sizeof(*argv)); | |
603 | *array_size = new_size; | |
604 | } | |
605 | ||
606 | kfree(old_argv); | |
607 | return argv; | |
608 | } | |
609 | ||
610 | /* | |
611 | * Destructively splits up the argument list to pass to ctr. | |
612 | */ | |
613 | int dm_split_args(int *argc, char ***argvp, char *input) | |
614 | { | |
615 | char *start, *end = input, *out, **argv = NULL; | |
616 | unsigned array_size = 0; | |
617 | ||
618 | *argc = 0; | |
814d6862 DT |
619 | |
620 | if (!input) { | |
621 | *argvp = NULL; | |
622 | return 0; | |
623 | } | |
624 | ||
1da177e4 LT |
625 | argv = realloc_argv(&array_size, argv); |
626 | if (!argv) | |
627 | return -ENOMEM; | |
628 | ||
629 | while (1) { | |
630 | start = end; | |
631 | ||
632 | /* Skip whitespace */ | |
633 | while (*start && isspace(*start)) | |
634 | start++; | |
635 | ||
636 | if (!*start) | |
637 | break; /* success, we hit the end */ | |
638 | ||
639 | /* 'out' is used to remove any back-quotes */ | |
640 | end = out = start; | |
641 | while (*end) { | |
642 | /* Everything apart from '\0' can be quoted */ | |
643 | if (*end == '\\' && *(end + 1)) { | |
644 | *out++ = *(end + 1); | |
645 | end += 2; | |
646 | continue; | |
647 | } | |
648 | ||
649 | if (isspace(*end)) | |
650 | break; /* end of token */ | |
651 | ||
652 | *out++ = *end++; | |
653 | } | |
654 | ||
655 | /* have we already filled the array? */ |
656 | if ((*argc + 1) > array_size) { | |
657 | argv = realloc_argv(&array_size, argv); | |
658 | if (!argv) | |
659 | return -ENOMEM; | |
660 | } | |
661 | ||
662 | /* we know this is whitespace */ | |
663 | if (*end) | |
664 | end++; | |
665 | ||
666 | /* terminate the string and put it in the array */ | |
667 | *out = '\0'; | |
668 | argv[*argc] = start; | |
669 | (*argc)++; | |
670 | } | |
671 | ||
672 | *argvp = argv; | |
673 | return 0; | |
674 | } | |
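To see the quoting rule in action, consider this (necessarily writable) input; a backslash makes the following character literal, so an escaped space does not split the token:

```c
/* Illustration of dm_split_args(); kernel-context sketch. */
static void split_example(void)
{
	char line[] = "0 1024 linear /dev/mapper/a\\ b 0";	/* edited in place */
	char **argv;
	int argc;

	if (dm_split_args(&argc, &argv, line))
		return;		/* -ENOMEM */

	/* argc == 5; argv[3] is "/dev/mapper/a b" (backslash consumed) */

	kfree(argv);	/* entries point into 'line'; only the array is freed */
}
```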
675 | ||
676 | static void check_for_valid_limits(struct io_restrictions *rs) | |
677 | { | |
678 | if (!rs->max_sectors) | |
defd94b7 | 679 | rs->max_sectors = SAFE_MAX_SECTORS; |
91212507 NB |
680 | if (!rs->max_hw_sectors) |
681 | rs->max_hw_sectors = SAFE_MAX_SECTORS; | |
1da177e4 LT |
682 | if (!rs->max_phys_segments) |
683 | rs->max_phys_segments = MAX_PHYS_SEGMENTS; | |
684 | if (!rs->max_hw_segments) | |
685 | rs->max_hw_segments = MAX_HW_SEGMENTS; | |
686 | if (!rs->hardsect_size) | |
687 | rs->hardsect_size = 1 << SECTOR_SHIFT; | |
688 | if (!rs->max_segment_size) | |
689 | rs->max_segment_size = MAX_SEGMENT_SIZE; | |
690 | if (!rs->seg_boundary_mask) | |
0e435ac2 | 691 | rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; |
5ec140e6 VA |
692 | if (!rs->bounce_pfn) |
693 | rs->bounce_pfn = -1; | |
1da177e4 LT |
694 | } |
695 | ||
696 | int dm_table_add_target(struct dm_table *t, const char *type, | |
697 | sector_t start, sector_t len, char *params) | |
698 | { | |
699 | int r = -EINVAL, argc; | |
700 | char **argv; | |
701 | struct dm_target *tgt; | |
702 | ||
703 | if ((r = check_space(t))) | |
704 | return r; | |
705 | ||
706 | tgt = t->targets + t->num_targets; | |
707 | memset(tgt, 0, sizeof(*tgt)); | |
708 | ||
709 | if (!len) { | |
72d94861 | 710 | DMERR("%s: zero-length target", dm_device_name(t->md)); |
1da177e4 LT |
711 | return -EINVAL; |
712 | } | |
713 | ||
714 | tgt->type = dm_get_target_type(type); | |
715 | if (!tgt->type) { | |
72d94861 AK |
716 | DMERR("%s: %s: unknown target type", dm_device_name(t->md), |
717 | type); | |
1da177e4 LT |
718 | return -EINVAL; |
719 | } | |
720 | ||
721 | tgt->table = t; | |
722 | tgt->begin = start; | |
723 | tgt->len = len; | |
724 | tgt->error = "Unknown error"; | |
725 | ||
726 | /* | |
727 | * Does this target adjoin the previous one? |
728 | */ | |
729 | if (!adjoin(t, tgt)) { | |
730 | tgt->error = "Gap in table"; | |
731 | r = -EINVAL; | |
732 | goto bad; | |
733 | } | |
734 | ||
735 | r = dm_split_args(&argc, &argv, params); | |
736 | if (r) { | |
737 | tgt->error = "couldn't split parameters (insufficient memory)"; | |
738 | goto bad; | |
739 | } | |
740 | ||
741 | r = tgt->type->ctr(tgt, argc, argv); | |
742 | kfree(argv); | |
743 | if (r) | |
744 | goto bad; | |
745 | ||
746 | t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; | |
747 | ||
748 | /* FIXME: the plan is to combine high here and then have | |
749 | * the merge fn apply the target level restrictions. */ | |
750 | combine_restrictions_low(&t->limits, &tgt->limits); | |
751 | return 0; | |
752 | ||
753 | bad: | |
72d94861 | 754 | DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); |
1da177e4 LT |
755 | dm_put_target_type(tgt->type); |
756 | return r; | |
757 | } | |
758 | ||
759 | static int setup_indexes(struct dm_table *t) | |
760 | { | |
761 | int i; | |
762 | unsigned int total = 0; | |
763 | sector_t *indexes; | |
764 | ||
765 | /* allocate the space for *all* the indexes */ | |
766 | for (i = t->depth - 2; i >= 0; i--) { | |
767 | t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); | |
768 | total += t->counts[i]; | |
769 | } | |
770 | ||
771 | indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE); | |
772 | if (!indexes) | |
773 | return -ENOMEM; | |
774 | ||
775 | /* set up internal nodes, bottom-up */ | |
82d601dc | 776 | for (i = t->depth - 2; i >= 0; i--) { |
1da177e4 LT |
777 | t->index[i] = indexes; |
778 | indexes += (KEYS_PER_NODE * t->counts[i]); | |
779 | setup_btree_index(i, t); | |
780 | } | |
781 | ||
782 | return 0; | |
783 | } | |
784 | ||
785 | /* | |
786 | * Builds the btree to index the map. | |
787 | */ | |
788 | int dm_table_complete(struct dm_table *t) | |
789 | { | |
790 | int r = 0; | |
791 | unsigned int leaf_nodes; | |
792 | ||
793 | check_for_valid_limits(&t->limits); | |
794 | ||
795 | /* how many indexes will the btree have? */ |
796 | leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); | |
797 | t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); | |
798 | ||
799 | /* leaf layer has already been set up */ | |
800 | t->counts[t->depth - 1] = leaf_nodes; | |
801 | t->index[t->depth - 1] = t->highs; | |
802 | ||
803 | if (t->depth >= 2) | |
804 | r = setup_indexes(t); | |
805 | ||
806 | return r; | |
807 | } | |
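A worked sizing example may help (assuming 64-byte cache lines and an 8-byte sector_t; both are common but configuration-dependent):

```c
/* Worked example for dm_table_complete(), under the assumptions above:   */
/*   KEYS_PER_NODE     = 64 / 8 = 8, CHILDREN_PER_NODE = 9                */
/*   num_targets       = 100                                              */
/*   leaf_nodes        = dm_div_up(100, 8)  = 13                          */
/*   depth             = 1 + int_log(13, 9) = 1 + 2 = 3                   */
/* setup_indexes() then fills counts[] bottom-up:                         */
/*   level 2: 13 leaf nodes (t->highs itself)                             */
/*   level 1: dm_div_up(13, 9) = 2 nodes                                  */
/*   level 0: dm_div_up(2, 9)  = 1 root node                              */
```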
808 | ||
48c9c27b | 809 | static DEFINE_MUTEX(_event_lock); |
1da177e4 LT |
810 | void dm_table_event_callback(struct dm_table *t, |
811 | void (*fn)(void *), void *context) | |
812 | { | |
48c9c27b | 813 | mutex_lock(&_event_lock); |
1da177e4 LT |
814 | t->event_fn = fn; |
815 | t->event_context = context; | |
48c9c27b | 816 | mutex_unlock(&_event_lock); |
1da177e4 LT |
817 | } |
818 | ||
819 | void dm_table_event(struct dm_table *t) | |
820 | { | |
821 | /* | |
822 | * You can no longer call dm_table_event() from interrupt | |
823 | * context, use a bottom half instead. | |
824 | */ | |
825 | BUG_ON(in_interrupt()); | |
826 | ||
48c9c27b | 827 | mutex_lock(&_event_lock); |
1da177e4 LT |
828 | if (t->event_fn) |
829 | t->event_fn(t->event_context); | |
48c9c27b | 830 | mutex_unlock(&_event_lock); |
1da177e4 LT |
831 | } |
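Since dm_table_event() sleeps on a mutex, a target that notices an event in interrupt context has to defer the call; here is a hedged sketch of the usual workqueue pattern (hypothetical target code, in the spirit of what dm-raid1 does):

```c
/* Hypothetical target-side sketch: defer dm_table_event() to process context. */
struct example_tgt {
	struct dm_target *ti;
	struct work_struct trigger_event;	/* INIT_WORK()ed in the ctr */
};

static void trigger_event_fn(struct work_struct *work)
{
	struct example_tgt *et =
		container_of(work, struct example_tgt, trigger_event);

	dm_table_event(et->ti->table);	/* process context: may take the mutex */
}

/* from interrupt/completion context: schedule_work(&et->trigger_event); */
```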
832 | ||
833 | sector_t dm_table_get_size(struct dm_table *t) | |
834 | { | |
835 | return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; | |
836 | } | |
837 | ||
838 | struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) | |
839 | { | |
14353539 | 840 | if (index >= t->num_targets) |
1da177e4 LT |
841 | return NULL; |
842 | ||
843 | return t->targets + index; | |
844 | } | |
845 | ||
846 | /* | |
847 | * Search the btree for the correct target. | |
512875bd JN |
848 | * |
849 | * Caller should check returned pointer with dm_target_is_valid() | |
850 | * to trap I/O beyond end of device. | |
1da177e4 LT |
851 | */ |
852 | struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) | |
853 | { | |
854 | unsigned int l, n = 0, k = 0; | |
855 | sector_t *node; | |
856 | ||
857 | for (l = 0; l < t->depth; l++) { | |
858 | n = get_child(n, k); | |
859 | node = get_node(t, l, n); | |
860 | ||
861 | for (k = 0; k < KEYS_PER_NODE; k++) | |
862 | if (node[k] >= sector) | |
863 | break; | |
864 | } | |
865 | ||
866 | return &t->targets[(KEYS_PER_NODE * n) + k]; | |
867 | } | |
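Continuing the sizing example after dm_table_complete() above: a lookup reads one node per level, and within each node picks the first key >= sector (a worked trace, not extra code in this file):

```c
/* Worked lookup trace for the 100-target, depth-3 example:              */
/*   level 0: n = 0 (root); scan its 8 keys, stop at the first >= sector */
/*   level 1: n = get_child(0, k); scan that node's keys the same way    */
/*   level 2: n now names a leaf; node[k] is highs[8 * n + k], so        */
/*            targets[8 * n + k] is returned                             */
/* A sector past the end matches the (sector_t) -1 sentinels written by  */
/* alloc_targets() and lands on the appended empty entry, which the      */
/* dm_target_is_valid() check mentioned above then rejects.              */
```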
868 | ||
9c47008d MP |
869 | /* |
870 | * Set the integrity profile for this device if all devices used have | |
871 | * matching profiles. | |
872 | */ | |
873 | static void dm_table_set_integrity(struct dm_table *t) | |
874 | { | |
875 | struct list_head *devices = dm_table_get_devices(t); | |
876 | struct dm_dev_internal *prev = NULL, *dd = NULL; | |
877 | ||
878 | if (!blk_get_integrity(dm_disk(t->md))) | |
879 | return; | |
880 | ||
881 | list_for_each_entry(dd, devices, list) { | |
882 | if (prev && | |
883 | blk_integrity_compare(prev->dm_dev.bdev->bd_disk, | |
884 | dd->dm_dev.bdev->bd_disk) < 0) { | |
885 | DMWARN("%s: integrity not set: %s and %s mismatch", | |
886 | dm_device_name(t->md), | |
887 | prev->dm_dev.bdev->bd_disk->disk_name, | |
888 | dd->dm_dev.bdev->bd_disk->disk_name); | |
889 | goto no_integrity; | |
890 | } | |
891 | prev = dd; | |
892 | } | |
893 | ||
894 | if (!prev || !bdev_get_integrity(prev->dm_dev.bdev)) | |
895 | goto no_integrity; | |
896 | ||
897 | blk_integrity_register(dm_disk(t->md), | |
898 | bdev_get_integrity(prev->dm_dev.bdev)); | |
899 | ||
900 | return; | |
901 | ||
902 | no_integrity: | |
903 | blk_integrity_register(dm_disk(t->md), NULL); | |
904 | ||
905 | return; | |
906 | } | |
907 | ||
1da177e4 LT |
908 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) |
909 | { | |
910 | /* | |
911 | * Make sure we obey the optimistic sub-device |
912 | * restrictions. |
913 | */ | |
914 | blk_queue_max_sectors(q, t->limits.max_sectors); | |
915 | q->max_phys_segments = t->limits.max_phys_segments; | |
916 | q->max_hw_segments = t->limits.max_hw_segments; | |
917 | q->hardsect_size = t->limits.hardsect_size; | |
918 | q->max_segment_size = t->limits.max_segment_size; | |
91212507 | 919 | q->max_hw_sectors = t->limits.max_hw_sectors; |
1da177e4 | 920 | q->seg_boundary_mask = t->limits.seg_boundary_mask; |
5ec140e6 | 921 | q->bounce_pfn = t->limits.bounce_pfn; |
c9a3f6d6 | 922 | |
969429b5 | 923 | if (t->limits.no_cluster) |
c9a3f6d6 | 924 | queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); |
969429b5 | 925 | else |
c9a3f6d6 | 926 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); |
969429b5 | 927 | |
9c47008d | 928 | dm_table_set_integrity(t); |
1da177e4 LT |
929 | } |
930 | ||
931 | unsigned int dm_table_get_num_targets(struct dm_table *t) | |
932 | { | |
933 | return t->num_targets; | |
934 | } | |
935 | ||
936 | struct list_head *dm_table_get_devices(struct dm_table *t) | |
937 | { | |
938 | return &t->devices; | |
939 | } | |
940 | ||
aeb5d727 | 941 | fmode_t dm_table_get_mode(struct dm_table *t) |
1da177e4 LT |
942 | { |
943 | return t->mode; | |
944 | } | |
945 | ||
946 | static void suspend_targets(struct dm_table *t, unsigned postsuspend) | |
947 | { | |
948 | int i = t->num_targets; | |
949 | struct dm_target *ti = t->targets; | |
950 | ||
951 | while (i--) { | |
952 | if (postsuspend) { | |
953 | if (ti->type->postsuspend) | |
954 | ti->type->postsuspend(ti); | |
955 | } else if (ti->type->presuspend) | |
956 | ti->type->presuspend(ti); | |
957 | ||
958 | ti++; | |
959 | } | |
960 | } | |
961 | ||
962 | void dm_table_presuspend_targets(struct dm_table *t) | |
963 | { | |
cf222b37 AK |
964 | if (!t) |
965 | return; | |
966 | ||
e8488d08 | 967 | suspend_targets(t, 0); |
1da177e4 LT |
968 | } |
969 | ||
970 | void dm_table_postsuspend_targets(struct dm_table *t) | |
971 | { | |
cf222b37 AK |
972 | if (!t) |
973 | return; | |
974 | ||
e8488d08 | 975 | suspend_targets(t, 1); |
1da177e4 LT |
976 | } |
977 | ||
8757b776 | 978 | int dm_table_resume_targets(struct dm_table *t) |
1da177e4 | 979 | { |
8757b776 MB |
980 | int i, r = 0; |
981 | ||
982 | for (i = 0; i < t->num_targets; i++) { | |
983 | struct dm_target *ti = t->targets + i; | |
984 | ||
985 | if (!ti->type->preresume) | |
986 | continue; | |
987 | ||
988 | r = ti->type->preresume(ti); | |
989 | if (r) | |
990 | return r; | |
991 | } | |
1da177e4 LT |
992 | |
993 | for (i = 0; i < t->num_targets; i++) { | |
994 | struct dm_target *ti = t->targets + i; | |
995 | ||
996 | if (ti->type->resume) | |
997 | ti->type->resume(ti); | |
998 | } | |
8757b776 MB |
999 | |
1000 | return 0; | |
1da177e4 LT |
1001 | } |
1002 | ||
1003 | int dm_table_any_congested(struct dm_table *t, int bdi_bits) | |
1004 | { | |
82b1519b | 1005 | struct dm_dev_internal *dd; |
afb24528 | 1006 | struct list_head *devices = dm_table_get_devices(t); |
1da177e4 LT |
1007 | int r = 0; |
1008 | ||
afb24528 | 1009 | list_for_each_entry(dd, devices, list) { |
82b1519b | 1010 | struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); |
0c2322e4 AK |
1011 | char b[BDEVNAME_SIZE]; |
1012 | ||
1013 | if (likely(q)) | |
1014 | r |= bdi_congested(&q->backing_dev_info, bdi_bits); | |
1015 | else | |
1016 | DMWARN_LIMIT("%s: any_congested: nonexistent device %s", | |
1017 | dm_device_name(t->md), | |
1018 | bdevname(dd->dm_dev.bdev, b)); | |
1da177e4 LT |
1019 | } |
1020 | ||
1021 | return r; | |
1022 | } | |
1023 | ||
1024 | void dm_table_unplug_all(struct dm_table *t) | |
1025 | { | |
82b1519b | 1026 | struct dm_dev_internal *dd; |
afb24528 | 1027 | struct list_head *devices = dm_table_get_devices(t); |
1da177e4 | 1028 | |
afb24528 | 1029 | list_for_each_entry(dd, devices, list) { |
82b1519b | 1030 | struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); |
0c2322e4 AK |
1031 | char b[BDEVNAME_SIZE]; |
1032 | ||
1033 | if (likely(q)) | |
1034 | blk_unplug(q); | |
1035 | else | |
1036 | DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s", | |
1037 | dm_device_name(t->md), | |
1038 | bdevname(dd->dm_dev.bdev, b)); | |
1da177e4 LT |
1039 | } |
1040 | } | |
1041 | ||
1134e5ae MA |
1042 | struct mapped_device *dm_table_get_md(struct dm_table *t) |
1043 | { | |
1044 | dm_get(t->md); | |
1045 | ||
1046 | return t->md; | |
1047 | } | |
1048 | ||
1da177e4 LT |
1049 | EXPORT_SYMBOL(dm_vcalloc); |
1050 | EXPORT_SYMBOL(dm_get_device); | |
1051 | EXPORT_SYMBOL(dm_put_device); | |
1052 | EXPORT_SYMBOL(dm_table_event); | |
d5e404c1 | 1053 | EXPORT_SYMBOL(dm_table_get_size); |
1da177e4 | 1054 | EXPORT_SYMBOL(dm_table_get_mode); |
1134e5ae | 1055 | EXPORT_SYMBOL(dm_table_get_md); |
1da177e4 LT |
1056 | EXPORT_SYMBOL(dm_table_put); |
1057 | EXPORT_SYMBOL(dm_table_get); | |
1058 | EXPORT_SYMBOL(dm_table_unplug_all); |