/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

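/*
 * Example: on an architecture with a 64-byte L1 cache line and an
 * 8-byte sector_t (both values are configuration-dependent), each
 * btree node packs KEYS_PER_NODE = 8 keys and fans out to
 * CHILDREN_PER_NODE = 9 children, so one node fills exactly one
 * cache line.
 */
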
struct dm_table {
        struct mapped_device *md;
        atomic_t holders;

        /* btree table */
        unsigned int depth;
        unsigned int counts[MAX_DEPTH]; /* in nodes */
        sector_t *index[MAX_DEPTH];

        unsigned int num_targets;
        unsigned int num_allocated;
        sector_t *highs;
        struct dm_target *targets;

        /*
         * Indicates the rw permissions for the new logical
         * device.  This should be a combination of FMODE_READ
         * and FMODE_WRITE.
         */
        int mode;

        /* a list of devices used by this table */
        struct list_head devices;

        /*
         * These are optimistic limits taken from all the
         * targets; some targets will need smaller limits.
         */
        struct io_restrictions limits;

        /* events get handed up using this callback */
        void (*event_fn)(void *);
        void *event_context;
};

/*
 * Similar to ceiling(log_base(n)): returns the number of times n must
 * be divided by 'base' (rounding up) to reach 1.
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}

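/*
 * Example: int_log(1000, 9) iterates 1000 -> 112 -> 13 -> 2 -> 1 and
 * returns 4, i.e. ceil(log9(1000)).
 */
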
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

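/*
 * Example: min_not_zero(0, 8) == 8, min_not_zero(4, 8) == 4 and
 * min_not_zero(0, 0) == 0.  The expansion is fully parenthesised so
 * the macro can safely appear inside larger expressions.
 */
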
/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
                                     struct io_restrictions *rhs)
{
        lhs->max_sectors =
                min_not_zero(lhs->max_sectors, rhs->max_sectors);

        lhs->max_phys_segments =
                min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

        lhs->max_hw_segments =
                min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

        lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

        lhs->max_segment_size =
                min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

        lhs->max_hw_sectors =
                min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);

        lhs->seg_boundary_mask =
                min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

        lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);

        lhs->no_cluster |= rhs->no_cluster;
}

/*
 * Calculate the index of the child node for the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) -1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

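/*
 * Example: with CHILDREN_PER_NODE == 9, high(t, 0, 0) on a depth-3
 * tree descends through the right-most children, get_child(0, 8) == 8
 * and get_child(8, 8) == 80, then returns the last key of leaf node 80
 * (or (sector_t) -1 if that leaf does not exist).
 */
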
/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
        unsigned long size;
        void *addr;

        /*
         * Check that we're not going to overflow.
         */
        if (nmemb > (ULONG_MAX / elem_size))
                return NULL;

        size = nmemb * elem_size;
        addr = vmalloc(size);
        if (addr)
                memset(addr, 0, size);

        return addr;
}

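/*
 * Example: on a 32-bit machine, dm_vcalloc(0x20000000, 16) would need
 * 0x200000000 bytes, which wraps to 0 in an unsigned long; the
 * ULONG_MAX / elem_size guard catches this and returns NULL instead.
 * (Callers are expected to pass a non-zero elem_size.)
 */
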
/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;
        int n = t->num_targets;

        /*
         * Allocate both the target array and offset array at once.
         * Append an empty entry to catch sectors beyond the end of
         * the device.
         */
        n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        if (n) {
                memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
                memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
        }

        memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
        vfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}

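/*
 * Layout of the single vmalloc'ed block: the first num sector_t slots
 * hold the highs array, and the targets array starts immediately
 * after, at n_highs + num.  The "+ 1" in the allocation leaves room
 * for the extra entry that catches sectors beyond the end of the
 * device.  Unused highs are pre-filled with -1 by the memset above.
 */
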
int dm_table_create(struct dm_table **result, int mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);
        atomic_set(&t->holders, 1);

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                t = NULL;
                return -ENOMEM;
        }

        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}

int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
{
        struct dm_table *t;
        sector_t dev_size = 1;
        int r;

        /*
         * Find current size of device.
         * Default to 1 sector if inactive.
         */
        t = dm_get_table(md);
        if (t) {
                dev_size = dm_table_get_size(t);
                dm_table_put(t);
        }

        r = dm_table_create(&t, FMODE_READ, 1, md);
        if (r)
                return r;

        r = dm_table_add_target(t, "error", 0, dev_size, NULL);
        if (r)
                goto out;

        r = dm_table_complete(t);
        if (r)
                goto out;

        *result = t;

out:
        if (r)
                dm_table_put(t);

        return r;
}
EXPORT_SYMBOL_GPL(dm_create_error_table);

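/*
 * dm_create_error_table() above also illustrates the normal table
 * lifecycle: dm_table_create(), one or more dm_table_add_target()
 * calls, then dm_table_complete(), with dm_table_put() dropping the
 * initial reference on failure.
 */
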
static void free_devices(struct list_head *devices)
{
        struct list_head *tmp, *next;

        for (tmp = devices->next; tmp != devices; tmp = next) {
                struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
                next = tmp->next;
                kfree(dd);
        }
}

static void table_destroy(struct dm_table *t)
{
        unsigned int i;

        /* free the indexes (see dm_table_complete) */
        if (t->depth >= 2)
                vfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        vfree(t->highs);

        /* free the device list */
        if (t->devices.next != &t->devices) {
                DMWARN("devices still present during destroy: "
                       "dm_table_remove_device calls missing");

                free_devices(&t->devices);
        }

        kfree(t);
}

void dm_table_get(struct dm_table *t)
{
        atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
        if (!t)
                return;

        if (atomic_dec_and_test(&t->holders))
                table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
        if (t->num_targets >= t->num_allocated)
                return alloc_targets(t, t->num_allocated * 2);

        return 0;
}

/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
        int r;
        struct nameidata nd;
        struct inode *inode;

        if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
                return r;

        inode = nd.dentry->d_inode;
        if (!inode) {
                r = -ENOENT;
                goto out;
        }

        if (!S_ISBLK(inode->i_mode)) {
                r = -ENOTBLK;
                goto out;
        }

        *dev = inode->i_rdev;

out:
        path_release(&nd);
        return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev *dd;

        list_for_each_entry (dd, l, list)
                if (dd->bdev->bd_dev == dev)
                        return dd;

        return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
        static char *_claim_ptr = "I belong to device-mapper";
        struct block_device *bdev;
        int r;

        BUG_ON(d->bdev);

        bdev = open_by_devnum(dev, d->mode);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
        if (r)
                blkdev_put(bdev);
        else
                d->bdev = bdev;

        return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d, struct mapped_device *md)
{
        if (!d->bdev)
                return;

        bd_release_from_disk(d->bdev, dm_disk(md));
        blkdev_put(d->bdev);
        d->bdev = NULL;
}

/*
 * If possible, this checks that an area of the destination device is
 * valid (a device reporting zero size means we cannot tell, so
 * accept it).
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
        sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;

        if (!dev_size)
                return 1;

        return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev, being careful to
 * leave things as they were if we fail to reopen the device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
{
        int r;
        struct dm_dev dd_copy;
        dev_t dev = dd->bdev->bd_dev;

        dd_copy = *dd;

        dd->mode |= new_mode;
        dd->bdev = NULL;
        r = open_dev(dd, dev, md);
        if (!r)
                close_dev(&dd_copy, md);
        else
                *dd = dd_copy;

        return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
                              const char *path, sector_t start, sector_t len,
                              int mode, struct dm_dev **result)
{
        int r;
        dev_t dev;
        struct dm_dev *dd;
        unsigned int major, minor;

        BUG_ON(!t);

        if (sscanf(path, "%u:%u", &major, &minor) == 2) {
                /* Extract the major/minor numbers */
                dev = MKDEV(major, minor);
                if (MAJOR(dev) != major || MINOR(dev) != minor)
                        return -EOVERFLOW;
        } else {
                /* convert the path to a device */
                if ((r = lookup_device(path, &dev)))
                        return r;
        }

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                dd->mode = mode;
                dd->bdev = NULL;

                if ((r = open_dev(dd, dev, t->md))) {
                        kfree(dd);
                        return r;
                }

                format_dev_t(dd->name, dev);

                atomic_set(&dd->count, 0);
                list_add(&dd->list, &t->devices);

        } else if (dd->mode != (mode | dd->mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        atomic_inc(&dd->count);

        if (!check_device_area(dd, start, len)) {
                DMWARN("device %s too small for target", path);
                dm_put_device(ti, dd);
                return -EINVAL;
        }

        *result = dd;

        return 0;
}

void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct io_restrictions *rs = &ti->limits;

        /*
         * Combine the device limits low.
         *
         * FIXME: if we move an io_restriction struct
         *        into q this would just be a call to
         *        combine_restrictions_low()
         */
        rs->max_sectors =
                min_not_zero(rs->max_sectors, q->max_sectors);

        /*
         * FIXME: Device-Mapper on top of RAID-0 breaks because DM
         *        currently doesn't honor MD's merge_bvec_fn routine.
         *        In this case, we'll force DM to use PAGE_SIZE or
         *        smaller I/O, just to be safe.  A better fix is in the
         *        works, but add this for the time being so it will at
         *        least operate correctly.
         */
        if (q->merge_bvec_fn)
                rs->max_sectors =
                        min_not_zero(rs->max_sectors,
                                     (unsigned int) (PAGE_SIZE >> 9));

        rs->max_phys_segments =
                min_not_zero(rs->max_phys_segments, q->max_phys_segments);

        rs->max_hw_segments =
                min_not_zero(rs->max_hw_segments, q->max_hw_segments);

        rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

        rs->max_segment_size =
                min_not_zero(rs->max_segment_size, q->max_segment_size);

        rs->max_hw_sectors =
                min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);

        rs->seg_boundary_mask =
                min_not_zero(rs->seg_boundary_mask, q->seg_boundary_mask);

        rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);

        rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
                  sector_t len, int mode, struct dm_dev **result)
{
        int r = __table_get_device(ti->table, ti, path,
                                   start, len, mode, result);

        if (!r)
                dm_set_device_limits(ti, (*result)->bdev);

        return r;
}

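/*
 * Example (hypothetical target constructor): a target's ctr typically
 * grabs its backing device like this, where argv[0] is either a path
 * such as "/dev/sdb1" or a "major:minor" pair:
 *
 *	struct dm_dev *dev;
 *
 *	if (dm_get_device(ti, argv[0], 0, ti->len,
 *			  dm_table_get_mode(ti->table), &dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * and releases it from its dtr with dm_put_device(ti, dev).
 */
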
/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
        if (atomic_dec_and_test(&dd->count)) {
                close_dev(dd, ti->table->md);
                list_del(&dd->list);
                kfree(dd);
        }
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
        char **argv;
        unsigned new_size;

        new_size = *array_size ? *array_size * 2 : 64;
        argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
        if (argv) {
                memcpy(argv, old_argv, *array_size * sizeof(*argv));
                *array_size = new_size;
        }

        kfree(old_argv);
        return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        if (!input) {
                *argvp = NULL;
                return 0;
        }

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                start = end;

                /* Skip whitespace */
                while (*start && isspace(*start))
                        start++;

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to remove any backslash escapes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}

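/*
 * Example: the (writable) input string "0 102400 linear /dev/sdb 384"
 * splits into argc == 5 with argv[2] == "linear"; an escaped space
 * such as "error\ log" yields the single token "error log".  The
 * tokens point into the original buffer, which is modified in place.
 */
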
static void check_for_valid_limits(struct io_restrictions *rs)
{
        if (!rs->max_sectors)
                rs->max_sectors = SAFE_MAX_SECTORS;
        if (!rs->max_hw_sectors)
                rs->max_hw_sectors = SAFE_MAX_SECTORS;
        if (!rs->max_phys_segments)
                rs->max_phys_segments = MAX_PHYS_SEGMENTS;
        if (!rs->max_hw_segments)
                rs->max_hw_segments = MAX_HW_SEGMENTS;
        if (!rs->hardsect_size)
                rs->hardsect_size = 1 << SECTOR_SHIFT;
        if (!rs->max_segment_size)
                rs->max_segment_size = MAX_SEGMENT_SIZE;
        if (!rs->seg_boundary_mask)
                rs->seg_boundary_mask = -1;
        if (!rs->bounce_pfn)
                rs->bounce_pfn = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if ((r = check_space(t)))
                return r;

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md),
                      type);
                return -EINVAL;
        }

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                r = -EINVAL;
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters (insufficient memory)";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

        /*
         * FIXME: the plan is to combine high here and then have
         * the merge fn apply the target level restrictions.
         */
        combine_restrictions_low(&t->limits, &tgt->limits);
        return 0;

bad:
        DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
}

static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2, total = 0; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        check_for_valid_limits(&t->limits);

        /* how many indexes will the btree have? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}

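/*
 * Example: with KEYS_PER_NODE == 8 and CHILDREN_PER_NODE == 9, a table
 * of 100 targets needs leaf_nodes = dm_div_up(100, 8) = 13, giving
 * depth = 1 + int_log(13, 9) = 3; setup_indexes() then allocates the
 * two internal levels (counts of 2 and 1 nodes) above the leaves.
 */
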
static DEFINE_MUTEX(_event_lock);

void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
        /*
         * You can no longer call dm_table_event() from interrupt
         * context; use a bottom half instead.
         */
        BUG_ON(in_interrupt());

        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}

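/*
 * Example: on a depth-1 table the loop runs once over the leaf level,
 * so a lookup scans at most KEYS_PER_NODE highs; on the depth-3 table
 * from the dm_table_complete() example it visits one node per level
 * (3 nodes, up to 8 key comparisons each) to locate the target whose
 * high is the first one >= sector.
 */
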
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
        /*
         * Make sure we obey the optimistic sub devices
         * restrictions.
         */
        blk_queue_max_sectors(q, t->limits.max_sectors);
        q->max_phys_segments = t->limits.max_phys_segments;
        q->max_hw_segments = t->limits.max_hw_segments;
        q->hardsect_size = t->limits.hardsect_size;
        q->max_segment_size = t->limits.max_segment_size;
        q->max_hw_sectors = t->limits.max_hw_sectors;
        q->seg_boundary_mask = t->limits.seg_boundary_mask;
        q->bounce_pfn = t->limits.bounce_pfn;

        if (t->limits.no_cluster)
                q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
        else
                q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
        return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
        return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
        return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
        int i = t->num_targets;
        struct dm_target *ti = t->targets;

        while (i--) {
                if (postsuspend) {
                        if (ti->type->postsuspend)
                                ti->type->postsuspend(ti);
                } else if (ti->type->presuspend)
                        ti->type->presuspend(ti);

                ti++;
        }
}

void dm_table_presuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
        int i, r = 0;

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (!ti->type->preresume)
                        continue;

                r = ti->type->preresume(ti);
                if (r)
                        return r;
        }

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (ti->type->resume)
                        ti->type->resume(ti);
        }

        return 0;
}

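/*
 * Note the two-pass structure above: every target's preresume must
 * succeed before any target's resume callback runs, so a single
 * failing target aborts the whole resume before any target has been
 * resumed.
 */
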
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
        struct list_head *d, *devices;
        int r = 0;

        devices = dm_table_get_devices(t);
        for (d = devices->next; d != devices; d = d->next) {
                struct dm_dev *dd = list_entry(d, struct dm_dev, list);
                struct request_queue *q = bdev_get_queue(dd->bdev);

                r |= bdi_congested(&q->backing_dev_info, bdi_bits);
        }

        return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
        struct list_head *d, *devices = dm_table_get_devices(t);

        for (d = devices->next; d != devices; d = d->next) {
                struct dm_dev *dd = list_entry(d, struct dm_dev, list);
                struct request_queue *q = bdev_get_queue(dd->bdev);

                blk_unplug(q);
        }
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
        dm_get(t->md);

        return t->md;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);