/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
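/*
 * Note: DM_PG_INIT_DELAY_DEFAULT acts as a sentinel meaning "no explicit
 * pg_init_delay_msecs was configured"; while it is still set,
 * __pg_init_all_paths() falls back to DM_PG_INIT_DELAY_MSECS (2000 ms).
 */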
/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
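/*
 * Reading aid: path selectors hand back a struct dm_path *, and
 * path_to_pgpath() recovers the enclosing pgpath via container_of(),
 * e.g. m->current_pgpath = path_to_pgpath(path) in __choose_path_in_pg().
 */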
/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};
/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	unsigned long flags;		/* Multipath state flags */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};
/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);
static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
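/*
 * Descriptive note: the MPATHF_* values are bit numbers within
 * multipath->flags and are manipulated with set_bit()/clear_bit()/test_bit(),
 * e.g. set_bit(MPATHF_QUEUE_IO, &m->flags).
 */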
/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/
static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = true;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}
static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}
static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}
static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);

		m->mpio_pool = NULL;
		if (!use_blk_mq) {
			unsigned min_ios = dm_get_reserved_rq_based_ios();

			m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
			if (!m->mpio_pool) {
				kfree(m);
				return NULL;
			}
		}

		m->ti = ti;
		ti->private = m;
	}

	return m;
}
static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}
static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	if (!m->mpio_pool) {
		/* Use blk-mq pdu memory requested via per_io_data_size */
		mpio = get_mpio(info);
		memset(mpio, 0, sizeof(*mpio));
		return mpio;
	}

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return NULL;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return mpio;
}
static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
	/* Only needed for non blk-mq (.request_fn) multipath */
	if (m->mpio_pool) {
		struct dm_mpath_io *mpio = info->ptr;

		info->ptr = NULL;
		mempool_free(mpio, m->mpio_pool);
	}
}
/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	if (m->pg_init_in_progress || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	m->pg_init_count++;
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
	return m->pg_init_in_progress;
}
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	m->pg_init_count = 0;
}
static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}
static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	bool bypassed = true;

	if (!m->nr_valid_paths) {
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				if (!bypassed)
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
				return;
			}
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}
/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
		((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
		  test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
		 dm_noflush_suspending(m->ti)));
}
/*
 * Map cloned requests
 */
static int __multipath_map(struct dm_target *ti, struct request *clone,
			   union map_info *map_context,
			   struct request *rq, struct request **__clone)
{
	struct multipath *m = ti->private;
	int r = DM_MAPIO_REQUEUE;
	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio;

	spin_lock_irq(&m->lock);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (!pgpath) {
		if (!__must_push_back(m))
			r = -EIO;	/* Failed */
		goto out_unlock;
	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		__pg_init_all_paths(m);
		goto out_unlock;
	}

	mpio = set_mpio(m, map_context);
	if (!mpio)
		/* ENOMEM, requeue */
		goto out_unlock;

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;

	spin_unlock_irq(&m->lock);

	if (clone) {
		/*
		 * Old request-based interface: allocated clone is passed in.
		 * Used by: .request_fn stacked on .request_fn path(s).
		 */
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	} else {
		/*
		 * blk-mq request-based interface; used by both:
		 * .request_fn stacked on blk-mq path(s) and
		 * blk-mq stacked on blk-mq path(s).
		 */
		*__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
						rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(*__clone)) {
			/* ENOMEM, requeue */
			clear_request_fn_mpio(m, map_context);
			return r;
		}
		(*__clone)->bio = (*__clone)->biotail = NULL;
		(*__clone)->rq_disk = bdev->bd_disk;
		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	}

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;

out_unlock:
	spin_unlock_irq(&m->lock);

	return r;
}
static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	return __multipath_map(ti, clone, map_context, NULL, NULL);
}

static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return __multipath_map(ti, NULL, map_context, rq, clone);
}

static void multipath_release_clone(struct request *clone)
{
	blk_mq_free_request(clone);
}
/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
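/*
 * Descriptive note: this policy is driven by the "queue_if_no_path"
 * feature argument (see parse_features()) and can be toggled at run time
 * via the "queue_if_no_path"/"fail_if_no_path" messages handled in
 * multipath_message().
 */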
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
			    bool save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value) {
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
		else
			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	} else {
		if (queue_if_no_path)
			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
		else
			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}
	if (queue_if_no_path)
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	else
		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);

	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path)
		dm_table_run_md_queue_async(m->ti->table);

	return 0;
}
/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}
/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
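/*
 * Illustrative example only (not taken from this file): a table line of
 * the above form, assuming the round-robin selector and its repeat_count
 * argument, might look like
 *   "0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000"
 * i.e. no feature args, no hw_handler args, one priority group (initial
 * group 1) with two paths, each carrying one selector arg.
 */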
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}
static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Reset hw_handler_name to match the attached handler
			 * and clear any hw_handler_params associated with the
			 * ignored handler.
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;

			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
							"handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

bad:
	free_pgpath(p);
	return ERR_PTR(r);
}
static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;
	bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti, use_blk_mq);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	if (use_blk_mq)
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

bad:
	free_multipath(m);
	return r;
}
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}
static void flush_multipath_work(struct multipath *m)
{
	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
	smp_mb__after_atomic();

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
	smp_mb__after_atomic();
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}
/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}
/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	DMWARN("Reinstating path %s.", pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = true;

	if (!m->nr_valid_paths++) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue)
		dm_table_run_md_queue_async(m->ti->table);

	return r;
}
/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}
/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}
/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = false;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}
/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}
/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	bool limit_reached = false;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries &&
	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
	else
		limit_reached = true;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}
static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	bool delay_retry = false;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, true);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = true;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	case SCSI_DH_DEV_OFFLINED:
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
		pg->bypassed = false;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still on going */
		goto out;

	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (delay_retry)
			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
		else
			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

		if (__pg_init_all_paths(m))
			goto out;
	}
	clear_bit(MPATHF_QUEUE_IO, &m->flags);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	if (pgpath->is_active)
		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				 pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}
static int noretry_error(int error)
{
	switch (error) {
	case -EOPNOTSUPP:
	case -EREMOTEIO:
	case -EILSEQ:
	case -ENODATA:
	case -ENOSPC:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (noretry_error(error))
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}
static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);

	r = do_end_io(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	clear_request_fn_mpio(m, map_context);

	return r;
}
/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, false, true);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}
/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	else
		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	smp_mb__after_atomic();
}
/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
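/*
 * Illustrative example only (not emitted verbatim by this code): an info
 * line of the above form might look like
 *   "2 0 1 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0"
 * but the exact fields depend on the configured features, hardware
 * handler and path selector, so treat this purely as a reading aid.
 */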
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags), m->pg_init_count);
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}
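/*
 * Illustrative usage only (assuming a mapped device named "mpath0"):
 * the messages above are normally sent with dmsetup, e.g.
 *   dmsetup message mpath0 0 fail_path 8:32
 *   dmsetup message mpath0 0 queue_if_no_path
 */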
static int multipath_prepare_ioctl(struct dm_target *ti,
		struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	unsigned long flags;
	int r;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	if (m->current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = m->current_pgpath->path.dev->bdev;
			*mode = m->current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	if (r == -ENOTCONN) {
		spin_lock_irqsave(&m->lock, flags);
		if (!m->current_pg) {
			/* Path status changed, redo selection */
			__choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			__pg_init_all_paths(m);
		spin_unlock_irqrestore(&m->lock, flags);
		dm_table_run_md_queue_async(m->ti->table);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}
/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* pg_init in progress or no paths available */
	if (m->pg_init_in_progress ||
	    (!m->nr_valid_paths && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
		busy = true;
		goto out;
	}
	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		goto out;

	/*
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}
/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 11, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};
static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	kmem_cache_destroy(_mpio_cache);

	return r;
}
static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");