/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include <linux/device-mapper.h>

#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
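/*
 * DM_PG_INIT_DELAY_DEFAULT is a sentinel meaning "no pg_init_delay_msecs was
 * configured"; __pg_init_all_paths() then falls back to DM_PG_INIT_DELAY_MSECS.
 */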
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
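/*
 * Path selectors hand back a struct dm_path *; path_to_pgpath() maps it
 * back to the enclosing struct pgpath via container_of().
 */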
/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};
/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	unsigned long flags;		/* Multipath state flags */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	enum dm_queue_mode queue_mode;

	struct mutex work_mutex;
	struct work_struct trigger_event;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;
};
/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);
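/* action_fn matches fail_path()/reinstate_path(); see multipath_message(). */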
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
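/*
 * The MPATHF_* values are bit numbers within multipath->flags and are
 * manipulated with set_bit()/clear_bit()/test_bit() so each flag can be
 * updated atomically.
 */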
/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = true;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
	}

	return pgpath;
}
static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}
static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}
static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}
static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
		atomic_set(&m->nr_valid_paths, 0);
		atomic_set(&m->pg_init_in_progress, 0);
		atomic_set(&m->pg_init_count, 0);
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;
	}

	return m;
}
static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		/*
		 * Default to request-based.
		 */
		if (dm_use_blk_mq(dm_table_get_md(ti->table)))
			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
		else
			m->queue_mode = DM_TYPE_REQUEST_BASED;
	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	return 0;
}
static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	kfree(m);
}
static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}
static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}
static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}
static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	void *bio_details = mpio + 1;

	return bio_details;
}
static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
					struct dm_bio_details **bio_details_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);

	memset(mpio, 0, sizeof(*mpio));
	memset(bio_details, 0, sizeof(*bio_details));
	dm_bio_record(bio_details, bio);

	if (mpio_p)
		*mpio_p = mpio;
	if (bio_details_p)
		*bio_details_p = bio_details;
}
/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}
static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}
static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}
static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}
static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (lockless_dereference(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = lockless_dereference(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed)
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}
/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
({									\
	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
									\
	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
		 dm_device_name(md),					\
		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 dm_noflush_suspending((m)->ti));			\
	-EIO;								\
})
/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			return DM_MAPIO_DELAY_REQUEUE;
		return dm_report_EIO(m);	/* Failed */
	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (pg_init_all_paths(m))
			return DM_MAPIO_DELAY_REQUEUE;
		return DM_MAPIO_REQUEUE;
	}

	memset(mpio, 0, sizeof(*mpio));
	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		bool queue_dying = blk_queue_dying(q);
		DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
			    PTR_ERR(clone), queue_dying ? " (path offline)" : "");
		if (queue_dying) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
			return DM_MAPIO_REQUEUE;
		}
		return DM_MAPIO_DELAY_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->rq_disk = bdev->bd_disk;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}
static void multipath_release_clone(struct request *clone)
{
	blk_put_request(clone);
}
/*
 * Map cloned bios (bio-based multipath)
 */
static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
{
	size_t nr_bytes = bio->bi_iter.bi_size;
	struct pgpath *pgpath;
	unsigned long flags;
	bool queue_io;

	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
	if (!pgpath || !queue_io)
		pgpath = choose_pgpath(m, nr_bytes);

	if ((pgpath && queue_io) ||
	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
		/* Queue for the daemon to resubmit */
		spin_lock_irqsave(&m->lock, flags);
		bio_list_add(&m->queued_bios, bio);
		spin_unlock_irqrestore(&m->lock, flags);
		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		else
			queue_work(kmultipathd, &m->process_queued_bios);
		return DM_MAPIO_SUBMITTED;
	}

	if (!pgpath) {
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			return DM_MAPIO_REQUEUE;
		return dm_report_EIO(m);
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bio->bi_bdev = pgpath->path.dev->bdev;
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}
static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio, NULL);

	return __multipath_map_bio(m, bio, mpio);
}
static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}
static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
		if (r < 0 || r == DM_MAPIO_REQUEUE) {
			bio->bi_error = r;
			bio_endio(bio);
		} else if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
	}
	blk_finish_plug(&plug);
}
static void assign_bit(bool value, long nr, unsigned long *addr)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}
/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
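/*
 * This is controlled by the "queue_if_no_path" table feature and by the
 * "queue_if_no_path" / "fail_if_no_path" messages handled in
 * multipath_message() below.
 */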
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
			    bool save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	assign_bit((save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
		   (!save_old_value && queue_if_no_path),
		   MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	assign_bit(queue_if_no_path || dm_noflush_suspending(m->ti),
		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}
/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}
/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
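/*
 * For example, with hypothetical devices 8:16 and 8:32, a single-group
 * round-robin map with queueing enabled could be expressed as:
 *
 *   1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 100 8:32 100
 */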
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}
static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Clear any hw_handler_params associated with a
			 * handler that isn't already attached.
			 */
			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
				kfree(m->hw_handler_params);
				m->hw_handler_params = NULL;
			}

			/*
			 * Reset hw_handler_name to match the attached handler
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

bad:
	free_pgpath(p);
	return ERR_PTR(r);
}
static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}
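/*
 * Example feature-argument strings as they appear at the start of a table
 * line (count first): "0" for no features, or e.g.
 * "3 queue_if_no_path pg_init_retries 3".
 */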
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else if (!strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}
static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

bad:
	free_multipath(m);
	return r;
}
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&m->pg_init_in_progress))
			break;

		io_schedule();
	}
	finish_wait(&m->pg_init_wait, &wait);
}
static void flush_multipath_work(struct multipath *m)
{
	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
	smp_mb__after_atomic();

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
	smp_mb__after_atomic();
}
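/*
 * flush_multipath_work() keeps MPATHF_PG_INIT_DISABLED set across its
 * flushes so that no new pg_init work can be queued while it waits for
 * in-flight work to finish.
 */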
static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}
/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	atomic_dec(&m->nr_valid_paths);

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}
/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;
	unsigned nr_valid_paths;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	DMWARN("Reinstating path %s.", pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = true;

	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
	if (nr_valid_paths == 1) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			atomic_inc(&m->pg_init_in_progress);
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return r;
}
/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}
/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}
/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = false;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}
/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}
/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	bool limit_reached = false;

	spin_lock_irqsave(&m->lock, flags);

	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
	else
		limit_reached = true;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}
static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	bool delay_retry = false;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, true);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	case SCSI_DH_DEV_OFFLINED:
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
		pg->bypassed = false;

	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
		/* Activations of other paths are still on going */
		goto out;

	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (delay_retry)
			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
		else
			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

		if (__pg_init_all_paths(m))
			goto out;
	}
	clear_bit(MPATHF_QUEUE_IO, &m->flags);

	process_queued_io_list(m);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}
static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}
static int noretry_error(int error)
{
	switch (error) {
	case -EBADE:
		/*
		 * EBADE signals a reservation conflict.
		 * We shouldn't fail the path here as we can communicate with
		 * the target. We should failover to the next path, but in
		 * doing so we might be causing a ping-pong between paths.
		 * So just return the reservation conflict error.
		 */
	case -EOPNOTSUPP:
	case -EREMOTEIO:
	case -EILSEQ:
	case -ENODATA:
	case -ENOSPC:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones. If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (noretry_error(error))
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	if (atomic_read(&m->nr_valid_paths) == 0 &&
	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
		r = dm_report_EIO(m);

	return r;
}
static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	r = do_end_io(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}
static int do_end_io_bio(struct multipath *m, struct bio *clone,
			 int error, struct dm_mpath_io *mpio)
{
	unsigned long flags;

	if (!error)
		return 0;	/* I/O complete */

	if (noretry_error(error))
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	if (atomic_read(&m->nr_valid_paths) == 0 &&
	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
		return dm_report_EIO(m);

	/* Queue for the daemon to resubmit */
	dm_bio_restore(get_bio_details_from_bio(clone), clone);

	spin_lock_irqsave(&m->lock, flags);
	bio_list_add(&m->queued_bios, clone);
	spin_unlock_irqrestore(&m->lock, flags);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);

	return DM_ENDIO_INCOMPLETE;
}
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	r = do_end_io_bio(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}
/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, false, true);
}
static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}
/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	assign_bit(test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	spin_unlock_irqrestore(&m->lock, flags);
}
/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *             [A|D|E num_ps_status_args [ps_status_args]*
 *              num_paths num_selector_args
 *              [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
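/*
 * The INFO form is what "dmsetup status <dev>" reports for this target;
 * the TABLE form is what "dmsetup table <dev>" reports.
 */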
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
			switch (m->queue_mode) {
			case DM_TYPE_BIO_BASED:
				DMEMIT("queue_mode bio ");
				break;
			case DM_TYPE_MQ_REQUEST_BASED:
				DMEMIT("queue_mode mq ");
				break;
			}
		}
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}
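/*
 * Messages arrive via "dmsetup message <dev> 0 <msg> [arg]", e.g. (for a
 * hypothetical map named mpatha): "dmsetup message mpatha 0 fail_path 8:16"
 * or "dmsetup message mpatha 0 switch_group 2".
 */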
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}
static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	struct pgpath *current_pgpath;
	int r;

	current_pgpath = lockless_dereference(m->current_pgpath);
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);

	if (current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = current_pgpath->path.dev->bdev;
			*mode = current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	if (r == -ENOTCONN) {
		if (!lockless_dereference(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}
/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy". Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);

	/* Guess which priority_group will be used at next mapping time */
	pg = lockless_dereference(m->current_pg);
	next_pg = lockless_dereference(m->next_pg);
	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		return busy;
	}

	/*
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;

	return busy;
}
/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 12, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};
static int __init dm_multipath_init(void)
{
	int r;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("request-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	return r;
}
static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
}
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");