/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
        struct list_head list;

        struct priority_group *pg;      /* Owning PG */
        unsigned fail_count;            /* Cumulative failure count */

        struct dm_path path;
        struct delayed_work activate_path;

        bool is_active:1;               /* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
        struct list_head list;

        struct multipath *m;            /* Owning multipath instance */
        struct path_selector ps;

        unsigned pg_num;                /* Reference number */
        unsigned nr_pgpaths;            /* Number of paths in PG */
        struct list_head pgpaths;

        bool bypassed:1;                /* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
        struct list_head list;
        struct dm_target *ti;

        const char *hw_handler_name;
        char *hw_handler_params;

        spinlock_t lock;

        unsigned nr_priority_groups;
        struct list_head priority_groups;

        wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */

        struct pgpath *current_pgpath;
        struct priority_group *current_pg;
        struct priority_group *next_pg; /* Switch to this PG if set */

        unsigned long flags;            /* Multipath state flags */

        unsigned pg_init_retries;       /* Number of times to retry pg_init */
        unsigned pg_init_delay_msecs;   /* Number of msecs before pg_init retry */

        atomic_t nr_valid_paths;        /* Total number of usable paths */
        atomic_t pg_init_in_progress;   /* Only one pg_init allowed at once */
        atomic_t pg_init_count;         /* Number of times pg_init called */

        unsigned queue_mode;

        /*
         * We must use a mempool of dm_mpath_io structs so that we
         * can resubmit bios on error.
         */
        mempool_t *mpio_pool;

        struct mutex work_mutex;
        struct work_struct trigger_event;

        struct work_struct process_queued_bios;
        struct bio_list queued_bios;
};
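
/*
 * Locking (inferred from the functions below): the priority-group lists and
 * the current_pgpath/current_pg/next_pg pointers are updated under m->lock,
 * while fast-path readers sample them with lockless_dereference() and only
 * take the lock when state has to change.
 */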

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
        struct pgpath *pgpath;
        size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0                       /* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1               /* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2         /* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3     /* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4               /* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5               /* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6            /* Delay pg_init retry? */
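
/*
 * The MPATHF_* values are bit numbers, not masks: they are only ever used
 * with the atomic bitops set_bit()/clear_bit()/test_bit() on m->flags.
 */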

/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
        struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

        if (pgpath) {
                pgpath->is_active = true;
                INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
        }

        return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
        kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
        struct priority_group *pg;

        pg = kzalloc(sizeof(*pg), GFP_KERNEL);

        if (pg)
                INIT_LIST_HEAD(&pg->pgpaths);

        return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
        struct pgpath *pgpath, *tmp;

        list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
                list_del(&pgpath->list);
                dm_put_device(ti, pgpath->path.dev);
                free_pgpath(pgpath);
        }
}

static void free_priority_group(struct priority_group *pg,
                                struct dm_target *ti)
{
        struct path_selector *ps = &pg->ps;

        if (ps->type) {
                ps->type->destroy(ps);
                dm_put_path_selector(ps->type);
        }

        free_pgpaths(&pg->pgpaths, ti);
        kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
        struct multipath *m;

        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (m) {
                INIT_LIST_HEAD(&m->priority_groups);
                spin_lock_init(&m->lock);
                set_bit(MPATHF_QUEUE_IO, &m->flags);
                atomic_set(&m->nr_valid_paths, 0);
                atomic_set(&m->pg_init_in_progress, 0);
                atomic_set(&m->pg_init_count, 0);
                m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
                INIT_WORK(&m->trigger_event, trigger_event);
                init_waitqueue_head(&m->pg_init_wait);
                mutex_init(&m->work_mutex);

                m->mpio_pool = NULL;
                m->queue_mode = DM_TYPE_NONE;

                m->ti = ti;
                ti->private = m;
        }

        return m;
}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
        if (m->queue_mode == DM_TYPE_NONE) {
                /*
                 * Default to request-based.
                 */
                if (dm_use_blk_mq(dm_table_get_md(ti->table)))
                        m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
                else
                        m->queue_mode = DM_TYPE_REQUEST_BASED;
        }

        if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
                unsigned min_ios = dm_get_reserved_rq_based_ios();

                m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
                if (!m->mpio_pool)
                        return -ENOMEM;
        } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
                INIT_WORK(&m->process_queued_bios, process_queued_bios);
                /*
                 * bio-based doesn't support any direct scsi_dh management;
                 * it just discovers if a scsi_dh is attached.
                 */
                set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
        }

        dm_table_set_type(ti->table, m->queue_mode);

        return 0;
}

static void free_multipath(struct multipath *m)
{
        struct priority_group *pg, *tmp;

        list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
                list_del(&pg->list);
                free_priority_group(pg, m->ti);
        }

        kfree(m->hw_handler_name);
        kfree(m->hw_handler_params);
        mempool_destroy(m->mpio_pool);
        kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
        return info->ptr;
}

static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
        struct dm_mpath_io *mpio;

        if (!m->mpio_pool) {
                /* Use blk-mq pdu memory requested via per_io_data_size */
                mpio = get_mpio(info);
                memset(mpio, 0, sizeof(*mpio));
                return mpio;
        }

        mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
        if (!mpio)
                return NULL;

        memset(mpio, 0, sizeof(*mpio));
        info->ptr = mpio;

        return mpio;
}

static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
        /* Only needed for non blk-mq (.request_fn) multipath */
        if (m->mpio_pool) {
                struct dm_mpath_io *mpio = info->ptr;

                info->ptr = NULL;
                mempool_free(mpio, m->mpio_pool);
        }
}

static size_t multipath_per_bio_data_size(void)
{
        return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
        return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
{
        /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
        struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
        void *bio_details = mpio + 1;

        return bio_details;
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
                                        struct dm_bio_details **bio_details_p)
{
        struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
        struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);

        memset(mpio, 0, sizeof(*mpio));
        memset(bio_details, 0, sizeof(*bio_details));
        dm_bio_record(bio_details, bio);

        if (mpio_p)
                *mpio_p = mpio;
        if (bio_details_p)
                *bio_details_p = bio_details;
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/
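
/*
 * __pg_init_all_paths() queues activate_path work for each active path in
 * the current PG and returns the number of pg_init activations now in
 * flight (0 if pg_init is disabled or was already in progress).
 */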
static int __pg_init_all_paths(struct multipath *m)
{
        struct pgpath *pgpath;
        unsigned long pg_init_delay = 0;

        if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
                return 0;

        atomic_inc(&m->pg_init_count);
        clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

        /* Check here to reset pg_init_required */
        if (!m->current_pg)
                return 0;

        if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
                pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
                                                 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
        list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
                /* Skip failed paths */
                if (!pgpath->is_active)
                        continue;
                if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
                                       pg_init_delay))
                        atomic_inc(&m->pg_init_in_progress);
        }
        return atomic_read(&m->pg_init_in_progress);
}

static void pg_init_all_paths(struct multipath *m)
{
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);
        __pg_init_all_paths(m);
        spin_unlock_irqrestore(&m->lock, flags);
}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
        m->current_pg = pg;

        /* Must we initialise the PG first, and queue I/O till it's ready? */
        if (m->hw_handler_name) {
                set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
                set_bit(MPATHF_QUEUE_IO, &m->flags);
        } else {
                clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
                clear_bit(MPATHF_QUEUE_IO, &m->flags);
        }

        atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
                                        struct priority_group *pg,
                                        size_t nr_bytes)
{
        unsigned long flags;
        struct dm_path *path;
        struct pgpath *pgpath;

        path = pg->ps.type->select_path(&pg->ps, nr_bytes);
        if (!path)
                return ERR_PTR(-ENXIO);

        pgpath = path_to_pgpath(path);

        if (unlikely(lockless_dereference(m->current_pg) != pg)) {
                /* Only update current_pgpath if pg changed */
                spin_lock_irqsave(&m->lock, flags);
                m->current_pgpath = pgpath;
                __switch_pg(m, pg);
                spin_unlock_irqrestore(&m->lock, flags);
        }

        return pgpath;
}

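/*
 * choose_pgpath() runs without m->lock held on the fast path: it samples
 * m->next_pg and m->current_pg with lockless_dereference() and only takes
 * the lock to consume next_pg or to clear the current path on failure.
 */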
static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
        unsigned long flags;
        struct priority_group *pg;
        struct pgpath *pgpath;
        bool bypassed = true;

        if (!atomic_read(&m->nr_valid_paths)) {
                clear_bit(MPATHF_QUEUE_IO, &m->flags);
                goto failed;
        }

        /* Were we instructed to switch PG? */
        if (lockless_dereference(m->next_pg)) {
                spin_lock_irqsave(&m->lock, flags);
                pg = m->next_pg;
                if (!pg) {
                        spin_unlock_irqrestore(&m->lock, flags);
                        goto check_current_pg;
                }
                m->next_pg = NULL;
                spin_unlock_irqrestore(&m->lock, flags);
                pgpath = choose_path_in_pg(m, pg, nr_bytes);
                if (!IS_ERR_OR_NULL(pgpath))
                        return pgpath;
        }

        /* Don't change PG until it has no remaining paths */
check_current_pg:
        pg = lockless_dereference(m->current_pg);
        if (pg) {
                pgpath = choose_path_in_pg(m, pg, nr_bytes);
                if (!IS_ERR_OR_NULL(pgpath))
                        return pgpath;
        }

        /*
         * Loop through priority groups until we find a valid path.
         * First time we skip PGs marked 'bypassed'.
         * Second time we only try the ones we skipped, but set
         * pg_init_delay_retry so we do not hammer controllers.
         */
        do {
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed == bypassed)
                                continue;
                        pgpath = choose_path_in_pg(m, pg, nr_bytes);
                        if (!IS_ERR_OR_NULL(pgpath)) {
                                if (!bypassed)
                                        set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
                                return pgpath;
                        }
                }
        } while (bypassed--);

failed:
        spin_lock_irqsave(&m->lock, flags);
        m->current_pgpath = NULL;
        m->current_pg = NULL;
        spin_unlock_irqrestore(&m->lock, flags);

        return NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static bool __must_push_back(struct multipath *m)
{
        return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
                 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
                dm_noflush_suspending(m->ti));
}

static bool must_push_back_rq(struct multipath *m)
{
        bool r;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);
        r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
             __must_push_back(m));
        spin_unlock_irqrestore(&m->lock, flags);

        return r;
}

static bool must_push_back_bio(struct multipath *m)
{
        bool r;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);
        r = __must_push_back(m);
        spin_unlock_irqrestore(&m->lock, flags);

        return r;
}

/*
 * Map cloned requests (request-based multipath)
 */
static int __multipath_map(struct dm_target *ti, struct request *clone,
                           union map_info *map_context,
                           struct request *rq, struct request **__clone)
{
        struct multipath *m = ti->private;
        int r = DM_MAPIO_REQUEUE;
        size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
        struct pgpath *pgpath;
        struct block_device *bdev;
        struct dm_mpath_io *mpio;

        /* Do we need to select a new pgpath? */
        pgpath = lockless_dereference(m->current_pgpath);
        if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
                pgpath = choose_pgpath(m, nr_bytes);

        if (!pgpath) {
                if (must_push_back_rq(m))
                        return DM_MAPIO_DELAY_REQUEUE;
                return -EIO;    /* Failed */
        } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
                   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
                pg_init_all_paths(m);
                return r;
        }

        mpio = set_mpio(m, map_context);
        if (!mpio)
                /* ENOMEM, requeue */
                return r;

        mpio->pgpath = pgpath;
        mpio->nr_bytes = nr_bytes;

        bdev = pgpath->path.dev->bdev;

        if (clone) {
                /*
                 * Old request-based interface: allocated clone is passed in.
                 * Used by: .request_fn stacked on .request_fn path(s).
                 */
                clone->q = bdev_get_queue(bdev);
                clone->rq_disk = bdev->bd_disk;
                clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
        } else {
                /*
                 * blk-mq request-based interface; used by both:
                 * .request_fn stacked on blk-mq path(s) and
                 * blk-mq stacked on blk-mq path(s).
                 */
                clone = blk_mq_alloc_request(bdev_get_queue(bdev),
                                             rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
                if (IS_ERR(clone)) {
                        /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
                        clear_request_fn_mpio(m, map_context);
                        return r;
                }
                clone->bio = clone->biotail = NULL;
                clone->rq_disk = bdev->bd_disk;
                clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
                *__clone = clone;
        }

        if (pgpath->pg->ps.type->start_io)
                pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
                                              &pgpath->path,
                                              nr_bytes);
        return DM_MAPIO_REMAPPED;
}

static int multipath_map(struct dm_target *ti, struct request *clone,
                         union map_info *map_context)
{
        return __multipath_map(ti, clone, map_context, NULL, NULL);
}

static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
                                   union map_info *map_context,
                                   struct request **clone)
{
        return __multipath_map(ti, NULL, map_context, rq, clone);
}

static void multipath_release_clone(struct request *clone)
{
        blk_mq_free_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */
static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
{
        size_t nr_bytes = bio->bi_iter.bi_size;
        struct pgpath *pgpath;
        unsigned long flags;
        bool queue_io;

        /* Do we need to select a new pgpath? */
        pgpath = lockless_dereference(m->current_pgpath);
        queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
        if (!pgpath || !queue_io)
                pgpath = choose_pgpath(m, nr_bytes);

        if ((pgpath && queue_io) ||
            (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
                /* Queue for the daemon to resubmit */
                spin_lock_irqsave(&m->lock, flags);
                bio_list_add(&m->queued_bios, bio);
                spin_unlock_irqrestore(&m->lock, flags);
                /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
                if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
                        pg_init_all_paths(m);
                else if (!queue_io)
                        queue_work(kmultipathd, &m->process_queued_bios);
                return DM_MAPIO_SUBMITTED;
        }

        if (!pgpath) {
                if (!must_push_back_bio(m))
                        return -EIO;
                return DM_MAPIO_REQUEUE;
        }

        mpio->pgpath = pgpath;
        mpio->nr_bytes = nr_bytes;

        bio->bi_error = 0;
        bio->bi_bdev = pgpath->path.dev->bdev;
        bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

        if (pgpath->pg->ps.type->start_io)
                pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
                                              &pgpath->path,
                                              nr_bytes);
        return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
        struct multipath *m = ti->private;
        struct dm_mpath_io *mpio = NULL;

        multipath_init_per_bio_data(bio, &mpio, NULL);

        return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
        if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
                dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
        else if (m->queue_mode == DM_TYPE_BIO_BASED)
                queue_work(kmultipathd, &m->process_queued_bios);
}

static void process_queued_bios(struct work_struct *work)
{
        int r;
        unsigned long flags;
        struct bio *bio;
        struct bio_list bios;
        struct blk_plug plug;
        struct multipath *m =
                container_of(work, struct multipath, process_queued_bios);

        bio_list_init(&bios);

        spin_lock_irqsave(&m->lock, flags);

        if (bio_list_empty(&m->queued_bios)) {
                spin_unlock_irqrestore(&m->lock, flags);
                return;
        }

        bio_list_merge(&bios, &m->queued_bios);
        bio_list_init(&m->queued_bios);

        spin_unlock_irqrestore(&m->lock, flags);

        blk_start_plug(&plug);
        while ((bio = bio_list_pop(&bios))) {
                r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
                if (r < 0 || r == DM_MAPIO_REQUEUE) {
                        bio->bi_error = r;
                        bio_endio(bio);
                } else if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
        }
        blk_finish_plug(&plug);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
                            bool save_old_value)
{
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        if (save_old_value) {
                if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                        set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
                else
                        clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
        } else {
                if (queue_if_no_path)
                        set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
                else
                        clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
        }
        if (queue_if_no_path)
                set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
        else
                clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);

        spin_unlock_irqrestore(&m->lock, flags);

        if (!queue_if_no_path) {
                dm_table_run_md_queue_async(m->ti->table);
                process_queued_io_list(m);
        }

        return 0;
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
        struct multipath *m =
                container_of(work, struct multipath, trigger_event);

        dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
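
/*
 * For illustration only (device numbers made up), a minimal table matching
 * the format above could be:
 *
 *     0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * i.e. no feature args, no hardware handler, one priority group using the
 * round-robin selector with two paths, each with one per-path selector arg.
 */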
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
                               struct dm_target *ti)
{
        int r;
        struct path_selector_type *pst;
        unsigned ps_argc;

        static struct dm_arg _args[] = {
                {0, 1024, "invalid number of path selector args"},
        };

        pst = dm_get_path_selector(dm_shift_arg(as));
        if (!pst) {
                ti->error = "unknown path selector type";
                return -EINVAL;
        }

        r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
        if (r) {
                dm_put_path_selector(pst);
                return -EINVAL;
        }

        r = pst->create(&pg->ps, ps_argc, as->argv);
        if (r) {
                dm_put_path_selector(pst);
                ti->error = "path selector constructor failed";
                return r;
        }

        pg->ps.type = pst;
        dm_consume_args(as, ps_argc);

        return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
                                 struct dm_target *ti)
{
        int r;
        struct pgpath *p;
        struct multipath *m = ti->private;
        struct request_queue *q = NULL;
        const char *attached_handler_name;

        /* we need at least a path arg */
        if (as->argc < 1) {
                ti->error = "no device given";
                return ERR_PTR(-EINVAL);
        }

        p = alloc_pgpath();
        if (!p)
                return ERR_PTR(-ENOMEM);

        r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
                          &p->path.dev);
        if (r) {
                ti->error = "error getting device";
                goto bad;
        }

        if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
                q = bdev_get_queue(p->path.dev->bdev);

        if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
                attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
                if (attached_handler_name) {
                        /*
                         * Clear any hw_handler_params associated with a
                         * handler that isn't already attached.
                         */
                        if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
                                kfree(m->hw_handler_params);
                                m->hw_handler_params = NULL;
                        }

                        /*
                         * Reset hw_handler_name to match the attached handler
                         *
                         * NB. This modifies the table line to show the actual
                         * handler instead of the original table passed in.
                         */
                        kfree(m->hw_handler_name);
                        m->hw_handler_name = attached_handler_name;
                }
        }

        if (m->hw_handler_name) {
                r = scsi_dh_attach(q, m->hw_handler_name);
                if (r == -EBUSY) {
                        char b[BDEVNAME_SIZE];

                        printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
                               bdevname(p->path.dev->bdev, b));
                        goto retain;
                }
                if (r < 0) {
                        ti->error = "error attaching hardware handler";
                        dm_put_device(ti, p->path.dev);
                        goto bad;
                }

                if (m->hw_handler_params) {
                        r = scsi_dh_set_params(q, m->hw_handler_params);
                        if (r < 0) {
                                ti->error = "unable to set hardware handler parameters";
                                dm_put_device(ti, p->path.dev);
                                goto bad;
                        }
                }
        }

        r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
        if (r) {
                dm_put_device(ti, p->path.dev);
                goto bad;
        }

        return p;

 bad:
        free_pgpath(p);
        return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
                                                   struct multipath *m)
{
        static struct dm_arg _args[] = {
                {1, 1024, "invalid number of paths"},
                {0, 1024, "invalid number of selector args"}
        };

        int r;
        unsigned i, nr_selector_args, nr_args;
        struct priority_group *pg;
        struct dm_target *ti = m->ti;

        if (as->argc < 2) {
                as->argc = 0;
                ti->error = "not enough priority group arguments";
                return ERR_PTR(-EINVAL);
        }

        pg = alloc_priority_group();
        if (!pg) {
                ti->error = "couldn't allocate priority group";
                return ERR_PTR(-ENOMEM);
        }
        pg->m = m;

        r = parse_path_selector(as, pg, ti);
        if (r)
                goto bad;

        /*
         * read the paths
         */
        r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
        if (r)
                goto bad;

        r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
        if (r)
                goto bad;

        nr_args = 1 + nr_selector_args;
        for (i = 0; i < pg->nr_pgpaths; i++) {
                struct pgpath *pgpath;
                struct dm_arg_set path_args;

                if (as->argc < nr_args) {
                        ti->error = "not enough path parameters";
                        r = -EINVAL;
                        goto bad;
                }

                path_args.argc = nr_args;
                path_args.argv = as->argv;

                pgpath = parse_path(&path_args, &pg->ps, ti);
                if (IS_ERR(pgpath)) {
                        r = PTR_ERR(pgpath);
                        goto bad;
                }

                pgpath->pg = pg;
                list_add_tail(&pgpath->list, &pg->pgpaths);
                dm_consume_args(as, nr_args);
        }

        return pg;

 bad:
        free_priority_group(pg, ti);
        return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
        unsigned hw_argc;
        int ret;
        struct dm_target *ti = m->ti;

        static struct dm_arg _args[] = {
                {0, 1024, "invalid number of hardware handler args"},
        };

        if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
                return -EINVAL;

        if (!hw_argc)
                return 0;

        if (m->queue_mode == DM_TYPE_BIO_BASED) {
                dm_consume_args(as, hw_argc);
                DMERR("bio-based multipath doesn't allow hardware handler args");
                return 0;
        }

        m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
        if (!m->hw_handler_name)
                return -EINVAL;

        if (hw_argc > 1) {
                char *p;
                int i, j, len = 4;

                for (i = 0; i <= hw_argc - 2; i++)
                        len += strlen(as->argv[i]) + 1;
                p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
                if (!p) {
                        ti->error = "memory allocation failed";
                        ret = -ENOMEM;
                        goto fail;
                }
                j = sprintf(p, "%d", hw_argc - 1);
                for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
                        j = sprintf(p, "%s", as->argv[i]);
        }
        dm_consume_args(as, hw_argc - 1);

        return 0;
fail:
        kfree(m->hw_handler_name);
        m->hw_handler_name = NULL;
        return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
        int r;
        unsigned argc;
        struct dm_target *ti = m->ti;
        const char *arg_name;

        static struct dm_arg _args[] = {
                {0, 8, "invalid number of feature args"},
                {1, 50, "pg_init_retries must be between 1 and 50"},
                {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
        };

        r = dm_read_arg_group(_args, as, &argc, &ti->error);
        if (r)
                return -EINVAL;

        if (!argc)
                return 0;

        do {
                arg_name = dm_shift_arg(as);
                argc--;

                if (!strcasecmp(arg_name, "queue_if_no_path")) {
                        r = queue_if_no_path(m, true, false);
                        continue;
                }

                if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
                        set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
                        continue;
                }

                if (!strcasecmp(arg_name, "pg_init_retries") &&
                    (argc >= 1)) {
                        r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
                        argc--;
                        continue;
                }

                if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
                    (argc >= 1)) {
                        r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
                        argc--;
                        continue;
                }

                if (!strcasecmp(arg_name, "queue_mode") &&
                    (argc >= 1)) {
                        const char *queue_mode_name = dm_shift_arg(as);

                        if (!strcasecmp(queue_mode_name, "bio"))
                                m->queue_mode = DM_TYPE_BIO_BASED;
                        else if (!strcasecmp(queue_mode_name, "rq"))
                                m->queue_mode = DM_TYPE_REQUEST_BASED;
                        else if (!strcasecmp(queue_mode_name, "mq"))
                                m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
                        else {
                                ti->error = "Unknown 'queue_mode' requested";
                                r = -EINVAL;
                        }
                        argc--;
                        continue;
                }

                ti->error = "Unrecognised multipath feature request";
                r = -EINVAL;
        } while (argc && !r);

        return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        /* target arguments */
        static struct dm_arg _args[] = {
                {0, 1024, "invalid number of priority groups"},
                {0, 1024, "invalid initial priority group number"},
        };

        int r;
        struct multipath *m;
        struct dm_arg_set as;
        unsigned pg_count = 0;
        unsigned next_pg_num;

        as.argc = argc;
        as.argv = argv;

        m = alloc_multipath(ti);
        if (!m) {
                ti->error = "can't allocate multipath";
                return -EINVAL;
        }

        r = parse_features(&as, m);
        if (r)
                goto bad;

        r = alloc_multipath_stage2(ti, m);
        if (r)
                goto bad;

        r = parse_hw_handler(&as, m);
        if (r)
                goto bad;

        r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
        if (r)
                goto bad;

        r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
        if (r)
                goto bad;

        if ((!m->nr_priority_groups && next_pg_num) ||
            (m->nr_priority_groups && !next_pg_num)) {
                ti->error = "invalid initial priority group";
                r = -EINVAL;
                goto bad;
        }

        /* parse the priority groups */
        while (as.argc) {
                struct priority_group *pg;
                unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);

                pg = parse_priority_group(&as, m);
                if (IS_ERR(pg)) {
                        r = PTR_ERR(pg);
                        goto bad;
                }

                nr_valid_paths += pg->nr_pgpaths;
                atomic_set(&m->nr_valid_paths, nr_valid_paths);

                list_add_tail(&pg->list, &m->priority_groups);
                pg_count++;
                pg->pg_num = pg_count;
                if (!--next_pg_num)
                        m->next_pg = pg;
        }

        if (pg_count != m->nr_priority_groups) {
                ti->error = "priority group count mismatch";
                r = -EINVAL;
                goto bad;
        }

        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_same_bios = 1;
        if (m->queue_mode == DM_TYPE_BIO_BASED)
                ti->per_io_data_size = multipath_per_bio_data_size();
        else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
                ti->per_io_data_size = sizeof(struct dm_mpath_io);

        return 0;

 bad:
        free_multipath(m);
        return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
        DEFINE_WAIT(wait);

        while (1) {
                prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

                if (!atomic_read(&m->pg_init_in_progress))
                        break;

                io_schedule();
        }
        finish_wait(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
        set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
        smp_mb__after_atomic();

        flush_workqueue(kmpath_handlerd);
        multipath_wait_for_pg_init_completion(m);
        flush_workqueue(kmultipathd);
        flush_work(&m->trigger_event);

        clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
        smp_mb__after_atomic();
}

static void multipath_dtr(struct dm_target *ti)
{
        struct multipath *m = ti->private;

        flush_multipath_work(m);
        free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
        unsigned long flags;
        struct multipath *m = pgpath->pg->m;

        spin_lock_irqsave(&m->lock, flags);

        if (!pgpath->is_active)
                goto out;

        DMWARN("Failing path %s.", pgpath->path.dev->name);

        pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
        pgpath->is_active = false;
        pgpath->fail_count++;

        atomic_dec(&m->nr_valid_paths);

        if (pgpath == m->current_pgpath)
                m->current_pgpath = NULL;

        dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
                       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));

        schedule_work(&m->trigger_event);

out:
        spin_unlock_irqrestore(&m->lock, flags);

        return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
        int r = 0, run_queue = 0;
        unsigned long flags;
        struct multipath *m = pgpath->pg->m;
        unsigned nr_valid_paths;

        spin_lock_irqsave(&m->lock, flags);

        if (pgpath->is_active)
                goto out;

        DMWARN("Reinstating path %s.", pgpath->path.dev->name);

        r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
        if (r)
                goto out;

        pgpath->is_active = true;

        nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
        if (nr_valid_paths == 1) {
                m->current_pgpath = NULL;
                run_queue = 1;
        } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
                if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
                        atomic_inc(&m->pg_init_in_progress);
        }

        dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
                       pgpath->path.dev->name, nr_valid_paths);

        schedule_work(&m->trigger_event);

out:
        spin_unlock_irqrestore(&m->lock, flags);
        if (run_queue) {
                dm_table_run_md_queue_async(m->ti->table);
                process_queued_io_list(m);
        }

        return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
                      action_fn action)
{
        int r = -EINVAL;
        struct pgpath *pgpath;
        struct priority_group *pg;

        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(pgpath, &pg->pgpaths, list) {
                        if (pgpath->path.dev == dev)
                                r = action(pgpath);
                }
        }

        return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
                      bool bypassed)
{
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        pg->bypassed = bypassed;
        m->current_pgpath = NULL;
        m->current_pg = NULL;

        spin_unlock_irqrestore(&m->lock, flags);

        schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
        struct priority_group *pg;
        unsigned pgnum;
        unsigned long flags;
        char dummy;

        if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
            !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to switch_pg_num");
                return -EINVAL;
        }

        spin_lock_irqsave(&m->lock, flags);
        list_for_each_entry(pg, &m->priority_groups, list) {
                pg->bypassed = false;
                if (--pgnum)
                        continue;

                m->current_pgpath = NULL;
                m->current_pg = NULL;
                m->next_pg = pg;
        }
        spin_unlock_irqrestore(&m->lock, flags);

        schedule_work(&m->trigger_event);
        return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
        struct priority_group *pg;
        unsigned pgnum;
        char dummy;

        if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
            !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to bypass_pg");
                return -EINVAL;
        }

        list_for_each_entry(pg, &m->priority_groups, list) {
                if (!--pgnum)
                        break;
        }

        bypass_pg(m, pg, bypassed);
        return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
        unsigned long flags;
        bool limit_reached = false;

        spin_lock_irqsave(&m->lock, flags);

        if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
            !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
                set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
        else
                limit_reached = true;

        spin_unlock_irqrestore(&m->lock, flags);

        return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
        struct pgpath *pgpath = data;
        struct priority_group *pg = pgpath->pg;
        struct multipath *m = pg->m;
        unsigned long flags;
        bool delay_retry = false;

        /* device or driver problems */
        switch (errors) {
        case SCSI_DH_OK:
                break;
        case SCSI_DH_NOSYS:
                if (!m->hw_handler_name) {
                        errors = 0;
                        break;
                }
                DMERR("Could not failover the device: Handler scsi_dh_%s "
                      "Error %d.", m->hw_handler_name, errors);
                /*
                 * Fail path for now, so we do not ping pong
                 */
                fail_path(pgpath);
                break;
        case SCSI_DH_DEV_TEMP_BUSY:
                /*
                 * Probably doing something like FW upgrade on the
                 * controller so try the other pg.
                 */
                bypass_pg(m, pg, true);
                break;
        case SCSI_DH_RETRY:
                /* Wait before retrying. */
                delay_retry = 1;
        case SCSI_DH_IMM_RETRY:
        case SCSI_DH_RES_TEMP_UNAVAIL:
                if (pg_init_limit_reached(m, pgpath))
                        fail_path(pgpath);
                errors = 0;
                break;
        case SCSI_DH_DEV_OFFLINED:
        default:
                /*
                 * We probably do not want to fail the path for a device
                 * error, but this is what the old dm did. In future
                 * patches we can do more advanced handling.
                 */
                fail_path(pgpath);
        }

        spin_lock_irqsave(&m->lock, flags);
        if (errors) {
                if (pgpath == m->current_pgpath) {
                        DMERR("Could not failover device. Error %d.", errors);
                        m->current_pgpath = NULL;
                        m->current_pg = NULL;
                }
        } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
                pg->bypassed = false;

        if (atomic_dec_return(&m->pg_init_in_progress) > 0)
                /* Activations of other paths are still ongoing */
                goto out;

        if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
                if (delay_retry)
                        set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
                else
                        clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

                if (__pg_init_all_paths(m))
                        goto out;
        }
        clear_bit(MPATHF_QUEUE_IO, &m->flags);

        process_queued_io_list(m);

        /*
         * Wake up any thread waiting to suspend.
         */
        wake_up(&m->pg_init_wait);

out:
        spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
        struct pgpath *pgpath =
                container_of(work, struct pgpath, activate_path.work);
        struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

        if (pgpath->is_active && !blk_queue_dying(q))
                scsi_dh_activate(q, pg_init_done, pgpath);
        else
                pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static int noretry_error(int error)
{
        switch (error) {
        case -EBADE:
                /*
                 * EBADE signals a reservation conflict.
                 * We shouldn't fail the path here as we can communicate with
                 * the target. We should failover to the next path, but in
                 * doing so we might be causing a ping-pong between paths.
                 * So just return the reservation conflict error.
                 */
        case -EOPNOTSUPP:
        case -EREMOTEIO:
        case -EILSEQ:
        case -ENODATA:
        case -ENOSPC:
                return 1;
        }

        /* Anything else could be a path failure, so should be retried */
        return 0;
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
                     int error, struct dm_mpath_io *mpio)
{
        /*
         * We don't queue any clone request inside the multipath target
         * during end I/O handling, since those clone requests don't have
         * bio clones. If we queue them inside the multipath target,
         * we need to make bio clones, that requires memory allocation.
         * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
         *  don't have bio clones.)
         * Instead of queueing the clone request here, we queue the original
         * request into dm core, which will remake a clone request and
         * clone bios for it and resubmit it later.
         */
        int r = DM_ENDIO_REQUEUE;

        if (!error && !clone->errors)
                return 0;       /* I/O complete */

        if (noretry_error(error))
                return error;

        if (mpio->pgpath)
                fail_path(mpio->pgpath);

        if (!atomic_read(&m->nr_valid_paths)) {
                if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
                        if (!must_push_back_rq(m))
                                r = -EIO;
                }
        }

        return r;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
                            int error, union map_info *map_context)
{
        struct multipath *m = ti->private;
        struct dm_mpath_io *mpio = get_mpio(map_context);
        struct pgpath *pgpath;
        struct path_selector *ps;
        int r;

        BUG_ON(!mpio);

        r = do_end_io(m, clone, error, mpio);
        pgpath = mpio->pgpath;
        if (pgpath) {
                ps = &pgpath->pg->ps;
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
        }
        clear_request_fn_mpio(m, map_context);

        return r;
}

static int do_end_io_bio(struct multipath *m, struct bio *clone,
                         int error, struct dm_mpath_io *mpio)
{
        unsigned long flags;

        if (!error)
                return 0;       /* I/O complete */

        if (noretry_error(error))
                return error;

        if (mpio->pgpath)
                fail_path(mpio->pgpath);

        if (!atomic_read(&m->nr_valid_paths)) {
                if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
                        if (!must_push_back_bio(m))
                                return -EIO;
                        return DM_ENDIO_REQUEUE;
                }
        }

        /* Queue for the daemon to resubmit */
        dm_bio_restore(get_bio_details_from_bio(clone), clone);

        spin_lock_irqsave(&m->lock, flags);
        bio_list_add(&m->queued_bios, clone);
        spin_unlock_irqrestore(&m->lock, flags);
        if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
                queue_work(kmultipathd, &m->process_queued_bios);

        return DM_ENDIO_INCOMPLETE;
}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
{
        struct multipath *m = ti->private;
        struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
        struct pgpath *pgpath;
        struct path_selector *ps;
        int r;

        BUG_ON(!mpio);

        r = do_end_io_bio(m, clone, error, mpio);
        pgpath = mpio->pgpath;
        if (pgpath) {
                ps = &pgpath->pg->ps;
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
        }

        return r;
}

1da177e4
LT
1673/*
1674 * Suspend can't complete until all the I/O is processed so if
436d4108
AK
1675 * the last path fails we must error any remaining I/O.
1676 * Note that if the freeze_bdev fails while suspending, the
1677 * queue_if_no_path state is lost - userspace should reset it.
1da177e4
LT
1678 */
1679static void multipath_presuspend(struct dm_target *ti)
1680{
7943bd6d 1681 struct multipath *m = ti->private;
1da177e4 1682
be7d31cc 1683 queue_if_no_path(m, false, true);
1da177e4
LT
1684}
1685
6df400ab
KU
1686static void multipath_postsuspend(struct dm_target *ti)
1687{
6380f26f
MA
1688 struct multipath *m = ti->private;
1689
1690 mutex_lock(&m->work_mutex);
2bded7bd 1691 flush_multipath_work(m);
6380f26f 1692 mutex_unlock(&m->work_mutex);
6df400ab
KU
1693}
1694
436d4108
AK
1695/*
1696 * Restore the queue_if_no_path setting.
1697 */
1da177e4
LT
1698static void multipath_resume(struct dm_target *ti)
1699{
7943bd6d 1700 struct multipath *m = ti->private;
1814f2e3 1701 unsigned long flags;
1da177e4 1702
1814f2e3 1703 spin_lock_irqsave(&m->lock, flags);
518257b1
MS
1704 if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
1705 set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1706 else
1707 clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1814f2e3 1708 spin_unlock_irqrestore(&m->lock, flags);
1da177e4
LT
1709}
1710
1711/*
1712 * Info output has the following format:
1713 * num_multipath_feature_args [multipath_feature_args]*
1714 * num_handler_status_args [handler_status_args]*
1715 * num_groups init_group_number
1716 * [A|D|E num_ps_status_args [ps_status_args]*
1717 * num_paths num_selector_args
1718 * [path_dev A|F fail_count [selector_args]* ]+ ]+
1719 *
1720 * Table output has the following format (identical to the constructor string):
1721 * num_feature_args [features_args]*
1722 * num_handler_args hw_handler [hw_handler_args]*
1723 * num_groups init_group_number
1724 * [priority selector-name num_ps_args [ps_args]*
1725 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1726 */
static void multipath_status(struct dm_target *ti, status_type_t type,
                             unsigned status_flags, char *result, unsigned maxlen)
{
        int sz = 0;
        unsigned long flags;
        struct multipath *m = ti->private;
        struct priority_group *pg;
        struct pgpath *p;
        unsigned pg_num;
        char state;

        spin_lock_irqsave(&m->lock, flags);

        /* Features */
        if (type == STATUSTYPE_INFO)
                DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
                       atomic_read(&m->pg_init_count));
        else {
                DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
                              (m->pg_init_retries > 0) * 2 +
                              (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
                              test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
                              (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

                if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                        DMEMIT("queue_if_no_path ");
                if (m->pg_init_retries)
                        DMEMIT("pg_init_retries %u ", m->pg_init_retries);
                if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
                        DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
                if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
                        DMEMIT("retain_attached_hw_handler ");
                if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
                        switch(m->queue_mode) {
                        case DM_TYPE_BIO_BASED:
                                DMEMIT("queue_mode bio ");
                                break;
                        case DM_TYPE_MQ_REQUEST_BASED:
                                DMEMIT("queue_mode mq ");
                                break;
                        }
                }
        }

        if (!m->hw_handler_name || type == STATUSTYPE_INFO)
                DMEMIT("0 ");
        else
                DMEMIT("1 %s ", m->hw_handler_name);

        DMEMIT("%u ", m->nr_priority_groups);

        if (m->next_pg)
                pg_num = m->next_pg->pg_num;
        else if (m->current_pg)
                pg_num = m->current_pg->pg_num;
        else
                pg_num = (m->nr_priority_groups ? 1 : 0);

        DMEMIT("%u ", pg_num);

        switch (type) {
        case STATUSTYPE_INFO:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed)
                                state = 'D';    /* Disabled */
                        else if (pg == m->current_pg)
                                state = 'A';    /* Currently Active */
                        else
                                state = 'E';    /* Enabled */

                        DMEMIT("%c ", state);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->info_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s %s %u ", p->path.dev->name,
                                       p->is_active ? "A" : "F",
                                       p->fail_count);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;

        case STATUSTYPE_TABLE:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        DMEMIT("%s ", pg->ps.type->name);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->table_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s ", p->path.dev->name);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;
        }

        spin_unlock_irqrestore(&m->lock, flags);
}
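
/*
 * Illustrative only: for the hypothetical two-group round-robin map
 * sketched above, with all paths healthy and no pg_init outstanding, the
 * corresponding STATUSTYPE_INFO output could look like
 *
 *   2 0 0 0 2 1 A 0 2 0 8:16 A 0 8:32 A 0 E 0 1 0 8:48 A 0
 *
 * i.e. two feature args (the queue_if_no_path flag and the pg_init
 * count), no hw handler status, two groups with group 1 active, and for
 * each path its device, an A(ctive)/F(ailed) state and a fail count.
 * The exact fields depend on the path selector's status callback.
 */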

static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
        int r = -EINVAL;
        struct dm_dev *dev;
        struct multipath *m = ti->private;
        action_fn action;

        mutex_lock(&m->work_mutex);

        if (dm_suspended(ti)) {
                r = -EBUSY;
                goto out;
        }

        if (argc == 1) {
                if (!strcasecmp(argv[0], "queue_if_no_path")) {
                        r = queue_if_no_path(m, true, false);
                        goto out;
                } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
                        r = queue_if_no_path(m, false, false);
                        goto out;
                }
        }

        if (argc != 2) {
                DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
                goto out;
        }

        if (!strcasecmp(argv[0], "disable_group")) {
                r = bypass_pg_num(m, argv[1], true);
                goto out;
        } else if (!strcasecmp(argv[0], "enable_group")) {
                r = bypass_pg_num(m, argv[1], false);
                goto out;
        } else if (!strcasecmp(argv[0], "switch_group")) {
                r = switch_pg_num(m, argv[1]);
                goto out;
        } else if (!strcasecmp(argv[0], "reinstate_path"))
                action = reinstate_path;
        else if (!strcasecmp(argv[0], "fail_path"))
                action = fail_path;
        else {
                DMWARN("Unrecognised multipath message received: %s", argv[0]);
                goto out;
        }

        r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
        if (r) {
                DMWARN("message: error getting device %s", argv[1]);
                goto out;
        }

        r = action_dev(m, dev, action);

        dm_put_device(ti, dev);

out:
        mutex_unlock(&m->work_mutex);
        return r;
}
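
/*
 * These messages normally arrive via dmsetup's message command, e.g.
 * (device and path names here are purely illustrative):
 *
 *   dmsetup message mpatha 0 queue_if_no_path
 *   dmsetup message mpatha 0 fail_path 8:32
 *   dmsetup message mpatha 0 switch_group 2
 *
 * One-argument messages toggle queue_if_no_path; two-argument messages
 * name either a priority group number or a path device.
 */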

static int multipath_prepare_ioctl(struct dm_target *ti,
                                   struct block_device **bdev, fmode_t *mode)
{
        struct multipath *m = ti->private;
        struct pgpath *current_pgpath;
        int r;

        current_pgpath = lockless_dereference(m->current_pgpath);
        if (!current_pgpath)
                current_pgpath = choose_pgpath(m, 0);

        if (current_pgpath) {
                if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
                        *bdev = current_pgpath->path.dev->bdev;
                        *mode = current_pgpath->path.dev->mode;
                        r = 0;
                } else {
                        /* pg_init has not started or completed */
                        r = -ENOTCONN;
                }
        } else {
                /* No path is available */
                if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                        r = -ENOTCONN;
                else
                        r = -EIO;
        }

        if (r == -ENOTCONN) {
                if (!lockless_dereference(m->current_pg)) {
                        /* Path status changed, redo selection */
                        (void) choose_pgpath(m, 0);
                }
                if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
                        pg_init_all_paths(m);
                dm_table_run_md_queue_async(m->ti->table);
                process_queued_io_list(m);
        }

        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
        if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
                return 1;
        return r;
}
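
/*
 * Editorial note, hedged: in this kernel's dm.c the ioctl caller treats
 * -ENOTCONN as "retry shortly" (a path may come back or pg_init may
 * finish), while a return of 1 signals that the target and the path
 * device differ in size, so dm core applies extra validation rather than
 * passing the ioctl straight through.
 */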

static int multipath_iterate_devices(struct dm_target *ti,
                                     iterate_devices_callout_fn fn, void *data)
{
        struct multipath *m = ti->private;
        struct priority_group *pg;
        struct pgpath *p;
        int ret = 0;

        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(p, &pg->pgpaths, list) {
                        ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
                        if (ret)
                                goto out;
                }
        }

out:
        return ret;
}
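
/*
 * Sketch only: a callout passed in by dm core has the shape below.
 * dm-table.c uses callouts of this type to validate device properties
 * across every path; this hypothetical example merely counts the path
 * devices visited.
 */
#if 0   /* example, not part of the driver */
static int count_path_devices(struct dm_target *ti, struct dm_dev *dev,
                              sector_t start, sector_t len, void *data)
{
        unsigned *count = data;

        (*count)++;
        return 0;       /* returning non-zero stops the iteration early */
}
#endif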

static int pgpath_busy(struct pgpath *pgpath)
{
        struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

        return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so that even if we mapped I/Os now, they would just wait on
 * the underlying queue).  In other words, when we want to fail I/Os or
 * queue them inside the target because no map is available, we must not
 * return "busy"; otherwise dm core would withhold the I/Os and we could
 * do neither.
 */
static int multipath_busy(struct dm_target *ti)
{
        bool busy = false, has_active = false;
        struct multipath *m = ti->private;
        struct priority_group *pg, *next_pg;
        struct pgpath *pgpath;

        /* pg_init in progress */
        if (atomic_read(&m->pg_init_in_progress))
                return true;

        /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
        if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);

        /* Guess which priority_group will be used at next mapping time */
        pg = lockless_dereference(m->current_pg);
        next_pg = lockless_dereference(m->next_pg);
        if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
                pg = next_pg;

        if (!pg) {
                /*
                 * We don't know which pg will be used at next mapping time.
                 * We don't call choose_pgpath() here, to avoid triggering
                 * pg_init merely as a side effect of this busy check.
                 * So we don't know whether the underlying devices we will
                 * be using at next mapping time are busy or not.  Just try
                 * mapping.
                 */
                return busy;
        }

        /*
         * If there is at least one non-busy active path, the path selector
         * will be able to select it.  So we consider such a pg as not busy.
         */
        busy = true;
        list_for_each_entry(pgpath, &pg->pgpaths, list) {
                if (pgpath->is_active) {
                        has_active = true;
                        if (!pgpath_busy(pgpath)) {
                                busy = false;
                                break;
                        }
                }
        }

        if (!has_active) {
                /*
                 * No active path in this pg, so this pg won't be used and
                 * the current_pg will be changed at next mapping time.
                 * We need to try mapping to determine it.
                 */
                busy = false;
        }

        return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
        .name = "multipath",
        .version = {1, 12, 0},
        .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
        .map_rq = multipath_map,
        .clone_and_map_rq = multipath_clone_and_map,
        .release_clone_rq = multipath_release_clone,
        .rq_end_io = multipath_end_io,
        .map = multipath_map_bio,
        .end_io = multipath_end_io_bio,
        .presuspend = multipath_presuspend,
        .postsuspend = multipath_postsuspend,
        .resume = multipath_resume,
        .status = multipath_status,
        .message = multipath_message,
        .prepare_ioctl = multipath_prepare_ioctl,
        .iterate_devices = multipath_iterate_devices,
        .busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
        int r;

        /* allocate a slab for the dm_mpath_ios */
        _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
        if (!_mpio_cache)
                return -ENOMEM;

        r = dm_register_target(&multipath_target);
        if (r < 0) {
                DMERR("request-based register failed %d", r);
                r = -EINVAL;
                goto bad_register_target;
        }

        kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
        if (!kmultipathd) {
                DMERR("failed to create workqueue kmpathd");
                r = -ENOMEM;
                goto bad_alloc_kmultipathd;
        }

        /*
         * A separate workqueue is used to handle the device handlers
         * so that the existing workqueue is not overloaded.  Overloading
         * the old workqueue would also create a bottleneck on the path
         * to storage hardware device activation.
         */
        kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
                                                  WQ_MEM_RECLAIM);
        if (!kmpath_handlerd) {
                DMERR("failed to create workqueue kmpath_handlerd");
                r = -ENOMEM;
                goto bad_alloc_kmpath_handlerd;
        }

        return 0;

bad_alloc_kmpath_handlerd:
        destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
        dm_unregister_target(&multipath_target);
bad_register_target:
        kmem_cache_destroy(_mpio_cache);

        return r;
}

static void __exit dm_multipath_exit(void)
{
        destroy_workqueue(kmpath_handlerd);
        destroy_workqueue(kmultipathd);

        dm_unregister_target(&multipath_target);
        kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");