/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
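
/*
 * DM_PG_INIT_DELAY_DEFAULT is a "not configured" sentinel:
 * __pg_init_all_paths() below falls back to DM_PG_INIT_DELAY_MSECS
 * whenever the table did not specify pg_init_delay_msecs.
 */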

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	unsigned long flags;		/* Multipath state flags */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	enum dm_queue_mode queue_mode;

	struct mutex work_mutex;
	struct work_struct trigger_event;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;
};

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};
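
/*
 * This context lives in the target's per-I/O data area (sized via
 * ti->per_io_data_size in multipath_ctr()): request-based multipath
 * reaches it through map_info, bio-based through dm_per_bio_data().
 */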

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
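
/*
 * These are bit numbers within multipath->flags, manipulated with the
 * atomic set_bit()/clear_bit()/test_bit() helpers throughout this file.
 */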

/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = true;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
		atomic_set(&m->nr_valid_paths, 0);
		atomic_set(&m->pg_init_in_progress, 0);
		atomic_set(&m->pg_init_count, 0);
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		/*
		 * Default to request-based.
		 */
		if (dm_use_blk_mq(dm_table_get_md(ti->table)))
			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
		else
			m->queue_mode = DM_TYPE_REQUEST_BASED;
	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	return 0;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	void *bio_details = mpio + 1;

	return bio_details;
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
					struct dm_bio_details **bio_details_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);

	memset(mpio, 0, sizeof(*mpio));
	memset(bio_details, 0, sizeof(*bio_details));
	dm_bio_record(bio_details, bio);

	if (mpio_p)
		*mpio_p = mpio;
	if (bio_details_p)
		*bio_details_p = bio_details;
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

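/*
 * Locking convention for the path-selection state: writers take m->lock,
 * while hot-path readers peek at current_pgpath/current_pg/next_pg with
 * lockless_dereference() and only fall back to the spinlock when the
 * cached choice must be updated.
 */
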
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}

static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (lockless_dereference(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = lockless_dereference(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed)
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}

/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
do {									\
	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
									\
	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
		 dm_device_name(md),					\
		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 dm_noflush_suspending((m)->ti));			\
} while (0)
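
/*
 * With dynamic debug enabled this emits a line along the lines of
 * (illustrative): "mpathb: returning EIO; QIFNP = 0; SQIFNP = 0; DNFS = 0",
 * i.e. queue_if_no_path, its saved value, and whether a noflush suspend
 * is in progress.
 */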

/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (pg_init_all_paths(m))
			return DM_MAPIO_DELAY_REQUEUE;
		return DM_MAPIO_REQUEUE;
	}

	memset(mpio, 0, sizeof(*mpio));
	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		bool queue_dying = blk_queue_dying(q);
		DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
			    PTR_ERR(clone), queue_dying ? " (path offline)" : "");
		if (queue_dying) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
		}
		return DM_MAPIO_DELAY_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->rq_disk = bdev->bd_disk;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static void multipath_release_clone(struct request *clone)
{
	blk_put_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */
static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
{
	size_t nr_bytes = bio->bi_iter.bi_size;
	struct pgpath *pgpath;
	unsigned long flags;
	bool queue_io;

	/* Do we need to select a new pgpath? */
	pgpath = lockless_dereference(m->current_pgpath);
	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
	if (!pgpath || !queue_io)
		pgpath = choose_pgpath(m, nr_bytes);

	if ((pgpath && queue_io) ||
	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
		/* Queue for the daemon to resubmit */
		spin_lock_irqsave(&m->lock, flags);
		bio_list_add(&m->queued_bios, bio);
		spin_unlock_irqrestore(&m->lock, flags);
		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		else if (!queue_io)
			queue_work(kmultipathd, &m->process_queued_bios);
		return DM_MAPIO_SUBMITTED;
	}

	if (!pgpath) {
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return DM_MAPIO_KILL;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bio->bi_status = 0;
	bio_set_dev(bio, pgpath->path.dev->bdev);
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio, NULL);

	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
		switch (r) {
		case DM_MAPIO_KILL:
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		case DM_MAPIO_REQUEUE:
			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
			generic_make_request(bio);
			break;
		case 0:
			break;
		default:
			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
		}
	}
	blk_finish_plug(&plug);
}

static void assign_bit(bool value, long nr, unsigned long *addr)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
			    bool save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	assign_bit((save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
		   (!save_old_value && queue_if_no_path),
		   MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	assign_bit(queue_if_no_path || dm_noflush_suspending(m->ti),
		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
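/*
 * Illustrative table line (device numbers hypothetical): one round-robin
 * priority group of two paths, no features, no hardware handler:
 *   0 1024 multipath 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 */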
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Clear any hw_handler_params associated with a
			 * handler that isn't already attached.
			 */
			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
				kfree(m->hw_handler_params);
				m->hw_handler_params = NULL;
			}

			/*
			 * Reset hw_handler_name to match the attached handler
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(p->path.dev->bdev, b));
			goto retain;
		}
		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware handler parameters";
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static const struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else if (!strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* target arguments */
	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

 bad:
	free_multipath(m);
	return r;
}
1115
2bded7bd
KU
1116static void multipath_wait_for_pg_init_completion(struct multipath *m)
1117{
9f4c3f87 1118 DEFINE_WAIT(wait);
2bded7bd
KU
1119
1120 while (1) {
9f4c3f87 1121 prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
2bded7bd 1122
91e968aa 1123 if (!atomic_read(&m->pg_init_in_progress))
2bded7bd 1124 break;
2bded7bd
KU
1125
1126 io_schedule();
1127 }
9f4c3f87 1128 finish_wait(&m->pg_init_wait, &wait);
2bded7bd
KU
1129}
1130
1131static void flush_multipath_work(struct multipath *m)
1da177e4 1132{
518257b1
MS
1133 set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1134 smp_mb__after_atomic();
954a73d5 1135
bab7cfc7 1136 flush_workqueue(kmpath_handlerd);
2bded7bd 1137 multipath_wait_for_pg_init_completion(m);
a044d016 1138 flush_workqueue(kmultipathd);
43829731 1139 flush_work(&m->trigger_event);
954a73d5 1140
518257b1
MS
1141 clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1142 smp_mb__after_atomic();
6df400ab
KU
1143}
1144
1145static void multipath_dtr(struct dm_target *ti)
1146{
1147 struct multipath *m = ti->private;
1148
2bded7bd 1149 flush_multipath_work(m);
1da177e4
LT
1150 free_multipath(m);
1151}
1152
1da177e4
LT
1153/*
1154 * Take a path out of use.
1155 */
1156static int fail_path(struct pgpath *pgpath)
1157{
1158 unsigned long flags;
1159 struct multipath *m = pgpath->pg->m;
1160
1161 spin_lock_irqsave(&m->lock, flags);
1162
6680073d 1163 if (!pgpath->is_active)
1da177e4
LT
1164 goto out;
1165
72d94861 1166 DMWARN("Failing path %s.", pgpath->path.dev->name);
1da177e4
LT
1167
1168 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
be7d31cc 1169 pgpath->is_active = false;
1da177e4
LT
1170 pgpath->fail_count++;
1171
91e968aa 1172 atomic_dec(&m->nr_valid_paths);
1da177e4
LT
1173
1174 if (pgpath == m->current_pgpath)
1175 m->current_pgpath = NULL;
1176
b15546f9 1177 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
91e968aa 1178 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
b15546f9 1179
fe9cf30e 1180 schedule_work(&m->trigger_event);
1da177e4
LT
1181
1182out:
1183 spin_unlock_irqrestore(&m->lock, flags);
1184
1185 return 0;
1186}
1187
1188/*
1189 * Reinstate a previously-failed path
1190 */
1191static int reinstate_path(struct pgpath *pgpath)
1192{
63d832c3 1193 int r = 0, run_queue = 0;
1da177e4
LT
1194 unsigned long flags;
1195 struct multipath *m = pgpath->pg->m;
91e968aa 1196 unsigned nr_valid_paths;
1da177e4
LT
1197
1198 spin_lock_irqsave(&m->lock, flags);
1199
6680073d 1200 if (pgpath->is_active)
1da177e4
LT
1201 goto out;
1202
ec31f3f7 1203 DMWARN("Reinstating path %s.", pgpath->path.dev->name);
1da177e4
LT
1204
1205 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1206 if (r)
1207 goto out;
1208
be7d31cc 1209 pgpath->is_active = true;
1da177e4 1210
91e968aa
MS
1211 nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1212 if (nr_valid_paths == 1) {
e54f77dd 1213 m->current_pgpath = NULL;
63d832c3 1214 run_queue = 1;
e54f77dd 1215 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
4e2d19e4 1216 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
91e968aa 1217 atomic_inc(&m->pg_init_in_progress);
e54f77dd 1218 }
1da177e4 1219
b15546f9 1220 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
91e968aa 1221 pgpath->path.dev->name, nr_valid_paths);
b15546f9 1222
fe9cf30e 1223 schedule_work(&m->trigger_event);
1da177e4
LT
1224
1225out:
1226 spin_unlock_irqrestore(&m->lock, flags);
76e33fe4 1227 if (run_queue) {
63d832c3 1228 dm_table_run_md_queue_async(m->ti->table);
7e48c768 1229 process_queued_io_list(m);
76e33fe4 1230 }
1da177e4
LT
1231
1232 return r;
1233}
1234
1235/*
1236 * Fail or reinstate all paths that match the provided struct dm_dev.
1237 */
1238static int action_dev(struct multipath *m, struct dm_dev *dev,
1239 action_fn action)
1240{
19040c0b 1241 int r = -EINVAL;
1da177e4
LT
1242 struct pgpath *pgpath;
1243 struct priority_group *pg;
1244
1245 list_for_each_entry(pg, &m->priority_groups, list) {
1246 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1247 if (pgpath->path.dev == dev)
1248 r = action(pgpath);
1249 }
1250 }
1251
1252 return r;
1253}
1254
1255/*
1256 * Temporarily try to avoid having to use the specified PG
1257 */
1258static void bypass_pg(struct multipath *m, struct priority_group *pg,
be7d31cc 1259 bool bypassed)
1da177e4
LT
1260{
1261 unsigned long flags;
1262
1263 spin_lock_irqsave(&m->lock, flags);
1264
1265 pg->bypassed = bypassed;
1266 m->current_pgpath = NULL;
1267 m->current_pg = NULL;
1268
1269 spin_unlock_irqrestore(&m->lock, flags);
1270
fe9cf30e 1271 schedule_work(&m->trigger_event);
1da177e4
LT
1272}
1273
1274/*
1275 * Switch to using the specified PG from the next I/O that gets mapped
1276 */
1277static int switch_pg_num(struct multipath *m, const char *pgstr)
1278{
1279 struct priority_group *pg;
1280 unsigned pgnum;
1281 unsigned long flags;
31998ef1 1282 char dummy;
1da177e4 1283
31998ef1 1284 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
cc5bd925 1285 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1da177e4
LT
1286 DMWARN("invalid PG number supplied to switch_pg_num");
1287 return -EINVAL;
1288 }
1289
1290 spin_lock_irqsave(&m->lock, flags);
1291 list_for_each_entry(pg, &m->priority_groups, list) {
be7d31cc 1292 pg->bypassed = false;
1da177e4
LT
1293 if (--pgnum)
1294 continue;
1295
1296 m->current_pgpath = NULL;
1297 m->current_pg = NULL;
1298 m->next_pg = pg;
1299 }
1300 spin_unlock_irqrestore(&m->lock, flags);
1301
fe9cf30e 1302 schedule_work(&m->trigger_event);
1da177e4
LT
1303 return 0;
1304}
1305
1306/*
1307 * Set/clear bypassed status of a PG.
1308 * PGs are numbered upwards from 1 in the order they were declared.
1309 */
be7d31cc 1310static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1da177e4
LT
1311{
1312 struct priority_group *pg;
1313 unsigned pgnum;
31998ef1 1314 char dummy;
1da177e4 1315
31998ef1 1316 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
cc5bd925 1317 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1da177e4
LT
1318 DMWARN("invalid PG number supplied to bypass_pg");
1319 return -EINVAL;
1320 }
1321
1322 list_for_each_entry(pg, &m->priority_groups, list) {
1323 if (!--pgnum)
1324 break;
1325 }
1326
1327 bypass_pg(m, pg, bypassed);
1328 return 0;
1329}
1330
c9e45581
DW
1331/*
1332 * Should we retry pg_init immediately?
1333 */
be7d31cc 1334static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
c9e45581
DW
1335{
1336 unsigned long flags;
be7d31cc 1337 bool limit_reached = false;
c9e45581
DW
1338
1339 spin_lock_irqsave(&m->lock, flags);
1340
91e968aa
MS
1341 if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1342 !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
518257b1 1343 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
c9e45581 1344 else
be7d31cc 1345 limit_reached = true;
c9e45581
DW
1346
1347 spin_unlock_irqrestore(&m->lock, flags);
1348
1349 return limit_reached;
1350}
1351
3ae31f6a 1352static void pg_init_done(void *data, int errors)
cfae5c9b 1353{
83c0d5d5 1354 struct pgpath *pgpath = data;
cfae5c9b
CS
1355 struct priority_group *pg = pgpath->pg;
1356 struct multipath *m = pg->m;
1357 unsigned long flags;
be7d31cc 1358 bool delay_retry = false;
cfae5c9b
CS
1359
1360 /* device or driver problems */
1361 switch (errors) {
1362 case SCSI_DH_OK:
1363 break;
1364 case SCSI_DH_NOSYS:
1365 if (!m->hw_handler_name) {
1366 errors = 0;
1367 break;
1368 }
f7b934c8
MB
1369 DMERR("Could not failover the device: Handler scsi_dh_%s "
1370 "Error %d.", m->hw_handler_name, errors);
cfae5c9b
CS
1371 /*
1372 * Fail path for now, so we do not ping pong
1373 */
1374 fail_path(pgpath);
1375 break;
1376 case SCSI_DH_DEV_TEMP_BUSY:
1377 /*
1378 * Probably doing something like FW upgrade on the
1379 * controller so try the other pg.
1380 */
be7d31cc 1381 bypass_pg(m, pg, true);
cfae5c9b 1382 break;
cfae5c9b 1383 case SCSI_DH_RETRY:
4e2d19e4
CS
1384 /* Wait before retrying. */
1385 delay_retry = 1;
7b06e09a 1386 /* fall through */
cfae5c9b
CS
1387 case SCSI_DH_IMM_RETRY:
1388 case SCSI_DH_RES_TEMP_UNAVAIL:
1389 if (pg_init_limit_reached(m, pgpath))
1390 fail_path(pgpath);
1391 errors = 0;
1392 break;
ec31f3f7 1393 case SCSI_DH_DEV_OFFLINED:
cfae5c9b
CS
1394 default:
1395 /*
1396 * We probably do not want to fail the path for a device
1397 * error, but this is what the old dm did. In future
1398 * patches we can do more advanced handling.
1399 */
1400 fail_path(pgpath);
1401 }
1402
1403 spin_lock_irqsave(&m->lock, flags);
1404 if (errors) {
e54f77dd
CS
1405 if (pgpath == m->current_pgpath) {
1406 DMERR("Could not failover device. Error %d.", errors);
1407 m->current_pgpath = NULL;
1408 m->current_pg = NULL;
1409 }
518257b1 1410 } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
be7d31cc 1411 pg->bypassed = false;
cfae5c9b 1412
91e968aa 1413 if (atomic_dec_return(&m->pg_init_in_progress) > 0)
d0259bf0
KU
1414 /* Activations of other paths are still on going */
1415 goto out;
1416
518257b1
MS
1417 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1418 if (delay_retry)
1419 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1420 else
1421 clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1422
3e9f1be1
HR
1423 if (__pg_init_all_paths(m))
1424 goto out;
1425 }
518257b1 1426 clear_bit(MPATHF_QUEUE_IO, &m->flags);
d0259bf0 1427
7e48c768 1428 process_queued_io_list(m);
76e33fe4 1429
2bded7bd
KU
1430 /*
1431 * Wake up any thread waiting to suspend.
1432 */
1433 wake_up(&m->pg_init_wait);
1434
d0259bf0 1435out:
cfae5c9b
CS
1436 spin_unlock_irqrestore(&m->lock, flags);
1437}

static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}

static int noretry_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
		return 1;
	}

	/* Anything else could be a path failure, so should be retried */
	return 0;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    blk_status_t error, union map_info *map_context)
{
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath = mpio->pgpath;
	int r = DM_ENDIO_DONE;

	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, which requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	if (error && !noretry_error(error)) {
		struct multipath *m = ti->private;

		r = DM_ENDIO_REQUEUE;

		if (pgpath)
			fail_path(pgpath);

		if (atomic_read(&m->nr_valid_paths) == 0 &&
		    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (error == BLK_STS_IOERR)
				dm_report_EIO(m);
			/* complete with the original error */
			r = DM_ENDIO_DONE;
		}
	}

	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
				blk_status_t *error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath = mpio->pgpath;
	unsigned long flags;
	int r = DM_ENDIO_DONE;

	if (!*error || noretry_error(*error))
		goto done;

	if (pgpath)
		fail_path(pgpath);

	if (atomic_read(&m->nr_valid_paths) == 0 &&
	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
		dm_report_EIO(m);
		*error = BLK_STS_IOERR;
		goto done;
	}

	/* Queue for the daemon to resubmit */
	dm_bio_restore(get_bio_details_from_bio(clone), clone);

	spin_lock_irqsave(&m->lock, flags);
	bio_list_add(&m->queued_bios, clone);
	spin_unlock_irqrestore(&m->lock, flags);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);

	r = DM_ENDIO_INCOMPLETE;
done:
	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, false, true);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	assign_bit(test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
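
/*
 * Per-PG state codes emitted below: A = currently active, D = disabled
 * (bypassed), E = enabled; per-path codes: A = active, F = failed.
 */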
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
			switch (m->queue_mode) {
			case DM_TYPE_BIO_BASED:
				DMEMIT("queue_mode bio ");
				break;
			case DM_TYPE_MQ_REQUEST_BASED:
				DMEMIT("queue_mode mq ");
				break;
			default:
				WARN_ON_ONCE(true);
				break;
			}
		}
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

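/*
 * Messages arrive from userspace, e.g. (illustrative):
 *   dmsetup message <mapped-device> 0 fail_path <path-device>
 * Recognised messages are dispatched below.
 */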
1737static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1738{
6380f26f 1739 int r = -EINVAL;
1da177e4 1740 struct dm_dev *dev;
7943bd6d 1741 struct multipath *m = ti->private;
1da177e4
LT
1742 action_fn action;
1743
6380f26f
MA
1744 mutex_lock(&m->work_mutex);
1745
c2f3d24b
KU
1746 if (dm_suspended(ti)) {
1747 r = -EBUSY;
1748 goto out;
1749 }
1750
1da177e4 1751 if (argc == 1) {
498f0103 1752 if (!strcasecmp(argv[0], "queue_if_no_path")) {
be7d31cc 1753 r = queue_if_no_path(m, true, false);
6380f26f 1754 goto out;
498f0103 1755 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
be7d31cc 1756 r = queue_if_no_path(m, false, false);
6380f26f
MA
1757 goto out;
1758 }
1da177e4
LT
1759 }
1760
6380f26f 1761 if (argc != 2) {
a356e426 1762 DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
6380f26f
MA
1763 goto out;
1764 }
1da177e4 1765
498f0103 1766 if (!strcasecmp(argv[0], "disable_group")) {
be7d31cc 1767 r = bypass_pg_num(m, argv[1], true);
6380f26f 1768 goto out;
498f0103 1769 } else if (!strcasecmp(argv[0], "enable_group")) {
be7d31cc 1770 r = bypass_pg_num(m, argv[1], false);
6380f26f 1771 goto out;
498f0103 1772 } else if (!strcasecmp(argv[0], "switch_group")) {
6380f26f
MA
1773 r = switch_pg_num(m, argv[1]);
1774 goto out;
498f0103 1775 } else if (!strcasecmp(argv[0], "reinstate_path"))
1da177e4 1776 action = reinstate_path;
498f0103 1777 else if (!strcasecmp(argv[0], "fail_path"))
1da177e4 1778 action = fail_path;
6380f26f 1779 else {
a356e426 1780 DMWARN("Unrecognised multipath message received: %s", argv[0]);
6380f26f
MA
1781 goto out;
1782 }
1da177e4 1783
8215d6ec 1784 r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1da177e4 1785 if (r) {
72d94861 1786 DMWARN("message: error getting device %s",
1da177e4 1787 argv[1]);
6380f26f 1788 goto out;
1da177e4
LT
1789 }
1790
1791 r = action_dev(m, dev, action);
1792
1793 dm_put_device(ti, dev);
1794
6380f26f
MA
1795out:
1796 mutex_unlock(&m->work_mutex);
1da177e4 1797 return r;
1da177e4
LT
1798}

static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	struct pgpath *current_pgpath;
	int r;

	current_pgpath = lockless_dereference(m->current_pgpath);
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);

	if (current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = current_pgpath->path.dev->bdev;
			*mode = current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	if (r == -ENOTCONN) {
		if (!lockless_dereference(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}
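
/*
 * A note on the caller's side of this contract (a sketch from dm.c of
 * this kernel generation, not guaranteed verbatim): on -ENOTCONN the
 * ioctl path sleeps briefly and retries while pg_init completes, and a
 * return of 1 signals that the target maps only part of the device, in
 * which case dm core refuses blind ioctl pass-through for unprivileged
 * callers.
 */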

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
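
/*
 * Usage note (a sketch): dm core drives this iterator through dm_table
 * helpers, e.g. when stacking queue limits or probing device
 * capabilities, so the callback visits the underlying device of every
 * path in every priority group.
 */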

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * Return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we mapped I/Os now, they would just wait on
 * the underlying queue).
 * In other words, when we want to fail I/Os or queue them inside the
 * target because no usable map exists, we must not return "busy";
 * otherwise dm core would stop handing us the I/Os and we could not
 * do either.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);

	/* Guess which priority_group will be used at next mapping time */
	pg = lockless_dereference(m->current_pg);
	next_pg = lockless_dereference(m->next_pg);
	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here to avoid triggering
		 * pg_init just from a busy check.
		 * So we don't know whether the underlying devices we will
		 * be using at next mapping time are busy or not.
		 * Just try mapping.
		 */
		return busy;
	}

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it, so consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;
	}

	return busy;
}
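
/*
 * How .busy is consumed (a sketch, per the request-based dm core in
 * this tree): the dispatch path polls this hook before mapping a
 * request; returning true leaves the request on dm's queue for a
 * later retry instead of sending it to a path whose underlying queue
 * is already saturated.
 */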

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 12, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};
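
/*
 * For reference, a constructor table line this target accepts (a
 * sketch in the style of the device-mapper documentation; devices and
 * sizes are made up):
 *
 *   0 71014400 multipath 0 0 2 1 \
 *       round-robin 0 2 1 8:16 1000 8:32 1000 \
 *       round-robin 0 2 1 8:48 1000 8:64 1000
 *
 * i.e. no features, no hardware handler, two priority groups, initial
 * group 1, each group using round-robin with two paths and one
 * per-path argument (repeat_count = 1000).
 */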

static int __init dm_multipath_init(void)
{
	int r;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("request-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to run the device handlers,
	 * to avoid overloading the existing workqueue.  Overloading it
	 * would also create a bottleneck in the activation path of the
	 * storage hardware.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");