/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
/* Path properties */
struct pgpath {
        struct list_head list;

        struct priority_group *pg;      /* Owning PG */
        unsigned is_active;             /* Path status */
        unsigned fail_count;            /* Cumulative failure count */

        struct dm_path path;
        struct delayed_work activate_path;
};
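/*
 * path_to_pgpath() below recovers the enclosing struct pgpath from the
 * struct dm_path embedded in it.  Path selectors deal only in
 * struct dm_path, so this is how a selector's choice is mapped back
 * to the target's per-path state.
 */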
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
        struct list_head list;

        struct multipath *m;            /* Owning multipath instance */
        struct path_selector ps;

        unsigned pg_num;                /* Reference number */
        unsigned bypassed;              /* Temporarily bypass this PG? */

        unsigned nr_pgpaths;            /* Number of paths in PG */
        struct list_head pgpaths;
};
/* Multipath context */
struct multipath {
        struct list_head list;
        struct dm_target *ti;

        spinlock_t lock;

        const char *hw_handler_name;
        char *hw_handler_params;

        unsigned nr_priority_groups;
        struct list_head priority_groups;

        wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */

        unsigned pg_init_required;      /* pg_init needs calling? */
        unsigned pg_init_in_progress;   /* Only one pg_init allowed at once */
        unsigned pg_init_delay_retry;   /* Delay pg_init retry? */

        unsigned nr_valid_paths;        /* Total number of usable paths */
        struct pgpath *current_pgpath;
        struct priority_group *current_pg;
        struct priority_group *next_pg; /* Switch to this PG if set */
        unsigned repeat_count;          /* I/Os left before calling PS again */

        unsigned queue_io:1;            /* Must we queue all I/O? */
        unsigned queue_if_no_path:1;    /* Queue I/O if last path fails? */
        unsigned saved_queue_if_no_path:1; /* Saved state during suspension */

        unsigned pg_init_retries;       /* Number of times to retry pg_init */
        unsigned pg_init_count;         /* Number of times pg_init called */
        unsigned pg_init_delay_msecs;   /* Number of msecs before pg_init retry */

        unsigned queue_size;
        struct work_struct process_queued_ios;
        struct list_head queued_ios;

        struct work_struct trigger_event;

        /*
         * We must use a mempool of dm_mpath_io structs so that we
         * can resubmit bios on error.
         */
        mempool_t *mpio_pool;

        struct mutex work_mutex;
};
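/*
 * Most of the state above is protected by m->lock (taken with
 * spin_lock_irqsave() throughout this file); work_mutex serializes
 * postsuspend against target messages.
 */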
/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
        struct pgpath *pgpath;
        size_t nr_bytes;
};
typedef int (*action_fn) (struct pgpath *pgpath);
#define MIN_IOS 256     /* Mempool size */
static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
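/*
 * Two workqueues back this target: kmultipathd resubmits queued I/O
 * via process_queued_ios(), while kmpath_handlerd runs hardware
 * handler activation (see dm_multipath_init() for why the handlers
 * get an ordered queue of their own).
 */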
/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/
static struct pgpath *alloc_pgpath(void)
{
        struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

        if (pgpath) {
                pgpath->is_active = 1;
                INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
        }

        return pgpath;
}
static void free_pgpath(struct pgpath *pgpath)
{
        kfree(pgpath);
}
static struct priority_group *alloc_priority_group(void)
{
        struct priority_group *pg;

        pg = kzalloc(sizeof(*pg), GFP_KERNEL);

        if (pg)
                INIT_LIST_HEAD(&pg->pgpaths);

        return pg;
}
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
        struct pgpath *pgpath, *tmp;
        struct multipath *m = ti->private;

        list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
                list_del(&pgpath->list);
                if (m->hw_handler_name)
                        scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
                dm_put_device(ti, pgpath->path.dev);
                free_pgpath(pgpath);
        }
}
static void free_priority_group(struct priority_group *pg,
                                struct dm_target *ti)
{
        struct path_selector *ps = &pg->ps;

        if (ps->type) {
                ps->type->destroy(ps);
                dm_put_path_selector(ps->type);
        }

        free_pgpaths(&pg->pgpaths, ti);
        kfree(pg);
}
static struct multipath *alloc_multipath(struct dm_target *ti)
{
        struct multipath *m;

        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (m) {
                INIT_LIST_HEAD(&m->priority_groups);
                INIT_LIST_HEAD(&m->queued_ios);
                spin_lock_init(&m->lock);
                m->queue_io = 1;
                m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
                INIT_WORK(&m->process_queued_ios, process_queued_ios);
                INIT_WORK(&m->trigger_event, trigger_event);
                init_waitqueue_head(&m->pg_init_wait);
                mutex_init(&m->work_mutex);
                m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
                        return NULL;
                }
                m->ti = ti;
                ti->private = m;
        }

        return m;
}
static void free_multipath(struct multipath *m)
{
        struct priority_group *pg, *tmp;

        list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
                list_del(&pg->list);
                free_priority_group(pg, m->ti);
        }

        kfree(m->hw_handler_name);
        kfree(m->hw_handler_params);
        mempool_destroy(m->mpio_pool);
        kfree(m);
}
static int set_mapinfo(struct multipath *m, union map_info *info)
{
        struct dm_mpath_io *mpio;

        mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
        if (!mpio)
                return -ENOMEM;

        memset(mpio, 0, sizeof(*mpio));
        info->ptr = mpio;

        return 0;
}
static void clear_mapinfo(struct multipath *m, union map_info *info)
{
        struct dm_mpath_io *mpio = info->ptr;

        info->ptr = NULL;
        mempool_free(mpio, m->mpio_pool);
}
/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/
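/*
 * Kick off pg_init for every usable path in the current PG, delaying
 * the activation work if a retry has been requested.  Like the other
 * double-underscore helpers in this file, this expects the caller to
 * hold m->lock.
 */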
static void __pg_init_all_paths(struct multipath *m)
{
        struct pgpath *pgpath;
        unsigned long pg_init_delay = 0;

        m->pg_init_count++;
        m->pg_init_required = 0;
        if (m->pg_init_delay_retry)
                pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
                                                 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
        list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
                /* Skip failed paths */
                if (!pgpath->is_active)
                        continue;
                if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
                                       pg_init_delay))
                        m->pg_init_in_progress++;
        }
}
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
        m->current_pg = pgpath->pg;

        /* Must we initialise the PG first, and queue I/O till it's ready? */
        if (m->hw_handler_name) {
                m->pg_init_required = 1;
                m->queue_io = 1;
        } else {
                m->pg_init_required = 0;
                m->queue_io = 0;
        }

        m->pg_init_count = 0;
}
static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
                               size_t nr_bytes)
{
        struct dm_path *path;

        path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
        if (!path)
                return -ENXIO;

        m->current_pgpath = path_to_pgpath(path);

        if (m->current_pg != pg)
                __switch_pg(m, m->current_pgpath);

        return 0;
}
static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
        struct priority_group *pg;
        unsigned bypassed = 1;

        if (!m->nr_valid_paths)
                goto failed;

        /* Were we instructed to switch PG? */
        if (m->next_pg) {
                pg = m->next_pg;
                m->next_pg = NULL;
                if (!__choose_path_in_pg(m, pg, nr_bytes))
                        return;
        }

        /* Don't change PG until it has no remaining paths */
        if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
                return;

        /*
         * Loop through priority groups until we find a valid path.
         * First time we skip PGs marked 'bypassed'.
         * Second time we only try the ones we skipped, but set
         * pg_init_delay_retry so we do not hammer controllers.
         */
        do {
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed == bypassed)
                                continue;
                        if (!__choose_path_in_pg(m, pg, nr_bytes)) {
                                if (!bypassed)
                                        m->pg_init_delay_retry = 1;
                                return;
                        }
                }
        } while (bypassed--);

failed:
        m->current_pgpath = NULL;
        m->current_pg = NULL;
}
/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
        return (m->queue_if_no_path != m->saved_queue_if_no_path &&
                dm_noflush_suspending(m->ti));
}
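/*
 * map_io() binds a clone request to a path.  It returns
 * DM_MAPIO_REMAPPED once the clone points at a live path,
 * DM_MAPIO_SUBMITTED when the request was queued internally for the
 * daemon to resubmit, DM_MAPIO_REQUEUE to push it back to dm core,
 * or -EIO when no path is usable and queueing is disabled.
 */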
static int map_io(struct multipath *m, struct request *clone,
                  union map_info *map_context, unsigned was_queued)
{
        int r = DM_MAPIO_REMAPPED;
        size_t nr_bytes = blk_rq_bytes(clone);
        unsigned long flags;
        struct pgpath *pgpath;
        struct block_device *bdev;
        struct dm_mpath_io *mpio = map_context->ptr;

        spin_lock_irqsave(&m->lock, flags);

        /* Do we need to select a new pgpath? */
        if (!m->current_pgpath ||
            (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
                __choose_pgpath(m, nr_bytes);

        pgpath = m->current_pgpath;

        if (was_queued)
                m->queue_size--;

        if ((pgpath && m->queue_io) ||
            (!pgpath && m->queue_if_no_path)) {
                /* Queue for the daemon to resubmit */
                list_add_tail(&clone->queuelist, &m->queued_ios);
                m->queue_size++;
                if ((m->pg_init_required && !m->pg_init_in_progress) ||
                    !m->queue_io)
                        queue_work(kmultipathd, &m->process_queued_ios);
                pgpath = NULL;
                r = DM_MAPIO_SUBMITTED;
        } else if (pgpath) {
                bdev = pgpath->path.dev->bdev;
                clone->q = bdev_get_queue(bdev);
                clone->rq_disk = bdev->bd_disk;
        } else if (__must_push_back(m))
                r = DM_MAPIO_REQUEUE;
        else
                r = -EIO;       /* Failed */

        mpio->pgpath = pgpath;
        mpio->nr_bytes = nr_bytes;

        if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
                pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
                                              nr_bytes);

        spin_unlock_irqrestore(&m->lock, flags);

        return r;
}
/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
                            unsigned save_old_value)
{
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        if (save_old_value)
                m->saved_queue_if_no_path = m->queue_if_no_path;
        else
                m->saved_queue_if_no_path = queue_if_no_path;
        m->queue_if_no_path = queue_if_no_path;
        if (!m->queue_if_no_path && m->queue_size)
                queue_work(kmultipathd, &m->process_queued_ios);

        spin_unlock_irqrestore(&m->lock, flags);

        return 0;
}
/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/
static void dispatch_queued_ios(struct multipath *m)
{
        int r;
        unsigned long flags;
        union map_info *info;
        struct request *clone, *n;
        LIST_HEAD(cl);

        spin_lock_irqsave(&m->lock, flags);
        list_splice_init(&m->queued_ios, &cl);
        spin_unlock_irqrestore(&m->lock, flags);

        list_for_each_entry_safe(clone, n, &cl, queuelist) {
                list_del_init(&clone->queuelist);

                info = dm_get_rq_mapinfo(clone);

                r = map_io(m, clone, info, 1);
                if (r < 0) {
                        clear_mapinfo(m, info);
                        dm_kill_unmapped_request(clone, r);
                } else if (r == DM_MAPIO_REMAPPED)
                        dm_dispatch_request(clone);
                else if (r == DM_MAPIO_REQUEUE) {
                        clear_mapinfo(m, info);
                        dm_requeue_unmapped_request(clone);
                }
        }
}
static void process_queued_ios(struct work_struct *work)
{
        struct multipath *m =
                container_of(work, struct multipath, process_queued_ios);
        struct pgpath *pgpath = NULL;
        unsigned must_queue = 1;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        if (!m->current_pgpath)
                __choose_pgpath(m, 0);

        pgpath = m->current_pgpath;

        if ((pgpath && !m->queue_io) ||
            (!pgpath && !m->queue_if_no_path))
                must_queue = 0;

        if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
                __pg_init_all_paths(m);

        spin_unlock_irqrestore(&m->lock, flags);
        if (!must_queue)
                dispatch_queued_ios(m);
}
/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
        struct multipath *m =
                container_of(work, struct multipath, trigger_event);

        dm_table_event(m->ti->table);
}
/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
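/*
 * An illustrative table line in that format (device numbers and the
 * target length are made-up values): one feature arg, no hardware
 * handler, a single round-robin PG of two paths, each path taking one
 * selector arg (the repeat count):
 *
 *   0 10240000 multipath 1 queue_if_no_path 0 1 1 \
 *       round-robin 0 2 1 8:16 1000 8:32 1000
 */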
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
                               struct dm_target *ti)
{
        int r;
        struct path_selector_type *pst;
        unsigned ps_argc;

        static struct dm_arg _args[] = {
                {0, 1024, "invalid number of path selector args"},
        };

        pst = dm_get_path_selector(dm_shift_arg(as));
        if (!pst) {
                ti->error = "unknown path selector type";
                return -EINVAL;
        }

        r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
        if (r) {
                dm_put_path_selector(pst);
                return -EINVAL;
        }

        r = pst->create(&pg->ps, ps_argc, as->argv);
        if (r) {
                dm_put_path_selector(pst);
                ti->error = "path selector constructor failed";
                return r;
        }

        pg->ps.type = pst;
        dm_consume_args(as, ps_argc);

        return 0;
}
static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
                                 struct dm_target *ti)
{
        int r;
        struct pgpath *p;
        struct multipath *m = ti->private;

        /* we need at least a path arg */
        if (as->argc < 1) {
                ti->error = "no device given";
                return ERR_PTR(-EINVAL);
        }

        p = alloc_pgpath();
        if (!p)
                return ERR_PTR(-ENOMEM);

        r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
                          &p->path.dev);
        if (r) {
                ti->error = "error getting device";
                goto bad;
        }

        if (m->hw_handler_name) {
                struct request_queue *q = bdev_get_queue(p->path.dev->bdev);

                r = scsi_dh_attach(q, m->hw_handler_name);
                if (r == -EBUSY) {
                        /*
                         * Already attached to different hw_handler,
                         * try to reattach with correct one.
                         */
                        scsi_dh_detach(q);
                        r = scsi_dh_attach(q, m->hw_handler_name);
                }

                if (r < 0) {
                        ti->error = "error attaching hardware handler";
                        dm_put_device(ti, p->path.dev);
                        goto bad;
                }

                if (m->hw_handler_params) {
                        r = scsi_dh_set_params(q, m->hw_handler_params);
                        if (r < 0) {
                                ti->error = "unable to set hardware "
                                                        "handler parameters";
                                scsi_dh_detach(q);
                                dm_put_device(ti, p->path.dev);
                                goto bad;
                        }
                }
        }

        r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
        if (r) {
                dm_put_device(ti, p->path.dev);
                goto bad;
        }

        return p;

 bad:
        free_pgpath(p);
        return ERR_PTR(r);
}
static struct priority_group *parse_priority_group(struct dm_arg_set *as,
                                                   struct multipath *m)
{
        static struct dm_arg _args[] = {
                {1, 1024, "invalid number of paths"},
                {0, 1024, "invalid number of selector args"}
        };

        int r;
        unsigned i, nr_selector_args, nr_args;
        struct priority_group *pg;
        struct dm_target *ti = m->ti;

        if (as->argc < 2) {
                as->argc = 0;
                ti->error = "not enough priority group arguments";
                return ERR_PTR(-EINVAL);
        }

        pg = alloc_priority_group();
        if (!pg) {
                ti->error = "couldn't allocate priority group";
                return ERR_PTR(-ENOMEM);
        }
        pg->m = m;

        r = parse_path_selector(as, pg, ti);
        if (r)
                goto bad;

        /*
         * read the paths
         */
        r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
        if (r)
                goto bad;

        r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
        if (r)
                goto bad;

        nr_args = 1 + nr_selector_args;
        for (i = 0; i < pg->nr_pgpaths; i++) {
                struct pgpath *pgpath;
                struct dm_arg_set path_args;

                if (as->argc < nr_args) {
                        ti->error = "not enough path parameters";
                        r = -EINVAL;
                        goto bad;
                }

                path_args.argc = nr_args;
                path_args.argv = as->argv;

                pgpath = parse_path(&path_args, &pg->ps, ti);
                if (IS_ERR(pgpath)) {
                        r = PTR_ERR(pgpath);
                        goto bad;
                }

                pgpath->pg = pg;
                list_add_tail(&pgpath->list, &pg->pgpaths);
                dm_consume_args(as, nr_args);
        }

        return pg;

 bad:
        free_priority_group(pg, ti);
        return ERR_PTR(r);
}
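/*
 * parse_hw_handler() stores any arguments that follow the handler
 * name in m->hw_handler_params as a NUL-separated string: the
 * argument count first, then each argument in turn (note how the
 * sprintf loop below steps past each terminating NUL).
 */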
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
        unsigned hw_argc;
        int ret;
        struct dm_target *ti = m->ti;

        static struct dm_arg _args[] = {
                {0, 1024, "invalid number of hardware handler args"},
        };

        if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
                return -EINVAL;

        if (!hw_argc)
                return 0;

        m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
        if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
                                     "scsi_dh_%s", m->hw_handler_name)) {
                ti->error = "unknown hardware handler type";
                ret = -EINVAL;
                goto fail;
        }

        if (hw_argc > 1) {
                char *p;
                int i, j, len = 4;

                for (i = 0; i <= hw_argc - 2; i++)
                        len += strlen(as->argv[i]) + 1;
                p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
                if (!p) {
                        ti->error = "memory allocation failed";
                        ret = -ENOMEM;
                        goto fail;
                }
                j = sprintf(p, "%d", hw_argc - 1);
                for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
                        j = sprintf(p, "%s", as->argv[i]);
        }
        dm_consume_args(as, hw_argc - 1);

        return 0;
fail:
        kfree(m->hw_handler_name);
        m->hw_handler_name = NULL;
        return ret;
}
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
        int r;
        unsigned argc;
        struct dm_target *ti = m->ti;
        const char *arg_name;

        static struct dm_arg _args[] = {
                {0, 5, "invalid number of feature args"},
                {1, 50, "pg_init_retries must be between 1 and 50"},
                {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
        };

        r = dm_read_arg_group(_args, as, &argc, &ti->error);
        if (r)
                return -EINVAL;

        if (!argc)
                return 0;

        do {
                arg_name = dm_shift_arg(as);
                argc--;

                if (!strcasecmp(arg_name, "queue_if_no_path")) {
                        r = queue_if_no_path(m, 1, 0);
                        continue;
                }

                if (!strcasecmp(arg_name, "pg_init_retries") &&
                    (argc >= 1)) {
                        r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
                        argc--;
                        continue;
                }

                if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
                    (argc >= 1)) {
                        r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
                        argc--;
                        continue;
                }

                ti->error = "Unrecognised multipath feature request";
                r = -EINVAL;
        } while (argc && !r);

        return r;
}
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
                         char **argv)
{
        /* target arguments */
        static struct dm_arg _args[] = {
                {0, 1024, "invalid number of priority groups"},
                {0, 1024, "invalid initial priority group number"},
        };

        int r;
        struct multipath *m;
        struct dm_arg_set as;
        unsigned pg_count = 0;
        unsigned next_pg_num;

        as.argc = argc;
        as.argv = argv;

        m = alloc_multipath(ti);
        if (!m) {
                ti->error = "can't allocate multipath";
                return -EINVAL;
        }

        r = parse_features(&as, m);
        if (r)
                goto bad;

        r = parse_hw_handler(&as, m);
        if (r)
                goto bad;

        r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
        if (r)
                goto bad;

        r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
        if (r)
                goto bad;

        if ((!m->nr_priority_groups && next_pg_num) ||
            (m->nr_priority_groups && !next_pg_num)) {
                ti->error = "invalid initial priority group";
                r = -EINVAL;
                goto bad;
        }

        /* parse the priority groups */
        while (as.argc) {
                struct priority_group *pg;

                pg = parse_priority_group(&as, m);
                if (IS_ERR(pg)) {
                        r = PTR_ERR(pg);
                        goto bad;
                }

                m->nr_valid_paths += pg->nr_pgpaths;
                list_add_tail(&pg->list, &m->priority_groups);
                pg_count++;
                pg->pg_num = pg_count;
                if (!--next_pg_num)
                        m->next_pg = pg;
        }

        if (pg_count != m->nr_priority_groups) {
                ti->error = "priority group count mismatch";
                r = -EINVAL;
                goto bad;
        }

        ti->num_flush_requests = 1;
        ti->num_discard_requests = 1;

        return 0;

 bad:
        free_multipath(m);
        return r;
}
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;

        add_wait_queue(&m->pg_init_wait, &wait);

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                spin_lock_irqsave(&m->lock, flags);
                if (!m->pg_init_in_progress) {
                        spin_unlock_irqrestore(&m->lock, flags);
                        break;
                }
                spin_unlock_irqrestore(&m->lock, flags);

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        remove_wait_queue(&m->pg_init_wait, &wait);
}
static void flush_multipath_work(struct multipath *m)
{
        flush_workqueue(kmpath_handlerd);
        multipath_wait_for_pg_init_completion(m);
        flush_workqueue(kmultipathd);
        flush_work_sync(&m->trigger_event);
}
static void multipath_dtr(struct dm_target *ti)
{
        struct multipath *m = ti->private;

        flush_multipath_work(m);
        free_multipath(m);
}
/*
 * Map cloned requests
 */
static int multipath_map(struct dm_target *ti, struct request *clone,
                         union map_info *map_context)
{
        int r;
        struct multipath *m = (struct multipath *) ti->private;

        if (set_mapinfo(m, map_context) < 0)
                /* ENOMEM, requeue */
                return DM_MAPIO_REQUEUE;

        clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
        r = map_io(m, clone, map_context, 0);
        if (r < 0 || r == DM_MAPIO_REQUEUE)
                clear_mapinfo(m, map_context);

        return r;
}
/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
        unsigned long flags;
        struct multipath *m = pgpath->pg->m;

        spin_lock_irqsave(&m->lock, flags);

        if (!pgpath->is_active)
                goto out;

        DMWARN("Failing path %s.", pgpath->path.dev->name);

        pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
        pgpath->is_active = 0;
        pgpath->fail_count++;

        m->nr_valid_paths--;

        if (pgpath == m->current_pgpath)
                m->current_pgpath = NULL;

        dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
                       pgpath->path.dev->name, m->nr_valid_paths);

        schedule_work(&m->trigger_event);

out:
        spin_unlock_irqrestore(&m->lock, flags);

        return 0;
}
/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
        int r = 0;
        unsigned long flags;
        struct multipath *m = pgpath->pg->m;

        spin_lock_irqsave(&m->lock, flags);

        if (pgpath->is_active)
                goto out;

        if (!pgpath->pg->ps.type->reinstate_path) {
                DMWARN("Reinstate path not supported by path selector %s",
                       pgpath->pg->ps.type->name);
                r = -EINVAL;
                goto out;
        }

        r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
        if (r)
                goto out;

        pgpath->is_active = 1;

        if (!m->nr_valid_paths++ && m->queue_size) {
                m->current_pgpath = NULL;
                queue_work(kmultipathd, &m->process_queued_ios);
        } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
                if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
                        m->pg_init_in_progress++;
        }

        dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
                       pgpath->path.dev->name, m->nr_valid_paths);

        schedule_work(&m->trigger_event);

out:
        spin_unlock_irqrestore(&m->lock, flags);

        return r;
}
/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
                      action_fn action)
{
        int r = -EINVAL;
        struct pgpath *pgpath;
        struct priority_group *pg;

        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(pgpath, &pg->pgpaths, list) {
                        if (pgpath->path.dev == dev)
                                r = action(pgpath);
                }
        }

        return r;
}
/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
                      int bypassed)
{
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        pg->bypassed = bypassed;
        m->current_pgpath = NULL;
        m->current_pg = NULL;

        spin_unlock_irqrestore(&m->lock, flags);

        schedule_work(&m->trigger_event);
}
/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
        struct priority_group *pg;
        unsigned pgnum;
        unsigned long flags;
        char dummy;

        if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
            (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to switch_pg_num");
                return -EINVAL;
        }

        spin_lock_irqsave(&m->lock, flags);
        list_for_each_entry(pg, &m->priority_groups, list) {
                pg->bypassed = 0;
                if (--pgnum)
                        continue;

                m->current_pgpath = NULL;
                m->current_pg = NULL;
                m->next_pg = pg;
        }
        spin_unlock_irqrestore(&m->lock, flags);

        schedule_work(&m->trigger_event);
        return 0;
}
/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
        struct priority_group *pg;
        unsigned pgnum;
        char dummy;

        if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
            (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to bypass_pg");
                return -EINVAL;
        }

        list_for_each_entry(pg, &m->priority_groups, list) {
                if (!--pgnum)
                        break;
        }

        bypass_pg(m, pg, bypassed);
        return 0;
}
/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
        unsigned long flags;
        int limit_reached = 0;

        spin_lock_irqsave(&m->lock, flags);

        if (m->pg_init_count <= m->pg_init_retries)
                m->pg_init_required = 1;
        else
                limit_reached = 1;

        spin_unlock_irqrestore(&m->lock, flags);

        return limit_reached;
}
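/*
 * Completion callback for scsi_dh_activate(): interpret the SCSI_DH_*
 * status, fail or bypass paths as appropriate, and restart queued I/O
 * once the last outstanding activation has finished.
 */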
static void pg_init_done(void *data, int errors)
{
        struct pgpath *pgpath = data;
        struct priority_group *pg = pgpath->pg;
        struct multipath *m = pg->m;
        unsigned long flags;
        unsigned delay_retry = 0;

        /* device or driver problems */
        switch (errors) {
        case SCSI_DH_OK:
                break;
        case SCSI_DH_NOSYS:
                if (!m->hw_handler_name) {
                        errors = 0;
                        break;
                }
                DMERR("Could not failover the device: Handler scsi_dh_%s "
                      "Error %d.", m->hw_handler_name, errors);
                /*
                 * Fail path for now, so we do not ping pong
                 */
                fail_path(pgpath);
                break;
        case SCSI_DH_DEV_TEMP_BUSY:
                /*
                 * Probably doing something like FW upgrade on the
                 * controller so try the other pg.
                 */
                bypass_pg(m, pg, 1);
                break;
        case SCSI_DH_RETRY:
                /* Wait before retrying. */
                delay_retry = 1;
        case SCSI_DH_IMM_RETRY:
        case SCSI_DH_RES_TEMP_UNAVAIL:
                if (pg_init_limit_reached(m, pgpath))
                        fail_path(pgpath);
                errors = 0;
                break;
        default:
                /*
                 * We probably do not want to fail the path for a device
                 * error, but this is what the old dm did. In future
                 * patches we can do more advanced handling.
                 */
                fail_path(pgpath);
        }

        spin_lock_irqsave(&m->lock, flags);
        if (errors) {
                if (pgpath == m->current_pgpath) {
                        DMERR("Could not failover device. Error %d.", errors);
                        m->current_pgpath = NULL;
                        m->current_pg = NULL;
                }
        } else if (!m->pg_init_required)
                pg->bypassed = 0;

        if (--m->pg_init_in_progress)
                /* Activations of other paths are still on going */
                goto out;

        if (!m->pg_init_required)
                m->queue_io = 0;

        m->pg_init_delay_retry = delay_retry;
        queue_work(kmultipathd, &m->process_queued_ios);

        /*
         * Wake up any thread waiting to suspend.
         */
        wake_up(&m->pg_init_wait);

out:
        spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_path(struct work_struct *work)
{
        struct pgpath *pgpath =
                container_of(work, struct pgpath, activate_path.work);

        scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
                         pg_init_done, pgpath);
}
/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
                     int error, struct dm_mpath_io *mpio)
{
        /*
         * We don't queue any clone request inside the multipath target
         * during end I/O handling, since those clone requests don't have
         * bio clones.  If we queue them inside the multipath target,
         * we need to make bio clones, that requires memory allocation.
         * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
         *  don't have bio clones.)
         * Instead of queueing the clone request here, we queue the original
         * request into dm core, which will remake a clone request and
         * clone bios for it and resubmit it later.
         */
        int r = DM_ENDIO_REQUEUE;
        unsigned long flags;

        if (!error && !clone->errors)
                return 0;       /* I/O complete */

        if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
                return error;

        if (mpio->pgpath)
                fail_path(mpio->pgpath);

        spin_lock_irqsave(&m->lock, flags);
        if (!m->nr_valid_paths) {
                if (!m->queue_if_no_path) {
                        if (!__must_push_back(m))
                                r = -EIO;
                } else {
                        if (error == -EBADE)
                                r = error;
                }
        }
        spin_unlock_irqrestore(&m->lock, flags);

        return r;
}
static int multipath_end_io(struct dm_target *ti, struct request *clone,
                            int error, union map_info *map_context)
{
        struct multipath *m = ti->private;
        struct dm_mpath_io *mpio = map_context->ptr;
        struct pgpath *pgpath = mpio->pgpath;
        struct path_selector *ps;
        int r;

        r = do_end_io(m, clone, error, mpio);
        if (pgpath) {
                ps = &pgpath->pg->ps;
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
        }
        clear_mapinfo(m, map_context);

        return r;
}
/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
        struct multipath *m = (struct multipath *) ti->private;

        queue_if_no_path(m, 0, 1);
}

static void multipath_postsuspend(struct dm_target *ti)
{
        struct multipath *m = ti->private;

        mutex_lock(&m->work_mutex);
        flush_multipath_work(m);
        mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
        struct multipath *m = (struct multipath *) ti->private;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);
        m->queue_if_no_path = m->saved_queue_if_no_path;
        spin_unlock_irqrestore(&m->lock, flags);
}
/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
static int multipath_status(struct dm_target *ti, status_type_t type,
                            char *result, unsigned int maxlen)
{
        int sz = 0;
        unsigned long flags;
        struct multipath *m = (struct multipath *) ti->private;
        struct priority_group *pg;
        struct pgpath *p;
        unsigned pg_num;
        char state;

        spin_lock_irqsave(&m->lock, flags);

        /* Features */
        if (type == STATUSTYPE_INFO)
                DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
        else {
                DMEMIT("%u ", m->queue_if_no_path +
                              (m->pg_init_retries > 0) * 2 +
                              (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
                if (m->queue_if_no_path)
                        DMEMIT("queue_if_no_path ");
                if (m->pg_init_retries)
                        DMEMIT("pg_init_retries %u ", m->pg_init_retries);
                if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
                        DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
        }

        if (!m->hw_handler_name || type == STATUSTYPE_INFO)
                DMEMIT("0 ");
        else
                DMEMIT("1 %s ", m->hw_handler_name);

        DMEMIT("%u ", m->nr_priority_groups);

        if (m->next_pg)
                pg_num = m->next_pg->pg_num;
        else if (m->current_pg)
                pg_num = m->current_pg->pg_num;
        else
                pg_num = (m->nr_priority_groups ? 1 : 0);

        DMEMIT("%u ", pg_num);

        switch (type) {
        case STATUSTYPE_INFO:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed)
                                state = 'D';    /* Disabled */
                        else if (pg == m->current_pg)
                                state = 'A';    /* Currently Active */
                        else
                                state = 'E';    /* Enabled */

                        DMEMIT("%c ", state);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->info_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s %s %u ", p->path.dev->name,
                                       p->is_active ? "A" : "F",
                                       p->fail_count);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;

        case STATUSTYPE_TABLE:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        DMEMIT("%s ", pg->ps.type->name);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->table_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s ", p->path.dev->name);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;
        }

        spin_unlock_irqrestore(&m->lock, flags);

        return 0;
}
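/*
 * Messages handled below (e.g. sent with "dmsetup message <dev> 0 ..."):
 *   queue_if_no_path / fail_if_no_path
 *   disable_group <#pg> / enable_group <#pg> / switch_group <#pg>
 *   reinstate_path <path-dev> / fail_path <path-dev>
 */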
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
        int r = -EINVAL;
        struct dm_dev *dev;
        struct multipath *m = (struct multipath *) ti->private;
        action_fn action;

        mutex_lock(&m->work_mutex);

        if (dm_suspended(ti)) {
                r = -EBUSY;
                goto out;
        }

        if (argc == 1) {
                if (!strcasecmp(argv[0], "queue_if_no_path")) {
                        r = queue_if_no_path(m, 1, 0);
                        goto out;
                } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
                        r = queue_if_no_path(m, 0, 0);
                        goto out;
                }
        }

        if (argc != 2) {
                DMWARN("Unrecognised multipath message received.");
                goto out;
        }

        if (!strcasecmp(argv[0], "disable_group")) {
                r = bypass_pg_num(m, argv[1], 1);
                goto out;
        } else if (!strcasecmp(argv[0], "enable_group")) {
                r = bypass_pg_num(m, argv[1], 0);
                goto out;
        } else if (!strcasecmp(argv[0], "switch_group")) {
                r = switch_pg_num(m, argv[1]);
                goto out;
        } else if (!strcasecmp(argv[0], "reinstate_path"))
                action = reinstate_path;
        else if (!strcasecmp(argv[0], "fail_path"))
                action = fail_path;
        else {
                DMWARN("Unrecognised multipath message received.");
                goto out;
        }

        r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
        if (r) {
                DMWARN("message: error getting device %s",
                       argv[1]);
                goto out;
        }

        r = action_dev(m, dev, action);

        dm_put_device(ti, dev);

out:
        mutex_unlock(&m->work_mutex);
        return r;
}
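/*
 * Pass an ioctl through to the current path's underlying device.
 * If no path is usable yet, kick the queue and retry instead of
 * returning -EAGAIN to the caller.
 */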
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                           unsigned long arg)
{
        struct multipath *m = ti->private;
        struct block_device *bdev;
        fmode_t mode;
        unsigned long flags;
        int r;

again:
        bdev = NULL;
        mode = 0;
        r = 0;

        spin_lock_irqsave(&m->lock, flags);

        if (!m->current_pgpath)
                __choose_pgpath(m, 0);

        if (m->current_pgpath) {
                bdev = m->current_pgpath->path.dev->bdev;
                mode = m->current_pgpath->path.dev->mode;
        }

        if (m->queue_io)
                r = -EAGAIN;
        else if (!bdev)
                r = -EIO;

        spin_unlock_irqrestore(&m->lock, flags);

        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
        if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
                r = scsi_verify_blk_ioctl(NULL, cmd);

        if (r == -EAGAIN && !fatal_signal_pending(current)) {
                queue_work(kmultipathd, &m->process_queued_ios);
                msleep(10);
                goto again;
        }

        return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
static int multipath_iterate_devices(struct dm_target *ti,
                                     iterate_devices_callout_fn fn, void *data)
{
        struct multipath *m = ti->private;
        struct priority_group *pg;
        struct pgpath *p;
        int ret = 0;

        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(p, &pg->pgpaths, list) {
                        ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
                        if (ret)
                                goto out;
                }
        }

out:
        return ret;
}
static int __pgpath_busy(struct pgpath *pgpath)
{
        struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

        return dm_underlying_device_busy(q);
}
/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
        int busy = 0, has_active = 0;
        struct multipath *m = ti->private;
        struct priority_group *pg;
        struct pgpath *pgpath;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        /* Guess which priority_group will be used at next mapping time */
        if (unlikely(!m->current_pgpath && m->next_pg))
                pg = m->next_pg;
        else if (likely(m->current_pg))
                pg = m->current_pg;
        else
                /*
                 * We don't know which pg will be used at next mapping time.
                 * We don't call __choose_pgpath() here to avoid to trigger
                 * pg_init just by busy checking.
                 * So we don't know whether underlying devices we will be using
                 * at next mapping time are busy or not. Just try mapping.
                 */
                goto out;

        /*
         * If there is one non-busy active path at least, the path selector
         * will be able to select it. So we consider such a pg as not busy.
         */
        busy = 1;
        list_for_each_entry(pgpath, &pg->pgpaths, list)
                if (pgpath->is_active) {
                        has_active = 1;

                        if (!__pgpath_busy(pgpath)) {
                                busy = 0;
                                break;
                        }
                }

        if (!has_active)
                /*
                 * No active path in this pg, so this pg won't be used and
                 * the current_pg will be changed at next mapping time.
                 * We need to try mapping to determine it.
                 */
                busy = 0;

out:
        spin_unlock_irqrestore(&m->lock, flags);

        return busy;
}
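/*
 * Note the .map_rq/.rq_end_io hooks in the target_type below:
 * multipath is a request-based target, so dm core hands it cloned
 * requests rather than bios.
 */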
/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
        .name = "multipath",
        .version = {1, 4, 0},
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
        .map_rq = multipath_map,
        .rq_end_io = multipath_end_io,
        .presuspend = multipath_presuspend,
        .postsuspend = multipath_postsuspend,
        .resume = multipath_resume,
        .status = multipath_status,
        .message = multipath_message,
        .ioctl = multipath_ioctl,
        .iterate_devices = multipath_iterate_devices,
        .busy = multipath_busy,
};
static int __init dm_multipath_init(void)
{
        int r;

        /* allocate a slab for the dm_ios */
        _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
        if (!_mpio_cache)
                return -ENOMEM;

        r = dm_register_target(&multipath_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                kmem_cache_destroy(_mpio_cache);
                return -EINVAL;
        }

        kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
        if (!kmultipathd) {
                DMERR("failed to create workqueue kmpathd");
                dm_unregister_target(&multipath_target);
                kmem_cache_destroy(_mpio_cache);
                return -ENOMEM;
        }

        /*
         * A separate workqueue is used to handle the device handlers
         * to avoid overloading existing workqueue. Overloading the
         * old workqueue would also create a bottleneck in the
         * path of the storage hardware device activation.
         */
        kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
                                                  WQ_MEM_RECLAIM);
        if (!kmpath_handlerd) {
                DMERR("failed to create workqueue kmpath_handlerd");
                destroy_workqueue(kmultipathd);
                dm_unregister_target(&multipath_target);
                kmem_cache_destroy(_mpio_cache);
                return -ENOMEM;
        }

        DMINFO("version %u.%u.%u loaded",
               multipath_target.version[0], multipath_target.version[1],
               multipath_target.version[2]);

        return r;
}
static void __exit dm_multipath_exit(void)
{
        destroy_workqueue(kmpath_handlerd);
        destroy_workqueue(kmultipathd);

        dm_unregister_target(&multipath_target);
        kmem_cache_destroy(_mpio_cache);
}
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");