Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (C) 2003 Sistina Software Limited. | |
3 | * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. | |
4 | * | |
5 | * This file is released under the GPL. | |
6 | */ | |
7 | ||
8 | #include "dm.h" | |
9 | #include "dm-path-selector.h" | |
10 | #include "dm-hw-handler.h" | |
11 | #include "dm-bio-list.h" | |
12 | #include "dm-bio-record.h" | |
13 | ||
14 | #include <linux/ctype.h> | |
15 | #include <linux/init.h> | |
16 | #include <linux/mempool.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/pagemap.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/time.h> | |
21 | #include <linux/workqueue.h> | |
22 | #include <asm/atomic.h> | |
23 | ||
72d94861 | 24 | #define DM_MSG_PREFIX "multipath" |
1da177e4 LT |
25 | #define MESG_STR(x) x, sizeof(x) |
26 | ||
27 | /* Path properties */ | |
28 | struct pgpath { | |
29 | struct list_head list; | |
30 | ||
31 | struct priority_group *pg; /* Owning PG */ | |
32 | unsigned fail_count; /* Cumulative failure count */ | |
33 | ||
c922d5f7 | 34 | struct dm_path path; |
1da177e4 LT |
35 | }; |
36 | ||
37 | #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) | |
38 | ||
39 | /* | |
40 | * Paths are grouped into Priority Groups and numbered from 1 upwards. | |
41 | * Each has a path selector which controls which path gets used. | |
42 | */ | |
43 | struct priority_group { | |
44 | struct list_head list; | |
45 | ||
46 | struct multipath *m; /* Owning multipath instance */ | |
47 | struct path_selector ps; | |
48 | ||
49 | unsigned pg_num; /* Reference number */ | |
50 | unsigned bypassed; /* Temporarily bypass this PG? */ | |
51 | ||
52 | unsigned nr_pgpaths; /* Number of paths in PG */ | |
53 | struct list_head pgpaths; | |
54 | }; | |
55 | ||
56 | /* Multipath context */ | |
57 | struct multipath { | |
58 | struct list_head list; | |
59 | struct dm_target *ti; | |
60 | ||
61 | spinlock_t lock; | |
62 | ||
63 | struct hw_handler hw_handler; | |
64 | unsigned nr_priority_groups; | |
65 | struct list_head priority_groups; | |
66 | unsigned pg_init_required; /* pg_init needs calling? */ | |
c3cd4f6b | 67 | unsigned pg_init_in_progress; /* Only one pg_init allowed at once */ |
1da177e4 LT |
68 | |
69 | unsigned nr_valid_paths; /* Total number of usable paths */ | |
70 | struct pgpath *current_pgpath; | |
71 | struct priority_group *current_pg; | |
72 | struct priority_group *next_pg; /* Switch to this PG if set */ | |
73 | unsigned repeat_count; /* I/Os left before calling PS again */ | |
74 | ||
75 | unsigned queue_io; /* Must we queue all I/O? */ | |
76 | unsigned queue_if_no_path; /* Queue I/O if last path fails? */ | |
436d4108 | 77 | unsigned saved_queue_if_no_path;/* Saved state during suspension */ |
1da177e4 LT |
78 | |
79 | struct work_struct process_queued_ios; | |
80 | struct bio_list queued_ios; | |
81 | unsigned queue_size; | |
82 | ||
83 | struct work_struct trigger_event; | |
84 | ||
85 | /* | |
86 | * We must use a mempool of mpath_io structs so that we | |
87 | * can resubmit bios on error. | |
88 | */ | |
89 | mempool_t *mpio_pool; | |
90 | }; | |
91 | ||
92 | /* | |
93 | * Context information attached to each bio we process. | |
94 | */ | |
95 | struct mpath_io { | |
96 | struct pgpath *pgpath; | |
97 | struct dm_bio_details details; | |
98 | }; | |
99 | ||
100 | typedef int (*action_fn) (struct pgpath *pgpath); | |
101 | ||
102 | #define MIN_IOS 256 /* Mempool size */ | |
103 | ||
e18b890b | 104 | static struct kmem_cache *_mpio_cache; |
1da177e4 | 105 | |
c557308e | 106 | struct workqueue_struct *kmultipathd; |
c4028958 DH |
107 | static void process_queued_ios(struct work_struct *work); |
108 | static void trigger_event(struct work_struct *work); | |
1da177e4 LT |
109 | |
110 | ||
111 | /*----------------------------------------------- | |
112 | * Allocation routines | |
113 | *-----------------------------------------------*/ | |
114 | ||
115 | static struct pgpath *alloc_pgpath(void) | |
116 | { | |
e69fae56 | 117 | struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); |
1da177e4 | 118 | |
e69fae56 | 119 | if (pgpath) |
1da177e4 | 120 | pgpath->path.is_active = 1; |
1da177e4 LT |
121 | |
122 | return pgpath; | |
123 | } | |
124 | ||
125 | static inline void free_pgpath(struct pgpath *pgpath) | |
126 | { | |
127 | kfree(pgpath); | |
128 | } | |
129 | ||
130 | static struct priority_group *alloc_priority_group(void) | |
131 | { | |
132 | struct priority_group *pg; | |
133 | ||
e69fae56 | 134 | pg = kzalloc(sizeof(*pg), GFP_KERNEL); |
1da177e4 | 135 | |
e69fae56 MM |
136 | if (pg) |
137 | INIT_LIST_HEAD(&pg->pgpaths); | |
1da177e4 LT |
138 | |
139 | return pg; | |
140 | } | |
141 | ||
142 | static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) | |
143 | { | |
144 | struct pgpath *pgpath, *tmp; | |
145 | ||
146 | list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { | |
147 | list_del(&pgpath->list); | |
148 | dm_put_device(ti, pgpath->path.dev); | |
149 | free_pgpath(pgpath); | |
150 | } | |
151 | } | |
152 | ||
153 | static void free_priority_group(struct priority_group *pg, | |
154 | struct dm_target *ti) | |
155 | { | |
156 | struct path_selector *ps = &pg->ps; | |
157 | ||
158 | if (ps->type) { | |
159 | ps->type->destroy(ps); | |
160 | dm_put_path_selector(ps->type); | |
161 | } | |
162 | ||
163 | free_pgpaths(&pg->pgpaths, ti); | |
164 | kfree(pg); | |
165 | } | |
166 | ||
28f16c20 | 167 | static struct multipath *alloc_multipath(struct dm_target *ti) |
1da177e4 LT |
168 | { |
169 | struct multipath *m; | |
170 | ||
e69fae56 | 171 | m = kzalloc(sizeof(*m), GFP_KERNEL); |
1da177e4 | 172 | if (m) { |
1da177e4 LT |
173 | INIT_LIST_HEAD(&m->priority_groups); |
174 | spin_lock_init(&m->lock); | |
175 | m->queue_io = 1; | |
c4028958 DH |
176 | INIT_WORK(&m->process_queued_ios, process_queued_ios); |
177 | INIT_WORK(&m->trigger_event, trigger_event); | |
93d2341c | 178 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); |
1da177e4 LT |
179 | if (!m->mpio_pool) { |
180 | kfree(m); | |
181 | return NULL; | |
182 | } | |
28f16c20 MM |
183 | m->ti = ti; |
184 | ti->private = m; | |
1da177e4 LT |
185 | } |
186 | ||
187 | return m; | |
188 | } | |
189 | ||
190 | static void free_multipath(struct multipath *m) | |
191 | { | |
192 | struct priority_group *pg, *tmp; | |
193 | struct hw_handler *hwh = &m->hw_handler; | |
194 | ||
195 | list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) { | |
196 | list_del(&pg->list); | |
197 | free_priority_group(pg, m->ti); | |
198 | } | |
199 | ||
200 | if (hwh->type) { | |
201 | hwh->type->destroy(hwh); | |
202 | dm_put_hw_handler(hwh->type); | |
203 | } | |
204 | ||
205 | mempool_destroy(m->mpio_pool); | |
206 | kfree(m); | |
207 | } | |
208 | ||
209 | ||
210 | /*----------------------------------------------- | |
211 | * Path selection | |
212 | *-----------------------------------------------*/ | |
213 | ||
214 | static void __switch_pg(struct multipath *m, struct pgpath *pgpath) | |
215 | { | |
216 | struct hw_handler *hwh = &m->hw_handler; | |
217 | ||
218 | m->current_pg = pgpath->pg; | |
219 | ||
220 | /* Must we initialise the PG first, and queue I/O till it's ready? */ | |
221 | if (hwh->type && hwh->type->pg_init) { | |
222 | m->pg_init_required = 1; | |
223 | m->queue_io = 1; | |
224 | } else { | |
225 | m->pg_init_required = 0; | |
226 | m->queue_io = 0; | |
227 | } | |
228 | } | |
229 | ||
230 | static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg) | |
231 | { | |
c922d5f7 | 232 | struct dm_path *path; |
1da177e4 LT |
233 | |
234 | path = pg->ps.type->select_path(&pg->ps, &m->repeat_count); | |
235 | if (!path) | |
236 | return -ENXIO; | |
237 | ||
238 | m->current_pgpath = path_to_pgpath(path); | |
239 | ||
240 | if (m->current_pg != pg) | |
241 | __switch_pg(m, m->current_pgpath); | |
242 | ||
243 | return 0; | |
244 | } | |
245 | ||
246 | static void __choose_pgpath(struct multipath *m) | |
247 | { | |
248 | struct priority_group *pg; | |
249 | unsigned bypassed = 1; | |
250 | ||
251 | if (!m->nr_valid_paths) | |
252 | goto failed; | |
253 | ||
254 | /* Were we instructed to switch PG? */ | |
255 | if (m->next_pg) { | |
256 | pg = m->next_pg; | |
257 | m->next_pg = NULL; | |
258 | if (!__choose_path_in_pg(m, pg)) | |
259 | return; | |
260 | } | |
261 | ||
262 | /* Don't change PG until it has no remaining paths */ | |
263 | if (m->current_pg && !__choose_path_in_pg(m, m->current_pg)) | |
264 | return; | |
265 | ||
266 | /* | |
267 | * Loop through priority groups until we find a valid path. | |
268 | * First time we skip PGs marked 'bypassed'. | |
269 | * Second time we only try the ones we skipped. | |
270 | */ | |
271 | do { | |
272 | list_for_each_entry(pg, &m->priority_groups, list) { | |
273 | if (pg->bypassed == bypassed) | |
274 | continue; | |
275 | if (!__choose_path_in_pg(m, pg)) | |
276 | return; | |
277 | } | |
278 | } while (bypassed--); | |
279 | ||
280 | failed: | |
281 | m->current_pgpath = NULL; | |
282 | m->current_pg = NULL; | |
283 | } | |
284 | ||
45e15720 KU |
285 | /* |
286 | * Check whether bios must be queued in the device-mapper core rather | |
287 | * than here in the target. | |
288 | * | |
289 | * m->lock must be held on entry. | |
290 | * | |
291 | * If m->queue_if_no_path and m->saved_queue_if_no_path hold the | |
292 | * same value then we are not between multipath_presuspend() | |
293 | * and multipath_resume() calls and we have no need to check | |
294 | * for the DMF_NOFLUSH_SUSPENDING flag. | |
295 | */ | |
296 | static int __must_push_back(struct multipath *m) | |
297 | { | |
298 | return (m->queue_if_no_path != m->saved_queue_if_no_path && | |
299 | dm_noflush_suspending(m->ti)); | |
300 | } | |
301 | ||
1da177e4 LT |
302 | static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio, |
303 | unsigned was_queued) | |
304 | { | |
d2a7ad29 | 305 | int r = DM_MAPIO_REMAPPED; |
1da177e4 LT |
306 | unsigned long flags; |
307 | struct pgpath *pgpath; | |
308 | ||
309 | spin_lock_irqsave(&m->lock, flags); | |
310 | ||
311 | /* Do we need to select a new pgpath? */ | |
312 | if (!m->current_pgpath || | |
313 | (!m->queue_io && (m->repeat_count && --m->repeat_count == 0))) | |
314 | __choose_pgpath(m); | |
315 | ||
316 | pgpath = m->current_pgpath; | |
317 | ||
318 | if (was_queued) | |
319 | m->queue_size--; | |
320 | ||
321 | if ((pgpath && m->queue_io) || | |
436d4108 | 322 | (!pgpath && m->queue_if_no_path)) { |
1da177e4 LT |
323 | /* Queue for the daemon to resubmit */ |
324 | bio_list_add(&m->queued_ios, bio); | |
325 | m->queue_size++; | |
c3cd4f6b AK |
326 | if ((m->pg_init_required && !m->pg_init_in_progress) || |
327 | !m->queue_io) | |
c557308e | 328 | queue_work(kmultipathd, &m->process_queued_ios); |
1da177e4 | 329 | pgpath = NULL; |
d2a7ad29 | 330 | r = DM_MAPIO_SUBMITTED; |
45e15720 | 331 | } else if (pgpath) |
1da177e4 | 332 | bio->bi_bdev = pgpath->path.dev->bdev; |
45e15720 KU |
333 | else if (__must_push_back(m)) |
334 | r = DM_MAPIO_REQUEUE; | |
335 | else | |
336 | r = -EIO; /* Failed */ | |
1da177e4 LT |
337 | |
338 | mpio->pgpath = pgpath; | |
339 | ||
340 | spin_unlock_irqrestore(&m->lock, flags); | |
341 | ||
342 | return r; | |
343 | } | |
344 | ||
345 | /* | |
346 | * If we run out of usable paths, should we queue I/O or error it? | |
347 | */ | |
485ef69e AK |
348 | static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, |
349 | unsigned save_old_value) | |
1da177e4 LT |
350 | { |
351 | unsigned long flags; | |
352 | ||
353 | spin_lock_irqsave(&m->lock, flags); | |
354 | ||
485ef69e AK |
355 | if (save_old_value) |
356 | m->saved_queue_if_no_path = m->queue_if_no_path; | |
357 | else | |
358 | m->saved_queue_if_no_path = queue_if_no_path; | |
1da177e4 | 359 | m->queue_if_no_path = queue_if_no_path; |
c3cd4f6b | 360 | if (!m->queue_if_no_path && m->queue_size) |
c557308e | 361 | queue_work(kmultipathd, &m->process_queued_ios); |
1da177e4 LT |
362 | |
363 | spin_unlock_irqrestore(&m->lock, flags); | |
364 | ||
365 | return 0; | |
366 | } | |
367 | ||
368 | /*----------------------------------------------------------------- | |
369 | * The multipath daemon is responsible for resubmitting queued ios. | |
370 | *---------------------------------------------------------------*/ | |
371 | ||
372 | static void dispatch_queued_ios(struct multipath *m) | |
373 | { | |
374 | int r; | |
375 | unsigned long flags; | |
376 | struct bio *bio = NULL, *next; | |
377 | struct mpath_io *mpio; | |
378 | union map_info *info; | |
379 | ||
380 | spin_lock_irqsave(&m->lock, flags); | |
381 | bio = bio_list_get(&m->queued_ios); | |
382 | spin_unlock_irqrestore(&m->lock, flags); | |
383 | ||
384 | while (bio) { | |
385 | next = bio->bi_next; | |
386 | bio->bi_next = NULL; | |
387 | ||
388 | info = dm_get_mapinfo(bio); | |
389 | mpio = info->ptr; | |
390 | ||
391 | r = map_io(m, bio, mpio, 1); | |
392 | if (r < 0) | |
393 | bio_endio(bio, bio->bi_size, r); | |
d2a7ad29 | 394 | else if (r == DM_MAPIO_REMAPPED) |
1da177e4 | 395 | generic_make_request(bio); |
45e15720 KU |
396 | else if (r == DM_MAPIO_REQUEUE) |
397 | bio_endio(bio, bio->bi_size, -EIO); | |
1da177e4 LT |
398 | |
399 | bio = next; | |
400 | } | |
401 | } | |
402 | ||
c4028958 | 403 | static void process_queued_ios(struct work_struct *work) |
1da177e4 | 404 | { |
c4028958 DH |
405 | struct multipath *m = |
406 | container_of(work, struct multipath, process_queued_ios); | |
1da177e4 | 407 | struct hw_handler *hwh = &m->hw_handler; |
c3cd4f6b AK |
408 | struct pgpath *pgpath = NULL; |
409 | unsigned init_required = 0, must_queue = 1; | |
1da177e4 LT |
410 | unsigned long flags; |
411 | ||
412 | spin_lock_irqsave(&m->lock, flags); | |
413 | ||
c3cd4f6b AK |
414 | if (!m->queue_size) |
415 | goto out; | |
416 | ||
1da177e4 LT |
417 | if (!m->current_pgpath) |
418 | __choose_pgpath(m); | |
419 | ||
420 | pgpath = m->current_pgpath; | |
421 | ||
c3cd4f6b AK |
422 | if ((pgpath && !m->queue_io) || |
423 | (!pgpath && !m->queue_if_no_path)) | |
424 | must_queue = 0; | |
1da177e4 | 425 | |
c3cd4f6b | 426 | if (m->pg_init_required && !m->pg_init_in_progress) { |
1da177e4 | 427 | m->pg_init_required = 0; |
c3cd4f6b AK |
428 | m->pg_init_in_progress = 1; |
429 | init_required = 1; | |
430 | } | |
1da177e4 | 431 | |
c3cd4f6b | 432 | out: |
1da177e4 LT |
433 | spin_unlock_irqrestore(&m->lock, flags); |
434 | ||
435 | if (init_required) | |
436 | hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path); | |
437 | ||
438 | if (!must_queue) | |
439 | dispatch_queued_ios(m); | |
440 | } | |
441 | ||
442 | /* | |
443 | * An event is triggered whenever a path is taken out of use. | |
444 | * Includes path failure and PG bypass. | |
445 | */ | |
c4028958 | 446 | static void trigger_event(struct work_struct *work) |
1da177e4 | 447 | { |
c4028958 DH |
448 | struct multipath *m = |
449 | container_of(work, struct multipath, trigger_event); | |
1da177e4 LT |
450 | |
451 | dm_table_event(m->ti->table); | |
452 | } | |
453 | ||
454 | /*----------------------------------------------------------------- | |
455 | * Constructor/argument parsing: | |
456 | * <#multipath feature args> [<arg>]* | |
457 | * <#hw_handler args> [hw_handler [<arg>]*] | |
458 | * <#priority groups> | |
459 | * <initial priority group> | |
460 | * [<selector> <#selector args> [<arg>]* | |
461 | * <#paths> <#per-path selector args> | |
462 | * [<path> [<arg>]* ]+ ]+ | |
463 | *---------------------------------------------------------------*/ | |
464 | struct param { | |
465 | unsigned min; | |
466 | unsigned max; | |
467 | char *error; | |
468 | }; | |
469 | ||
1da177e4 LT |
470 | static int read_param(struct param *param, char *str, unsigned *v, char **error) |
471 | { | |
472 | if (!str || | |
473 | (sscanf(str, "%u", v) != 1) || | |
474 | (*v < param->min) || | |
475 | (*v > param->max)) { | |
476 | *error = param->error; | |
477 | return -EINVAL; | |
478 | } | |
479 | ||
480 | return 0; | |
481 | } | |
482 | ||
483 | struct arg_set { | |
484 | unsigned argc; | |
485 | char **argv; | |
486 | }; | |
487 | ||
488 | static char *shift(struct arg_set *as) | |
489 | { | |
490 | char *r; | |
491 | ||
492 | if (as->argc) { | |
493 | as->argc--; | |
494 | r = *as->argv; | |
495 | as->argv++; | |
496 | return r; | |
497 | } | |
498 | ||
499 | return NULL; | |
500 | } | |
501 | ||
502 | static void consume(struct arg_set *as, unsigned n) | |
503 | { | |
504 | BUG_ON (as->argc < n); | |
505 | as->argc -= n; | |
506 | as->argv += n; | |
507 | } | |
508 | ||
509 | static int parse_path_selector(struct arg_set *as, struct priority_group *pg, | |
510 | struct dm_target *ti) | |
511 | { | |
512 | int r; | |
513 | struct path_selector_type *pst; | |
514 | unsigned ps_argc; | |
515 | ||
516 | static struct param _params[] = { | |
72d94861 | 517 | {0, 1024, "invalid number of path selector args"}, |
1da177e4 LT |
518 | }; |
519 | ||
520 | pst = dm_get_path_selector(shift(as)); | |
521 | if (!pst) { | |
72d94861 | 522 | ti->error = "unknown path selector type"; |
1da177e4 LT |
523 | return -EINVAL; |
524 | } | |
525 | ||
526 | r = read_param(_params, shift(as), &ps_argc, &ti->error); | |
527 | if (r) | |
528 | return -EINVAL; | |
529 | ||
530 | r = pst->create(&pg->ps, ps_argc, as->argv); | |
531 | if (r) { | |
532 | dm_put_path_selector(pst); | |
72d94861 | 533 | ti->error = "path selector constructor failed"; |
1da177e4 LT |
534 | return r; |
535 | } | |
536 | ||
537 | pg->ps.type = pst; | |
538 | consume(as, ps_argc); | |
539 | ||
540 | return 0; | |
541 | } | |
542 | ||
543 | static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, | |
544 | struct dm_target *ti) | |
545 | { | |
546 | int r; | |
547 | struct pgpath *p; | |
548 | ||
549 | /* we need at least a path arg */ | |
550 | if (as->argc < 1) { | |
72d94861 | 551 | ti->error = "no device given"; |
1da177e4 LT |
552 | return NULL; |
553 | } | |
554 | ||
555 | p = alloc_pgpath(); | |
556 | if (!p) | |
557 | return NULL; | |
558 | ||
559 | r = dm_get_device(ti, shift(as), ti->begin, ti->len, | |
560 | dm_table_get_mode(ti->table), &p->path.dev); | |
561 | if (r) { | |
72d94861 | 562 | ti->error = "error getting device"; |
1da177e4 LT |
563 | goto bad; |
564 | } | |
565 | ||
566 | r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error); | |
567 | if (r) { | |
568 | dm_put_device(ti, p->path.dev); | |
569 | goto bad; | |
570 | } | |
571 | ||
572 | return p; | |
573 | ||
574 | bad: | |
575 | free_pgpath(p); | |
576 | return NULL; | |
577 | } | |
578 | ||
579 | static struct priority_group *parse_priority_group(struct arg_set *as, | |
28f16c20 | 580 | struct multipath *m) |
1da177e4 LT |
581 | { |
582 | static struct param _params[] = { | |
72d94861 AK |
583 | {1, 1024, "invalid number of paths"}, |
584 | {0, 1024, "invalid number of selector args"} | |
1da177e4 LT |
585 | }; |
586 | ||
587 | int r; | |
588 | unsigned i, nr_selector_args, nr_params; | |
589 | struct priority_group *pg; | |
28f16c20 | 590 | struct dm_target *ti = m->ti; |
1da177e4 LT |
591 | |
592 | if (as->argc < 2) { | |
593 | as->argc = 0; | |
72d94861 | 594 | ti->error = "not enough priority group aruments"; |
1da177e4 LT |
595 | return NULL; |
596 | } | |
597 | ||
598 | pg = alloc_priority_group(); | |
599 | if (!pg) { | |
72d94861 | 600 | ti->error = "couldn't allocate priority group"; |
1da177e4 LT |
601 | return NULL; |
602 | } | |
603 | pg->m = m; | |
604 | ||
605 | r = parse_path_selector(as, pg, ti); | |
606 | if (r) | |
607 | goto bad; | |
608 | ||
609 | /* | |
610 | * read the paths | |
611 | */ | |
612 | r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error); | |
613 | if (r) | |
614 | goto bad; | |
615 | ||
616 | r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error); | |
617 | if (r) | |
618 | goto bad; | |
619 | ||
620 | nr_params = 1 + nr_selector_args; | |
621 | for (i = 0; i < pg->nr_pgpaths; i++) { | |
622 | struct pgpath *pgpath; | |
623 | struct arg_set path_args; | |
624 | ||
625 | if (as->argc < nr_params) | |
626 | goto bad; | |
627 | ||
628 | path_args.argc = nr_params; | |
629 | path_args.argv = as->argv; | |
630 | ||
631 | pgpath = parse_path(&path_args, &pg->ps, ti); | |
632 | if (!pgpath) | |
633 | goto bad; | |
634 | ||
635 | pgpath->pg = pg; | |
636 | list_add_tail(&pgpath->list, &pg->pgpaths); | |
637 | consume(as, nr_params); | |
638 | } | |
639 | ||
640 | return pg; | |
641 | ||
642 | bad: | |
643 | free_priority_group(pg, ti); | |
644 | return NULL; | |
645 | } | |
646 | ||
28f16c20 | 647 | static int parse_hw_handler(struct arg_set *as, struct multipath *m) |
1da177e4 LT |
648 | { |
649 | int r; | |
650 | struct hw_handler_type *hwht; | |
651 | unsigned hw_argc; | |
28f16c20 | 652 | struct dm_target *ti = m->ti; |
1da177e4 LT |
653 | |
654 | static struct param _params[] = { | |
72d94861 | 655 | {0, 1024, "invalid number of hardware handler args"}, |
1da177e4 LT |
656 | }; |
657 | ||
658 | r = read_param(_params, shift(as), &hw_argc, &ti->error); | |
659 | if (r) | |
660 | return -EINVAL; | |
661 | ||
662 | if (!hw_argc) | |
663 | return 0; | |
664 | ||
665 | hwht = dm_get_hw_handler(shift(as)); | |
666 | if (!hwht) { | |
72d94861 | 667 | ti->error = "unknown hardware handler type"; |
1da177e4 LT |
668 | return -EINVAL; |
669 | } | |
670 | ||
79eb885c EG |
671 | m->hw_handler.md = dm_table_get_md(ti->table); |
672 | dm_put(m->hw_handler.md); | |
673 | ||
1da177e4 LT |
674 | r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv); |
675 | if (r) { | |
676 | dm_put_hw_handler(hwht); | |
72d94861 | 677 | ti->error = "hardware handler constructor failed"; |
1da177e4 LT |
678 | return r; |
679 | } | |
680 | ||
681 | m->hw_handler.type = hwht; | |
682 | consume(as, hw_argc - 1); | |
683 | ||
684 | return 0; | |
685 | } | |
686 | ||
28f16c20 | 687 | static int parse_features(struct arg_set *as, struct multipath *m) |
1da177e4 LT |
688 | { |
689 | int r; | |
690 | unsigned argc; | |
28f16c20 | 691 | struct dm_target *ti = m->ti; |
1da177e4 LT |
692 | |
693 | static struct param _params[] = { | |
72d94861 | 694 | {0, 1, "invalid number of feature args"}, |
1da177e4 LT |
695 | }; |
696 | ||
697 | r = read_param(_params, shift(as), &argc, &ti->error); | |
698 | if (r) | |
699 | return -EINVAL; | |
700 | ||
701 | if (!argc) | |
702 | return 0; | |
703 | ||
704 | if (!strnicmp(shift(as), MESG_STR("queue_if_no_path"))) | |
485ef69e | 705 | return queue_if_no_path(m, 1, 0); |
1da177e4 LT |
706 | else { |
707 | ti->error = "Unrecognised multipath feature request"; | |
708 | return -EINVAL; | |
709 | } | |
710 | } | |
711 | ||
712 | static int multipath_ctr(struct dm_target *ti, unsigned int argc, | |
713 | char **argv) | |
714 | { | |
715 | /* target parameters */ | |
716 | static struct param _params[] = { | |
72d94861 AK |
717 | {1, 1024, "invalid number of priority groups"}, |
718 | {1, 1024, "invalid initial priority group number"}, | |
1da177e4 LT |
719 | }; |
720 | ||
721 | int r; | |
722 | struct multipath *m; | |
723 | struct arg_set as; | |
724 | unsigned pg_count = 0; | |
725 | unsigned next_pg_num; | |
726 | ||
727 | as.argc = argc; | |
728 | as.argv = argv; | |
729 | ||
28f16c20 | 730 | m = alloc_multipath(ti); |
1da177e4 | 731 | if (!m) { |
72d94861 | 732 | ti->error = "can't allocate multipath"; |
1da177e4 LT |
733 | return -EINVAL; |
734 | } | |
735 | ||
28f16c20 | 736 | r = parse_features(&as, m); |
1da177e4 LT |
737 | if (r) |
738 | goto bad; | |
739 | ||
28f16c20 | 740 | r = parse_hw_handler(&as, m); |
1da177e4 LT |
741 | if (r) |
742 | goto bad; | |
743 | ||
744 | r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error); | |
745 | if (r) | |
746 | goto bad; | |
747 | ||
748 | r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error); | |
749 | if (r) | |
750 | goto bad; | |
751 | ||
752 | /* parse the priority groups */ | |
753 | while (as.argc) { | |
754 | struct priority_group *pg; | |
755 | ||
28f16c20 | 756 | pg = parse_priority_group(&as, m); |
1da177e4 LT |
757 | if (!pg) { |
758 | r = -EINVAL; | |
759 | goto bad; | |
760 | } | |
761 | ||
762 | m->nr_valid_paths += pg->nr_pgpaths; | |
763 | list_add_tail(&pg->list, &m->priority_groups); | |
764 | pg_count++; | |
765 | pg->pg_num = pg_count; | |
766 | if (!--next_pg_num) | |
767 | m->next_pg = pg; | |
768 | } | |
769 | ||
770 | if (pg_count != m->nr_priority_groups) { | |
72d94861 | 771 | ti->error = "priority group count mismatch"; |
1da177e4 LT |
772 | r = -EINVAL; |
773 | goto bad; | |
774 | } | |
775 | ||
1da177e4 LT |
776 | return 0; |
777 | ||
778 | bad: | |
779 | free_multipath(m); | |
780 | return r; | |
781 | } | |
782 | ||
783 | static void multipath_dtr(struct dm_target *ti) | |
784 | { | |
785 | struct multipath *m = (struct multipath *) ti->private; | |
a044d016 AK |
786 | |
787 | flush_workqueue(kmultipathd); | |
1da177e4 LT |
788 | free_multipath(m); |
789 | } | |
790 | ||
791 | /* | |
792 | * Map bios, recording original fields for later in case we have to resubmit | |
793 | */ | |
794 | static int multipath_map(struct dm_target *ti, struct bio *bio, | |
795 | union map_info *map_context) | |
796 | { | |
797 | int r; | |
798 | struct mpath_io *mpio; | |
799 | struct multipath *m = (struct multipath *) ti->private; | |
800 | ||
f6a80ea8 AK |
801 | if (bio_barrier(bio)) |
802 | return -EOPNOTSUPP; | |
803 | ||
1da177e4 LT |
804 | mpio = mempool_alloc(m->mpio_pool, GFP_NOIO); |
805 | dm_bio_record(&mpio->details, bio); | |
806 | ||
807 | map_context->ptr = mpio; | |
808 | bio->bi_rw |= (1 << BIO_RW_FAILFAST); | |
809 | r = map_io(m, bio, mpio, 0); | |
45e15720 | 810 | if (r < 0 || r == DM_MAPIO_REQUEUE) |
1da177e4 LT |
811 | mempool_free(mpio, m->mpio_pool); |
812 | ||
813 | return r; | |
814 | } | |
815 | ||
816 | /* | |
817 | * Take a path out of use. | |
818 | */ | |
819 | static int fail_path(struct pgpath *pgpath) | |
820 | { | |
821 | unsigned long flags; | |
822 | struct multipath *m = pgpath->pg->m; | |
823 | ||
824 | spin_lock_irqsave(&m->lock, flags); | |
825 | ||
826 | if (!pgpath->path.is_active) | |
827 | goto out; | |
828 | ||
72d94861 | 829 | DMWARN("Failing path %s.", pgpath->path.dev->name); |
1da177e4 LT |
830 | |
831 | pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); | |
832 | pgpath->path.is_active = 0; | |
833 | pgpath->fail_count++; | |
834 | ||
835 | m->nr_valid_paths--; | |
836 | ||
837 | if (pgpath == m->current_pgpath) | |
838 | m->current_pgpath = NULL; | |
839 | ||
c557308e | 840 | queue_work(kmultipathd, &m->trigger_event); |
1da177e4 LT |
841 | |
842 | out: | |
843 | spin_unlock_irqrestore(&m->lock, flags); | |
844 | ||
845 | return 0; | |
846 | } | |
847 | ||
848 | /* | |
849 | * Reinstate a previously-failed path | |
850 | */ | |
851 | static int reinstate_path(struct pgpath *pgpath) | |
852 | { | |
853 | int r = 0; | |
854 | unsigned long flags; | |
855 | struct multipath *m = pgpath->pg->m; | |
856 | ||
857 | spin_lock_irqsave(&m->lock, flags); | |
858 | ||
859 | if (pgpath->path.is_active) | |
860 | goto out; | |
861 | ||
862 | if (!pgpath->pg->ps.type) { | |
863 | DMWARN("Reinstate path not supported by path selector %s", | |
864 | pgpath->pg->ps.type->name); | |
865 | r = -EINVAL; | |
866 | goto out; | |
867 | } | |
868 | ||
869 | r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); | |
870 | if (r) | |
871 | goto out; | |
872 | ||
873 | pgpath->path.is_active = 1; | |
874 | ||
875 | m->current_pgpath = NULL; | |
c3cd4f6b | 876 | if (!m->nr_valid_paths++ && m->queue_size) |
c557308e | 877 | queue_work(kmultipathd, &m->process_queued_ios); |
1da177e4 | 878 | |
c557308e | 879 | queue_work(kmultipathd, &m->trigger_event); |
1da177e4 LT |
880 | |
881 | out: | |
882 | spin_unlock_irqrestore(&m->lock, flags); | |
883 | ||
884 | return r; | |
885 | } | |
886 | ||
887 | /* | |
888 | * Fail or reinstate all paths that match the provided struct dm_dev. | |
889 | */ | |
890 | static int action_dev(struct multipath *m, struct dm_dev *dev, | |
891 | action_fn action) | |
892 | { | |
893 | int r = 0; | |
894 | struct pgpath *pgpath; | |
895 | struct priority_group *pg; | |
896 | ||
897 | list_for_each_entry(pg, &m->priority_groups, list) { | |
898 | list_for_each_entry(pgpath, &pg->pgpaths, list) { | |
899 | if (pgpath->path.dev == dev) | |
900 | r = action(pgpath); | |
901 | } | |
902 | } | |
903 | ||
904 | return r; | |
905 | } | |
906 | ||
907 | /* | |
908 | * Temporarily try to avoid having to use the specified PG | |
909 | */ | |
910 | static void bypass_pg(struct multipath *m, struct priority_group *pg, | |
911 | int bypassed) | |
912 | { | |
913 | unsigned long flags; | |
914 | ||
915 | spin_lock_irqsave(&m->lock, flags); | |
916 | ||
917 | pg->bypassed = bypassed; | |
918 | m->current_pgpath = NULL; | |
919 | m->current_pg = NULL; | |
920 | ||
921 | spin_unlock_irqrestore(&m->lock, flags); | |
922 | ||
c557308e | 923 | queue_work(kmultipathd, &m->trigger_event); |
1da177e4 LT |
924 | } |
925 | ||
926 | /* | |
927 | * Switch to using the specified PG from the next I/O that gets mapped | |
928 | */ | |
929 | static int switch_pg_num(struct multipath *m, const char *pgstr) | |
930 | { | |
931 | struct priority_group *pg; | |
932 | unsigned pgnum; | |
933 | unsigned long flags; | |
934 | ||
935 | if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum || | |
936 | (pgnum > m->nr_priority_groups)) { | |
937 | DMWARN("invalid PG number supplied to switch_pg_num"); | |
938 | return -EINVAL; | |
939 | } | |
940 | ||
941 | spin_lock_irqsave(&m->lock, flags); | |
942 | list_for_each_entry(pg, &m->priority_groups, list) { | |
943 | pg->bypassed = 0; | |
944 | if (--pgnum) | |
945 | continue; | |
946 | ||
947 | m->current_pgpath = NULL; | |
948 | m->current_pg = NULL; | |
949 | m->next_pg = pg; | |
950 | } | |
951 | spin_unlock_irqrestore(&m->lock, flags); | |
952 | ||
c557308e | 953 | queue_work(kmultipathd, &m->trigger_event); |
1da177e4 LT |
954 | return 0; |
955 | } | |
956 | ||
957 | /* | |
958 | * Set/clear bypassed status of a PG. | |
959 | * PGs are numbered upwards from 1 in the order they were declared. | |
960 | */ | |
961 | static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed) | |
962 | { | |
963 | struct priority_group *pg; | |
964 | unsigned pgnum; | |
965 | ||
966 | if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum || | |
967 | (pgnum > m->nr_priority_groups)) { | |
968 | DMWARN("invalid PG number supplied to bypass_pg"); | |
969 | return -EINVAL; | |
970 | } | |
971 | ||
972 | list_for_each_entry(pg, &m->priority_groups, list) { | |
973 | if (!--pgnum) | |
974 | break; | |
975 | } | |
976 | ||
977 | bypass_pg(m, pg, bypassed); | |
978 | return 0; | |
979 | } | |
980 | ||
981 | /* | |
982 | * pg_init must call this when it has completed its initialisation | |
983 | */ | |
c922d5f7 | 984 | void dm_pg_init_complete(struct dm_path *path, unsigned err_flags) |
1da177e4 LT |
985 | { |
986 | struct pgpath *pgpath = path_to_pgpath(path); | |
987 | struct priority_group *pg = pgpath->pg; | |
988 | struct multipath *m = pg->m; | |
989 | unsigned long flags; | |
990 | ||
991 | /* We insist on failing the path if the PG is already bypassed. */ | |
992 | if (err_flags && pg->bypassed) | |
993 | err_flags |= MP_FAIL_PATH; | |
994 | ||
995 | if (err_flags & MP_FAIL_PATH) | |
996 | fail_path(pgpath); | |
997 | ||
998 | if (err_flags & MP_BYPASS_PG) | |
999 | bypass_pg(m, pg, 1); | |
1000 | ||
1001 | spin_lock_irqsave(&m->lock, flags); | |
c3cd4f6b | 1002 | if (err_flags) { |
1da177e4 LT |
1003 | m->current_pgpath = NULL; |
1004 | m->current_pg = NULL; | |
c3cd4f6b AK |
1005 | } else if (!m->pg_init_required) |
1006 | m->queue_io = 0; | |
1007 | ||
1008 | m->pg_init_in_progress = 0; | |
c557308e | 1009 | queue_work(kmultipathd, &m->process_queued_ios); |
1da177e4 LT |
1010 | spin_unlock_irqrestore(&m->lock, flags); |
1011 | } | |
1012 | ||
1013 | /* | |
1014 | * end_io handling | |
1015 | */ | |
1016 | static int do_end_io(struct multipath *m, struct bio *bio, | |
1017 | int error, struct mpath_io *mpio) | |
1018 | { | |
1019 | struct hw_handler *hwh = &m->hw_handler; | |
1020 | unsigned err_flags = MP_FAIL_PATH; /* Default behavior */ | |
640eb3b0 | 1021 | unsigned long flags; |
1da177e4 LT |
1022 | |
1023 | if (!error) | |
1024 | return 0; /* I/O complete */ | |
1025 | ||
4f58802f LMB |
1026 | if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio)) |
1027 | return error; | |
1028 | ||
f6a80ea8 AK |
1029 | if (error == -EOPNOTSUPP) |
1030 | return error; | |
1031 | ||
640eb3b0 | 1032 | spin_lock_irqsave(&m->lock, flags); |
1da177e4 | 1033 | if (!m->nr_valid_paths) { |
45e15720 KU |
1034 | if (__must_push_back(m)) { |
1035 | spin_unlock_irqrestore(&m->lock, flags); | |
1036 | return DM_ENDIO_REQUEUE; | |
1037 | } else if (!m->queue_if_no_path) { | |
640eb3b0 | 1038 | spin_unlock_irqrestore(&m->lock, flags); |
1da177e4 LT |
1039 | return -EIO; |
1040 | } else { | |
640eb3b0 | 1041 | spin_unlock_irqrestore(&m->lock, flags); |
1da177e4 LT |
1042 | goto requeue; |
1043 | } | |
1044 | } | |
640eb3b0 | 1045 | spin_unlock_irqrestore(&m->lock, flags); |
1da177e4 LT |
1046 | |
1047 | if (hwh->type && hwh->type->error) | |
1048 | err_flags = hwh->type->error(hwh, bio); | |
1049 | ||
1050 | if (mpio->pgpath) { | |
1051 | if (err_flags & MP_FAIL_PATH) | |
1052 | fail_path(mpio->pgpath); | |
1053 | ||
1054 | if (err_flags & MP_BYPASS_PG) | |
1055 | bypass_pg(m, mpio->pgpath->pg, 1); | |
1056 | } | |
1057 | ||
1058 | if (err_flags & MP_ERROR_IO) | |
1059 | return -EIO; | |
1060 | ||
1061 | requeue: | |
1062 | dm_bio_restore(&mpio->details, bio); | |
1063 | ||
1064 | /* queue for the daemon to resubmit or fail */ | |
640eb3b0 | 1065 | spin_lock_irqsave(&m->lock, flags); |
1da177e4 LT |
1066 | bio_list_add(&m->queued_ios, bio); |
1067 | m->queue_size++; | |
1068 | if (!m->queue_io) | |
c557308e | 1069 | queue_work(kmultipathd, &m->process_queued_ios); |
640eb3b0 | 1070 | spin_unlock_irqrestore(&m->lock, flags); |
1da177e4 | 1071 | |
d2a7ad29 | 1072 | return DM_ENDIO_INCOMPLETE; /* io not complete */ |
1da177e4 LT |
1073 | } |
1074 | ||
1075 | static int multipath_end_io(struct dm_target *ti, struct bio *bio, | |
1076 | int error, union map_info *map_context) | |
1077 | { | |
1078 | struct multipath *m = (struct multipath *) ti->private; | |
1079 | struct mpath_io *mpio = (struct mpath_io *) map_context->ptr; | |
1080 | struct pgpath *pgpath = mpio->pgpath; | |
1081 | struct path_selector *ps; | |
1082 | int r; | |
1083 | ||
1084 | r = do_end_io(m, bio, error, mpio); | |
1085 | if (pgpath) { | |
1086 | ps = &pgpath->pg->ps; | |
1087 | if (ps->type->end_io) | |
1088 | ps->type->end_io(ps, &pgpath->path); | |
1089 | } | |
d2a7ad29 | 1090 | if (r != DM_ENDIO_INCOMPLETE) |
1da177e4 LT |
1091 | mempool_free(mpio, m->mpio_pool); |
1092 | ||
1093 | return r; | |
1094 | } | |
1095 | ||
1096 | /* | |
1097 | * Suspend can't complete until all the I/O is processed so if | |
436d4108 AK |
1098 | * the last path fails we must error any remaining I/O. |
1099 | * Note that if the freeze_bdev fails while suspending, the | |
1100 | * queue_if_no_path state is lost - userspace should reset it. | |
1da177e4 LT |
1101 | */ |
1102 | static void multipath_presuspend(struct dm_target *ti) | |
1103 | { | |
1104 | struct multipath *m = (struct multipath *) ti->private; | |
1da177e4 | 1105 | |
485ef69e | 1106 | queue_if_no_path(m, 0, 1); |
1da177e4 LT |
1107 | } |
1108 | ||
436d4108 AK |
1109 | /* |
1110 | * Restore the queue_if_no_path setting. | |
1111 | */ | |
1da177e4 LT |
1112 | static void multipath_resume(struct dm_target *ti) |
1113 | { | |
1114 | struct multipath *m = (struct multipath *) ti->private; | |
1115 | unsigned long flags; | |
1116 | ||
1117 | spin_lock_irqsave(&m->lock, flags); | |
436d4108 | 1118 | m->queue_if_no_path = m->saved_queue_if_no_path; |
1da177e4 LT |
1119 | spin_unlock_irqrestore(&m->lock, flags); |
1120 | } | |
1121 | ||
1122 | /* | |
1123 | * Info output has the following format: | |
1124 | * num_multipath_feature_args [multipath_feature_args]* | |
1125 | * num_handler_status_args [handler_status_args]* | |
1126 | * num_groups init_group_number | |
1127 | * [A|D|E num_ps_status_args [ps_status_args]* | |
1128 | * num_paths num_selector_args | |
1129 | * [path_dev A|F fail_count [selector_args]* ]+ ]+ | |
1130 | * | |
1131 | * Table output has the following format (identical to the constructor string): | |
1132 | * num_feature_args [features_args]* | |
1133 | * num_handler_args hw_handler [hw_handler_args]* | |
1134 | * num_groups init_group_number | |
1135 | * [priority selector-name num_ps_args [ps_args]* | |
1136 | * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+ | |
1137 | */ | |
1138 | static int multipath_status(struct dm_target *ti, status_type_t type, | |
1139 | char *result, unsigned int maxlen) | |
1140 | { | |
1141 | int sz = 0; | |
1142 | unsigned long flags; | |
1143 | struct multipath *m = (struct multipath *) ti->private; | |
1144 | struct hw_handler *hwh = &m->hw_handler; | |
1145 | struct priority_group *pg; | |
1146 | struct pgpath *p; | |
1147 | unsigned pg_num; | |
1148 | char state; | |
1149 | ||
1150 | spin_lock_irqsave(&m->lock, flags); | |
1151 | ||
1152 | /* Features */ | |
1153 | if (type == STATUSTYPE_INFO) | |
1154 | DMEMIT("1 %u ", m->queue_size); | |
1155 | else if (m->queue_if_no_path) | |
1156 | DMEMIT("1 queue_if_no_path "); | |
1157 | else | |
1158 | DMEMIT("0 "); | |
1159 | ||
1160 | if (hwh->type && hwh->type->status) | |
1161 | sz += hwh->type->status(hwh, type, result + sz, maxlen - sz); | |
1162 | else if (!hwh->type || type == STATUSTYPE_INFO) | |
1163 | DMEMIT("0 "); | |
1164 | else | |
1165 | DMEMIT("1 %s ", hwh->type->name); | |
1166 | ||
1167 | DMEMIT("%u ", m->nr_priority_groups); | |
1168 | ||
1169 | if (m->next_pg) | |
1170 | pg_num = m->next_pg->pg_num; | |
1171 | else if (m->current_pg) | |
1172 | pg_num = m->current_pg->pg_num; | |
1173 | else | |
1174 | pg_num = 1; | |
1175 | ||
1176 | DMEMIT("%u ", pg_num); | |
1177 | ||
1178 | switch (type) { | |
1179 | case STATUSTYPE_INFO: | |
1180 | list_for_each_entry(pg, &m->priority_groups, list) { | |
1181 | if (pg->bypassed) | |
1182 | state = 'D'; /* Disabled */ | |
1183 | else if (pg == m->current_pg) | |
1184 | state = 'A'; /* Currently Active */ | |
1185 | else | |
1186 | state = 'E'; /* Enabled */ | |
1187 | ||
1188 | DMEMIT("%c ", state); | |
1189 | ||
1190 | if (pg->ps.type->status) | |
1191 | sz += pg->ps.type->status(&pg->ps, NULL, type, | |
1192 | result + sz, | |
1193 | maxlen - sz); | |
1194 | else | |
1195 | DMEMIT("0 "); | |
1196 | ||
1197 | DMEMIT("%u %u ", pg->nr_pgpaths, | |
1198 | pg->ps.type->info_args); | |
1199 | ||
1200 | list_for_each_entry(p, &pg->pgpaths, list) { | |
1201 | DMEMIT("%s %s %u ", p->path.dev->name, | |
1202 | p->path.is_active ? "A" : "F", | |
1203 | p->fail_count); | |
1204 | if (pg->ps.type->status) | |
1205 | sz += pg->ps.type->status(&pg->ps, | |
1206 | &p->path, type, result + sz, | |
1207 | maxlen - sz); | |
1208 | } | |
1209 | } | |
1210 | break; | |
1211 | ||
1212 | case STATUSTYPE_TABLE: | |
1213 | list_for_each_entry(pg, &m->priority_groups, list) { | |
1214 | DMEMIT("%s ", pg->ps.type->name); | |
1215 | ||
1216 | if (pg->ps.type->status) | |
1217 | sz += pg->ps.type->status(&pg->ps, NULL, type, | |
1218 | result + sz, | |
1219 | maxlen - sz); | |
1220 | else | |
1221 | DMEMIT("0 "); | |
1222 | ||
1223 | DMEMIT("%u %u ", pg->nr_pgpaths, | |
1224 | pg->ps.type->table_args); | |
1225 | ||
1226 | list_for_each_entry(p, &pg->pgpaths, list) { | |
1227 | DMEMIT("%s ", p->path.dev->name); | |
1228 | if (pg->ps.type->status) | |
1229 | sz += pg->ps.type->status(&pg->ps, | |
1230 | &p->path, type, result + sz, | |
1231 | maxlen - sz); | |
1232 | } | |
1233 | } | |
1234 | break; | |
1235 | } | |
1236 | ||
1237 | spin_unlock_irqrestore(&m->lock, flags); | |
1238 | ||
1239 | return 0; | |
1240 | } | |
1241 | ||
1242 | static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) | |
1243 | { | |
1244 | int r; | |
1245 | struct dm_dev *dev; | |
1246 | struct multipath *m = (struct multipath *) ti->private; | |
1247 | action_fn action; | |
1248 | ||
1249 | if (argc == 1) { | |
1250 | if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) | |
485ef69e | 1251 | return queue_if_no_path(m, 1, 0); |
1da177e4 | 1252 | else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) |
485ef69e | 1253 | return queue_if_no_path(m, 0, 0); |
1da177e4 LT |
1254 | } |
1255 | ||
1256 | if (argc != 2) | |
1257 | goto error; | |
1258 | ||
1259 | if (!strnicmp(argv[0], MESG_STR("disable_group"))) | |
1260 | return bypass_pg_num(m, argv[1], 1); | |
1261 | else if (!strnicmp(argv[0], MESG_STR("enable_group"))) | |
1262 | return bypass_pg_num(m, argv[1], 0); | |
1263 | else if (!strnicmp(argv[0], MESG_STR("switch_group"))) | |
1264 | return switch_pg_num(m, argv[1]); | |
1265 | else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) | |
1266 | action = reinstate_path; | |
1267 | else if (!strnicmp(argv[0], MESG_STR("fail_path"))) | |
1268 | action = fail_path; | |
1269 | else | |
1270 | goto error; | |
1271 | ||
1272 | r = dm_get_device(ti, argv[1], ti->begin, ti->len, | |
1273 | dm_table_get_mode(ti->table), &dev); | |
1274 | if (r) { | |
72d94861 | 1275 | DMWARN("message: error getting device %s", |
1da177e4 LT |
1276 | argv[1]); |
1277 | return -EINVAL; | |
1278 | } | |
1279 | ||
1280 | r = action_dev(m, dev, action); | |
1281 | ||
1282 | dm_put_device(ti, dev); | |
1283 | ||
1284 | return r; | |
1285 | ||
1286 | error: | |
1287 | DMWARN("Unrecognised multipath message received."); | |
1288 | return -EINVAL; | |
1289 | } | |
1290 | ||
9af4aa30 MB |
1291 | static int multipath_ioctl(struct dm_target *ti, struct inode *inode, |
1292 | struct file *filp, unsigned int cmd, | |
1293 | unsigned long arg) | |
1294 | { | |
1295 | struct multipath *m = (struct multipath *) ti->private; | |
1296 | struct block_device *bdev = NULL; | |
1297 | unsigned long flags; | |
e90dae1f MB |
1298 | struct file fake_file = {}; |
1299 | struct dentry fake_dentry = {}; | |
9af4aa30 MB |
1300 | int r = 0; |
1301 | ||
c649bb9c | 1302 | fake_file.f_path.dentry = &fake_dentry; |
e90dae1f | 1303 | |
9af4aa30 MB |
1304 | spin_lock_irqsave(&m->lock, flags); |
1305 | ||
1306 | if (!m->current_pgpath) | |
1307 | __choose_pgpath(m); | |
1308 | ||
e90dae1f | 1309 | if (m->current_pgpath) { |
9af4aa30 | 1310 | bdev = m->current_pgpath->path.dev->bdev; |
e90dae1f MB |
1311 | fake_dentry.d_inode = bdev->bd_inode; |
1312 | fake_file.f_mode = m->current_pgpath->path.dev->mode; | |
1313 | } | |
9af4aa30 MB |
1314 | |
1315 | if (m->queue_io) | |
1316 | r = -EAGAIN; | |
1317 | else if (!bdev) | |
1318 | r = -EIO; | |
1319 | ||
1320 | spin_unlock_irqrestore(&m->lock, flags); | |
1321 | ||
e90dae1f MB |
1322 | return r ? : blkdev_driver_ioctl(bdev->bd_inode, &fake_file, |
1323 | bdev->bd_disk, cmd, arg); | |
9af4aa30 MB |
1324 | } |
1325 | ||
1da177e4 LT |
1326 | /*----------------------------------------------------------------- |
1327 | * Module setup | |
1328 | *---------------------------------------------------------------*/ | |
1329 | static struct target_type multipath_target = { | |
1330 | .name = "multipath", | |
9af4aa30 | 1331 | .version = {1, 0, 5}, |
1da177e4 LT |
1332 | .module = THIS_MODULE, |
1333 | .ctr = multipath_ctr, | |
1334 | .dtr = multipath_dtr, | |
1335 | .map = multipath_map, | |
1336 | .end_io = multipath_end_io, | |
1337 | .presuspend = multipath_presuspend, | |
1338 | .resume = multipath_resume, | |
1339 | .status = multipath_status, | |
1340 | .message = multipath_message, | |
9af4aa30 | 1341 | .ioctl = multipath_ioctl, |
1da177e4 LT |
1342 | }; |
1343 | ||
1344 | static int __init dm_multipath_init(void) | |
1345 | { | |
1346 | int r; | |
1347 | ||
1348 | /* allocate a slab for the dm_ios */ | |
1349 | _mpio_cache = kmem_cache_create("dm_mpath", sizeof(struct mpath_io), | |
1350 | 0, 0, NULL, NULL); | |
1351 | if (!_mpio_cache) | |
1352 | return -ENOMEM; | |
1353 | ||
1354 | r = dm_register_target(&multipath_target); | |
1355 | if (r < 0) { | |
1356 | DMERR("%s: register failed %d", multipath_target.name, r); | |
1357 | kmem_cache_destroy(_mpio_cache); | |
1358 | return -EINVAL; | |
1359 | } | |
1360 | ||
c557308e AK |
1361 | kmultipathd = create_workqueue("kmpathd"); |
1362 | if (!kmultipathd) { | |
1363 | DMERR("%s: failed to create workqueue kmpathd", | |
1364 | multipath_target.name); | |
1365 | dm_unregister_target(&multipath_target); | |
1366 | kmem_cache_destroy(_mpio_cache); | |
1367 | return -ENOMEM; | |
1368 | } | |
1369 | ||
72d94861 | 1370 | DMINFO("version %u.%u.%u loaded", |
1da177e4 LT |
1371 | multipath_target.version[0], multipath_target.version[1], |
1372 | multipath_target.version[2]); | |
1373 | ||
1374 | return r; | |
1375 | } | |
1376 | ||
1377 | static void __exit dm_multipath_exit(void) | |
1378 | { | |
1379 | int r; | |
1380 | ||
c557308e AK |
1381 | destroy_workqueue(kmultipathd); |
1382 | ||
1da177e4 LT |
1383 | r = dm_unregister_target(&multipath_target); |
1384 | if (r < 0) | |
1385 | DMERR("%s: target unregister failed %d", | |
1386 | multipath_target.name, r); | |
1387 | kmem_cache_destroy(_mpio_cache); | |
1388 | } | |
1389 | ||
1390 | EXPORT_SYMBOL_GPL(dm_pg_init_complete); | |
1391 | ||
1392 | module_init(dm_multipath_init); | |
1393 | module_exit(dm_multipath_exit); | |
1394 | ||
1395 | MODULE_DESCRIPTION(DM_NAME " multipath target"); | |
1396 | MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>"); | |
1397 | MODULE_LICENSE("GPL"); |