drivers/md/dm-mpath.c
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-path-selector.h"
#include "dm-hw-handler.h"
#include "dm-bio-list.h"
#include "dm-bio-record.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define MESG_STR(x) x, sizeof(x)
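/*
 * MESG_STR() expands to a string literal plus its size (including the
 * trailing NUL), so keyword matching can be written as
 * strnicmp(str, MESG_STR("queue_if_no_path")), as in parse_features()
 * and multipath_message() below.
 */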

/* Path properties */
struct pgpath {
        struct list_head list;

        struct priority_group *pg;      /* Owning PG */
        unsigned fail_count;            /* Cumulative failure count */

        struct dm_path path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
        struct list_head list;

        struct multipath *m;            /* Owning multipath instance */
        struct path_selector ps;

        unsigned pg_num;                /* Reference number */
        unsigned bypassed;              /* Temporarily bypass this PG? */

        unsigned nr_pgpaths;            /* Number of paths in PG */
        struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
        struct list_head list;
        struct dm_target *ti;

        spinlock_t lock;

        struct hw_handler hw_handler;
        unsigned nr_priority_groups;
        struct list_head priority_groups;
        unsigned pg_init_required;      /* pg_init needs calling? */
        unsigned pg_init_in_progress;   /* Only one pg_init allowed at once */

        unsigned nr_valid_paths;        /* Total number of usable paths */
        struct pgpath *current_pgpath;
        struct priority_group *current_pg;
        struct priority_group *next_pg; /* Switch to this PG if set */
        unsigned repeat_count;          /* I/Os left before calling PS again */

        unsigned queue_io;              /* Must we queue all I/O? */
        unsigned queue_if_no_path;      /* Queue I/O if last path fails? */
        unsigned saved_queue_if_no_path;/* Saved state during suspension */

        struct work_struct process_queued_ios;
        struct bio_list queued_ios;
        unsigned queue_size;

        struct work_struct trigger_event;

        /*
         * We must use a mempool of mpath_io structs so that we
         * can resubmit bios on error.
         */
        mempool_t *mpio_pool;
};

/*
 * Context information attached to each bio we process.
 */
struct mpath_io {
        struct pgpath *pgpath;
        struct dm_bio_details details;
};

typedef int (*action_fn) (struct pgpath *pgpath);

#define MIN_IOS 256     /* Mempool size */

static struct kmem_cache *_mpio_cache;

struct workqueue_struct *kmultipathd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
        struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

        if (pgpath)
                pgpath->path.is_active = 1;

        return pgpath;
}

static inline void free_pgpath(struct pgpath *pgpath)
{
        kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
        struct priority_group *pg;

        pg = kzalloc(sizeof(*pg), GFP_KERNEL);

        if (pg)
                INIT_LIST_HEAD(&pg->pgpaths);

        return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
        struct pgpath *pgpath, *tmp;

        list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
                list_del(&pgpath->list);
                dm_put_device(ti, pgpath->path.dev);
                free_pgpath(pgpath);
        }
}

static void free_priority_group(struct priority_group *pg,
                                struct dm_target *ti)
{
        struct path_selector *ps = &pg->ps;

        if (ps->type) {
                ps->type->destroy(ps);
                dm_put_path_selector(ps->type);
        }

        free_pgpaths(&pg->pgpaths, ti);
        kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
        struct multipath *m;

        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (m) {
                INIT_LIST_HEAD(&m->priority_groups);
                spin_lock_init(&m->lock);
                m->queue_io = 1;
                INIT_WORK(&m->process_queued_ios, process_queued_ios);
                INIT_WORK(&m->trigger_event, trigger_event);
                m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
                        return NULL;
                }
                m->ti = ti;
                ti->private = m;
        }

        return m;
}

static void free_multipath(struct multipath *m)
{
        struct priority_group *pg, *tmp;
        struct hw_handler *hwh = &m->hw_handler;

        list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
                list_del(&pg->list);
                free_priority_group(pg, m->ti);
        }

        if (hwh->type) {
                hwh->type->destroy(hwh);
                dm_put_hw_handler(hwh->type);
        }

        mempool_destroy(m->mpio_pool);
        kfree(m);
}


/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
        struct hw_handler *hwh = &m->hw_handler;

        m->current_pg = pgpath->pg;

        /* Must we initialise the PG first, and queue I/O till it's ready? */
        if (hwh->type && hwh->type->pg_init) {
                m->pg_init_required = 1;
                m->queue_io = 1;
        } else {
                m->pg_init_required = 0;
                m->queue_io = 0;
        }
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg)
{
        struct dm_path *path;

        path = pg->ps.type->select_path(&pg->ps, &m->repeat_count);
        if (!path)
                return -ENXIO;

        m->current_pgpath = path_to_pgpath(path);

        if (m->current_pg != pg)
                __switch_pg(m, m->current_pgpath);

        return 0;
}

static void __choose_pgpath(struct multipath *m)
{
        struct priority_group *pg;
        unsigned bypassed = 1;

        if (!m->nr_valid_paths)
                goto failed;

        /* Were we instructed to switch PG? */
        if (m->next_pg) {
                pg = m->next_pg;
                m->next_pg = NULL;
                if (!__choose_path_in_pg(m, pg))
                        return;
        }

        /* Don't change PG until it has no remaining paths */
        if (m->current_pg && !__choose_path_in_pg(m, m->current_pg))
                return;

        /*
         * Loop through priority groups until we find a valid path.
         * First time we skip PGs marked 'bypassed'.
         * Second time we only try the ones we skipped.
         */
        do {
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed == bypassed)
                                continue;
                        if (!__choose_path_in_pg(m, pg))
                                return;
                }
        } while (bypassed--);

failed:
        m->current_pgpath = NULL;
        m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
        return (m->queue_if_no_path != m->saved_queue_if_no_path &&
                dm_noflush_suspending(m->ti));
}

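/*
 * Map a bio to the current path.  Returns DM_MAPIO_SUBMITTED if the bio
 * was queued internally, DM_MAPIO_REMAPPED if bi_bdev was switched to a
 * usable path, DM_MAPIO_REQUEUE to push the bio back to the dm core, or
 * -EIO if no path is available and queueing is disabled.
 */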
static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
                  unsigned was_queued)
{
        int r = DM_MAPIO_REMAPPED;
        unsigned long flags;
        struct pgpath *pgpath;

        spin_lock_irqsave(&m->lock, flags);

        /* Do we need to select a new pgpath? */
        if (!m->current_pgpath ||
            (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
                __choose_pgpath(m);

        pgpath = m->current_pgpath;

        if (was_queued)
                m->queue_size--;

        if ((pgpath && m->queue_io) ||
            (!pgpath && m->queue_if_no_path)) {
                /* Queue for the daemon to resubmit */
                bio_list_add(&m->queued_ios, bio);
                m->queue_size++;
                if ((m->pg_init_required && !m->pg_init_in_progress) ||
                    !m->queue_io)
                        queue_work(kmultipathd, &m->process_queued_ios);
                pgpath = NULL;
                r = DM_MAPIO_SUBMITTED;
        } else if (pgpath)
                bio->bi_bdev = pgpath->path.dev->bdev;
        else if (__must_push_back(m))
                r = DM_MAPIO_REQUEUE;
        else
                r = -EIO;       /* Failed */

        mpio->pgpath = pgpath;

        spin_unlock_irqrestore(&m->lock, flags);

        return r;
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
                            unsigned save_old_value)
{
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        if (save_old_value)
                m->saved_queue_if_no_path = m->queue_if_no_path;
        else
                m->saved_queue_if_no_path = queue_if_no_path;
        m->queue_if_no_path = queue_if_no_path;
        if (!m->queue_if_no_path && m->queue_size)
                queue_work(kmultipathd, &m->process_queued_ios);

        spin_unlock_irqrestore(&m->lock, flags);

        return 0;
}

/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

static void dispatch_queued_ios(struct multipath *m)
{
        int r;
        unsigned long flags;
        struct bio *bio = NULL, *next;
        struct mpath_io *mpio;
        union map_info *info;

        spin_lock_irqsave(&m->lock, flags);
        bio = bio_list_get(&m->queued_ios);
        spin_unlock_irqrestore(&m->lock, flags);

        while (bio) {
                next = bio->bi_next;
                bio->bi_next = NULL;

                info = dm_get_mapinfo(bio);
                mpio = info->ptr;

                r = map_io(m, bio, mpio, 1);
                if (r < 0)
                        bio_endio(bio, bio->bi_size, r);
                else if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                else if (r == DM_MAPIO_REQUEUE)
                        bio_endio(bio, bio->bi_size, -EIO);

                bio = next;
        }
}

static void process_queued_ios(struct work_struct *work)
{
        struct multipath *m =
                container_of(work, struct multipath, process_queued_ios);
        struct hw_handler *hwh = &m->hw_handler;
        struct pgpath *pgpath = NULL;
        unsigned init_required = 0, must_queue = 1;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        if (!m->queue_size)
                goto out;

        if (!m->current_pgpath)
                __choose_pgpath(m);

        pgpath = m->current_pgpath;

        if ((pgpath && !m->queue_io) ||
            (!pgpath && !m->queue_if_no_path))
                must_queue = 0;

        if (m->pg_init_required && !m->pg_init_in_progress) {
                m->pg_init_required = 0;
                m->pg_init_in_progress = 1;
                init_required = 1;
        }

out:
        spin_unlock_irqrestore(&m->lock, flags);

        if (init_required)
                hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path);

        if (!must_queue)
                dispatch_queued_ios(m);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
        struct multipath *m =
                container_of(work, struct multipath, trigger_event);

        dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
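/*
 * Illustrative only (not part of the original source): a table line for a
 * two-group map using the round-robin selector and no hardware handler
 * might look like
 *
 *   0 1024 multipath 0 0 2 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *                            round-robin 0 2 1 8:48 1000 8:64 1000
 *
 * where the device numbers and repeat counts are hypothetical.
 */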
struct param {
        unsigned min;
        unsigned max;
        char *error;
};

static int read_param(struct param *param, char *str, unsigned *v, char **error)
{
        if (!str ||
            (sscanf(str, "%u", v) != 1) ||
            (*v < param->min) ||
            (*v > param->max)) {
                *error = param->error;
                return -EINVAL;
        }

        return 0;
}

struct arg_set {
        unsigned argc;
        char **argv;
};

static char *shift(struct arg_set *as)
{
        char *r;

        if (as->argc) {
                as->argc--;
                r = *as->argv;
                as->argv++;
                return r;
        }

        return NULL;
}

static void consume(struct arg_set *as, unsigned n)
{
        BUG_ON(as->argc < n);
        as->argc -= n;
        as->argv += n;
}

static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
                               struct dm_target *ti)
{
        int r;
        struct path_selector_type *pst;
        unsigned ps_argc;

        static struct param _params[] = {
                {0, 1024, "invalid number of path selector args"},
        };

        pst = dm_get_path_selector(shift(as));
        if (!pst) {
                ti->error = "unknown path selector type";
                return -EINVAL;
        }

        r = read_param(_params, shift(as), &ps_argc, &ti->error);
        if (r)
                return -EINVAL;

        r = pst->create(&pg->ps, ps_argc, as->argv);
        if (r) {
                dm_put_path_selector(pst);
                ti->error = "path selector constructor failed";
                return r;
        }

        pg->ps.type = pst;
        consume(as, ps_argc);

        return 0;
}

static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
                                 struct dm_target *ti)
{
        int r;
        struct pgpath *p;

        /* we need at least a path arg */
        if (as->argc < 1) {
                ti->error = "no device given";
                return NULL;
        }

        p = alloc_pgpath();
        if (!p)
                return NULL;

        r = dm_get_device(ti, shift(as), ti->begin, ti->len,
                          dm_table_get_mode(ti->table), &p->path.dev);
        if (r) {
                ti->error = "error getting device";
                goto bad;
        }

        r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
        if (r) {
                dm_put_device(ti, p->path.dev);
                goto bad;
        }

        return p;

bad:
        free_pgpath(p);
        return NULL;
}

static struct priority_group *parse_priority_group(struct arg_set *as,
                                                   struct multipath *m)
{
        static struct param _params[] = {
                {1, 1024, "invalid number of paths"},
                {0, 1024, "invalid number of selector args"}
        };

        int r;
        unsigned i, nr_selector_args, nr_params;
        struct priority_group *pg;
        struct dm_target *ti = m->ti;

        if (as->argc < 2) {
                as->argc = 0;
                ti->error = "not enough priority group arguments";
                return NULL;
        }

        pg = alloc_priority_group();
        if (!pg) {
                ti->error = "couldn't allocate priority group";
                return NULL;
        }
        pg->m = m;

        r = parse_path_selector(as, pg, ti);
        if (r)
                goto bad;

        /*
         * read the paths
         */
        r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error);
        if (r)
                goto bad;

        r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
        if (r)
                goto bad;

        nr_params = 1 + nr_selector_args;
        for (i = 0; i < pg->nr_pgpaths; i++) {
                struct pgpath *pgpath;
                struct arg_set path_args;

                if (as->argc < nr_params)
                        goto bad;

                path_args.argc = nr_params;
                path_args.argv = as->argv;

                pgpath = parse_path(&path_args, &pg->ps, ti);
                if (!pgpath)
                        goto bad;

                pgpath->pg = pg;
                list_add_tail(&pgpath->list, &pg->pgpaths);
                consume(as, nr_params);
        }

        return pg;

bad:
        free_priority_group(pg, ti);
        return NULL;
}

static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{
        int r;
        struct hw_handler_type *hwht;
        unsigned hw_argc;
        struct dm_target *ti = m->ti;

        static struct param _params[] = {
                {0, 1024, "invalid number of hardware handler args"},
        };

        r = read_param(_params, shift(as), &hw_argc, &ti->error);
        if (r)
                return -EINVAL;

        if (!hw_argc)
                return 0;

        hwht = dm_get_hw_handler(shift(as));
        if (!hwht) {
                ti->error = "unknown hardware handler type";
                return -EINVAL;
        }

        r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
        if (r) {
                dm_put_hw_handler(hwht);
                ti->error = "hardware handler constructor failed";
                return r;
        }

        m->hw_handler.type = hwht;
        consume(as, hw_argc - 1);

        return 0;
}

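/*
 * Illustrative only (not from the original source): a hw_handler field of
 * "1 emc" would select a hardware handler named "emc" with no extra
 * arguments; the count includes the handler name itself, hence the
 * hw_argc - 1 above.
 */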
static int parse_features(struct arg_set *as, struct multipath *m)
{
        int r;
        unsigned argc;
        struct dm_target *ti = m->ti;

        static struct param _params[] = {
                {0, 1, "invalid number of feature args"},
        };

        r = read_param(_params, shift(as), &argc, &ti->error);
        if (r)
                return -EINVAL;

        if (!argc)
                return 0;

        if (!strnicmp(shift(as), MESG_STR("queue_if_no_path")))
                return queue_if_no_path(m, 1, 0);
        else {
                ti->error = "Unrecognised multipath feature request";
                return -EINVAL;
        }
}

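/*
 * Illustrative only: a features field of "1 queue_if_no_path" turns on
 * queueing while every path is down, and "0" requests no features.
 */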
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
                         char **argv)
{
        /* target parameters */
        static struct param _params[] = {
                {1, 1024, "invalid number of priority groups"},
                {1, 1024, "invalid initial priority group number"},
        };

        int r;
        struct multipath *m;
        struct arg_set as;
        unsigned pg_count = 0;
        unsigned next_pg_num;

        as.argc = argc;
        as.argv = argv;

        m = alloc_multipath(ti);
        if (!m) {
                ti->error = "can't allocate multipath";
                return -EINVAL;
        }

        r = parse_features(&as, m);
        if (r)
                goto bad;

        r = parse_hw_handler(&as, m);
        if (r)
                goto bad;

        r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
        if (r)
                goto bad;

        r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error);
        if (r)
                goto bad;

        /* parse the priority groups */
        while (as.argc) {
                struct priority_group *pg;

                pg = parse_priority_group(&as, m);
                if (!pg) {
                        r = -EINVAL;
                        goto bad;
                }

                m->nr_valid_paths += pg->nr_pgpaths;
                list_add_tail(&pg->list, &m->priority_groups);
                pg_count++;
                pg->pg_num = pg_count;
                if (!--next_pg_num)
                        m->next_pg = pg;
        }

        if (pg_count != m->nr_priority_groups) {
                ti->error = "priority group count mismatch";
                r = -EINVAL;
                goto bad;
        }

        return 0;

bad:
        free_multipath(m);
        return r;
}

static void multipath_dtr(struct dm_target *ti)
{
        struct multipath *m = (struct multipath *) ti->private;

        flush_workqueue(kmultipathd);
        free_multipath(m);
}

/*
 * Map bios, recording original fields for later in case we have to resubmit
 */
static int multipath_map(struct dm_target *ti, struct bio *bio,
                         union map_info *map_context)
{
        int r;
        struct mpath_io *mpio;
        struct multipath *m = (struct multipath *) ti->private;

        if (bio_barrier(bio))
                return -EOPNOTSUPP;

        mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
        dm_bio_record(&mpio->details, bio);

        map_context->ptr = mpio;
        bio->bi_rw |= (1 << BIO_RW_FAILFAST);
        r = map_io(m, bio, mpio, 0);
        if (r < 0 || r == DM_MAPIO_REQUEUE)
                mempool_free(mpio, m->mpio_pool);

        return r;
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
        unsigned long flags;
        struct multipath *m = pgpath->pg->m;

        spin_lock_irqsave(&m->lock, flags);

        if (!pgpath->path.is_active)
                goto out;

        DMWARN("Failing path %s.", pgpath->path.dev->name);

        pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
        pgpath->path.is_active = 0;
        pgpath->fail_count++;

        m->nr_valid_paths--;

        if (pgpath == m->current_pgpath)
                m->current_pgpath = NULL;

        queue_work(kmultipathd, &m->trigger_event);

out:
        spin_unlock_irqrestore(&m->lock, flags);

        return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
        int r = 0;
        unsigned long flags;
        struct multipath *m = pgpath->pg->m;

        spin_lock_irqsave(&m->lock, flags);

        if (pgpath->path.is_active)
                goto out;

        if (!pgpath->pg->ps.type) {
                DMWARN("Reinstate path not supported by path selector %s",
                       pgpath->pg->ps.type->name);
                r = -EINVAL;
                goto out;
        }

        r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
        if (r)
                goto out;

        pgpath->path.is_active = 1;

        m->current_pgpath = NULL;
        if (!m->nr_valid_paths++ && m->queue_size)
                queue_work(kmultipathd, &m->process_queued_ios);

        queue_work(kmultipathd, &m->trigger_event);

out:
        spin_unlock_irqrestore(&m->lock, flags);

        return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
                      action_fn action)
{
        int r = 0;
        struct pgpath *pgpath;
        struct priority_group *pg;

        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(pgpath, &pg->pgpaths, list) {
                        if (pgpath->path.dev == dev)
                                r = action(pgpath);
                }
        }

        return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
                      int bypassed)
{
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        pg->bypassed = bypassed;
        m->current_pgpath = NULL;
        m->current_pg = NULL;

        spin_unlock_irqrestore(&m->lock, flags);

        queue_work(kmultipathd, &m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
        struct priority_group *pg;
        unsigned pgnum;
        unsigned long flags;

        if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
            (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to switch_pg_num");
                return -EINVAL;
        }

        spin_lock_irqsave(&m->lock, flags);
        list_for_each_entry(pg, &m->priority_groups, list) {
                pg->bypassed = 0;
                if (--pgnum)
                        continue;

                m->current_pgpath = NULL;
                m->current_pg = NULL;
                m->next_pg = pg;
        }
        spin_unlock_irqrestore(&m->lock, flags);

        queue_work(kmultipathd, &m->trigger_event);
        return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
        struct priority_group *pg;
        unsigned pgnum;

        if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
            (pgnum > m->nr_priority_groups)) {
                DMWARN("invalid PG number supplied to bypass_pg");
                return -EINVAL;
        }

        list_for_each_entry(pg, &m->priority_groups, list) {
                if (!--pgnum)
                        break;
        }

        bypass_pg(m, pg, bypassed);
        return 0;
}

/*
 * pg_init must call this when it has completed its initialisation
 */
void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
{
        struct pgpath *pgpath = path_to_pgpath(path);
        struct priority_group *pg = pgpath->pg;
        struct multipath *m = pg->m;
        unsigned long flags;

        /* We insist on failing the path if the PG is already bypassed. */
        if (err_flags && pg->bypassed)
                err_flags |= MP_FAIL_PATH;

        if (err_flags & MP_FAIL_PATH)
                fail_path(pgpath);

        if (err_flags & MP_BYPASS_PG)
                bypass_pg(m, pg, 1);

        spin_lock_irqsave(&m->lock, flags);
        if (err_flags) {
                m->current_pgpath = NULL;
                m->current_pg = NULL;
        } else if (!m->pg_init_required)
                m->queue_io = 0;

        m->pg_init_in_progress = 0;
        queue_work(kmultipathd, &m->process_queued_ios);
        spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct bio *bio,
                     int error, struct mpath_io *mpio)
{
        struct hw_handler *hwh = &m->hw_handler;
        unsigned err_flags = MP_FAIL_PATH;      /* Default behavior */
        unsigned long flags;

        if (!error)
                return 0;       /* I/O complete */

        if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
                return error;

        if (error == -EOPNOTSUPP)
                return error;

        spin_lock_irqsave(&m->lock, flags);
        if (!m->nr_valid_paths) {
                if (__must_push_back(m)) {
                        spin_unlock_irqrestore(&m->lock, flags);
                        return DM_ENDIO_REQUEUE;
                } else if (!m->queue_if_no_path) {
                        spin_unlock_irqrestore(&m->lock, flags);
                        return -EIO;
                } else {
                        spin_unlock_irqrestore(&m->lock, flags);
                        goto requeue;
                }
        }
        spin_unlock_irqrestore(&m->lock, flags);

        if (hwh->type && hwh->type->error)
                err_flags = hwh->type->error(hwh, bio);

        if (mpio->pgpath) {
                if (err_flags & MP_FAIL_PATH)
                        fail_path(mpio->pgpath);

                if (err_flags & MP_BYPASS_PG)
                        bypass_pg(m, mpio->pgpath->pg, 1);
        }

        if (err_flags & MP_ERROR_IO)
                return -EIO;

requeue:
        dm_bio_restore(&mpio->details, bio);

        /* queue for the daemon to resubmit or fail */
        spin_lock_irqsave(&m->lock, flags);
        bio_list_add(&m->queued_ios, bio);
        m->queue_size++;
        if (!m->queue_io)
                queue_work(kmultipathd, &m->process_queued_ios);
        spin_unlock_irqrestore(&m->lock, flags);

        return DM_ENDIO_INCOMPLETE;     /* io not complete */
}

static int multipath_end_io(struct dm_target *ti, struct bio *bio,
                            int error, union map_info *map_context)
{
        struct multipath *m = (struct multipath *) ti->private;
        struct mpath_io *mpio = (struct mpath_io *) map_context->ptr;
        struct pgpath *pgpath = mpio->pgpath;
        struct path_selector *ps;
        int r;

        r = do_end_io(m, bio, error, mpio);
        if (pgpath) {
                ps = &pgpath->pg->ps;
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path);
        }
        if (r != DM_ENDIO_INCOMPLETE)
                mempool_free(mpio, m->mpio_pool);

        return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
        struct multipath *m = (struct multipath *) ti->private;

        queue_if_no_path(m, 0, 1);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
        struct multipath *m = (struct multipath *) ti->private;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);
        m->queue_if_no_path = m->saved_queue_if_no_path;
        spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
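/*
 * Illustrative only (assuming a round-robin selector that reports no
 * per-path info args): with one group of two healthy paths and no
 * hardware handler, an info line might read
 *   1 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 * i.e. one feature arg (the queued-bio count), no handler status, one
 * group, initial group 1, group state A, then both paths with fail counts.
 */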
static int multipath_status(struct dm_target *ti, status_type_t type,
                            char *result, unsigned int maxlen)
{
        int sz = 0;
        unsigned long flags;
        struct multipath *m = (struct multipath *) ti->private;
        struct hw_handler *hwh = &m->hw_handler;
        struct priority_group *pg;
        struct pgpath *p;
        unsigned pg_num;
        char state;

        spin_lock_irqsave(&m->lock, flags);

        /* Features */
        if (type == STATUSTYPE_INFO)
                DMEMIT("1 %u ", m->queue_size);
        else if (m->queue_if_no_path)
                DMEMIT("1 queue_if_no_path ");
        else
                DMEMIT("0 ");

        if (hwh->type && hwh->type->status)
                sz += hwh->type->status(hwh, type, result + sz, maxlen - sz);
        else if (!hwh->type || type == STATUSTYPE_INFO)
                DMEMIT("0 ");
        else
                DMEMIT("1 %s ", hwh->type->name);

        DMEMIT("%u ", m->nr_priority_groups);

        if (m->next_pg)
                pg_num = m->next_pg->pg_num;
        else if (m->current_pg)
                pg_num = m->current_pg->pg_num;
        else
                pg_num = 1;

        DMEMIT("%u ", pg_num);

        switch (type) {
        case STATUSTYPE_INFO:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed)
                                state = 'D';    /* Disabled */
                        else if (pg == m->current_pg)
                                state = 'A';    /* Currently Active */
                        else
                                state = 'E';    /* Enabled */

                        DMEMIT("%c ", state);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->info_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s %s %u ", p->path.dev->name,
                                       p->path.is_active ? "A" : "F",
                                       p->fail_count);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;

        case STATUSTYPE_TABLE:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        DMEMIT("%s ", pg->ps.type->name);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->table_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s ", p->path.dev->name);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;
        }

        spin_unlock_irqrestore(&m->lock, flags);

        return 0;
}

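/*
 * Illustrative only: these keywords are driven from userspace with
 * "dmsetup message", e.g.
 *   dmsetup message <mapname> 0 fail_path 8:32
 *   dmsetup message <mapname> 0 switch_group 2
 *   dmsetup message <mapname> 0 queue_if_no_path
 */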
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
        int r;
        struct dm_dev *dev;
        struct multipath *m = (struct multipath *) ti->private;
        action_fn action;

        if (argc == 1) {
                if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
                        return queue_if_no_path(m, 1, 0);
                else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
                        return queue_if_no_path(m, 0, 0);
        }

        if (argc != 2)
                goto error;

        if (!strnicmp(argv[0], MESG_STR("disable_group")))
                return bypass_pg_num(m, argv[1], 1);
        else if (!strnicmp(argv[0], MESG_STR("enable_group")))
                return bypass_pg_num(m, argv[1], 0);
        else if (!strnicmp(argv[0], MESG_STR("switch_group")))
                return switch_pg_num(m, argv[1]);
        else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
                action = reinstate_path;
        else if (!strnicmp(argv[0], MESG_STR("fail_path")))
                action = fail_path;
        else
                goto error;

        r = dm_get_device(ti, argv[1], ti->begin, ti->len,
                          dm_table_get_mode(ti->table), &dev);
        if (r) {
                DMWARN("message: error getting device %s",
                       argv[1]);
                return -EINVAL;
        }

        r = action_dev(m, dev, action);

        dm_put_device(ti, dev);

        return r;

error:
        DMWARN("Unrecognised multipath message received.");
        return -EINVAL;
}

static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
                           struct file *filp, unsigned int cmd,
                           unsigned long arg)
{
        struct multipath *m = (struct multipath *) ti->private;
        struct block_device *bdev = NULL;
        unsigned long flags;
        struct file fake_file = {};
        struct dentry fake_dentry = {};
        int r = 0;

        fake_file.f_path.dentry = &fake_dentry;

        spin_lock_irqsave(&m->lock, flags);

        if (!m->current_pgpath)
                __choose_pgpath(m);

        if (m->current_pgpath) {
                bdev = m->current_pgpath->path.dev->bdev;
                fake_dentry.d_inode = bdev->bd_inode;
                fake_file.f_mode = m->current_pgpath->path.dev->mode;
        }

        if (m->queue_io)
                r = -EAGAIN;
        else if (!bdev)
                r = -EIO;

        spin_unlock_irqrestore(&m->lock, flags);

        return r ? : blkdev_driver_ioctl(bdev->bd_inode, &fake_file,
                                         bdev->bd_disk, cmd, arg);
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
        .name = "multipath",
        .version = {1, 0, 5},
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
        .map = multipath_map,
        .end_io = multipath_end_io,
        .presuspend = multipath_presuspend,
        .resume = multipath_resume,
        .status = multipath_status,
        .message = multipath_message,
        .ioctl = multipath_ioctl,
};

static int __init dm_multipath_init(void)
{
        int r;

        /* allocate a slab for the dm_ios */
        _mpio_cache = kmem_cache_create("dm_mpath", sizeof(struct mpath_io),
                                        0, 0, NULL, NULL);
        if (!_mpio_cache)
                return -ENOMEM;

        r = dm_register_target(&multipath_target);
        if (r < 0) {
                DMERR("%s: register failed %d", multipath_target.name, r);
                kmem_cache_destroy(_mpio_cache);
                return -EINVAL;
        }

        kmultipathd = create_workqueue("kmpathd");
        if (!kmultipathd) {
                DMERR("%s: failed to create workqueue kmpathd",
                      multipath_target.name);
                dm_unregister_target(&multipath_target);
                kmem_cache_destroy(_mpio_cache);
                return -ENOMEM;
        }

        DMINFO("version %u.%u.%u loaded",
               multipath_target.version[0], multipath_target.version[1],
               multipath_target.version[2]);

        return r;
}

static void __exit dm_multipath_exit(void)
{
        int r;

        destroy_workqueue(kmultipathd);

        r = dm_unregister_target(&multipath_target);
        if (r < 0)
                DMERR("%s: target unregister failed %d",
                      multipath_target.name, r);
        kmem_cache_destroy(_mpio_cache);
}

EXPORT_SYMBOL_GPL(dm_pg_init_complete);

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");