drivers/md/dm-mpath.c [cascardo/linux.git]
1 /*
2  * Copyright (C) 2003 Sistina Software Limited.
3  * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include <linux/device-mapper.h>
9
10 #include "dm-rq.h"
11 #include "dm-bio-record.h"
12 #include "dm-path-selector.h"
13 #include "dm-uevent.h"
14
15 #include <linux/blkdev.h>
16 #include <linux/ctype.h>
17 #include <linux/init.h>
18 #include <linux/mempool.h>
19 #include <linux/module.h>
20 #include <linux/pagemap.h>
21 #include <linux/slab.h>
22 #include <linux/time.h>
23 #include <linux/workqueue.h>
24 #include <linux/delay.h>
25 #include <scsi/scsi_dh.h>
26 #include <linux/atomic.h>
27 #include <linux/blk-mq.h>
28
29 #define DM_MSG_PREFIX "multipath"
30 #define DM_PG_INIT_DELAY_MSECS 2000
31 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
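/*
 * DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) means no delay was configured
 * via the "pg_init_delay_msecs" feature; __pg_init_all_paths() then falls
 * back to DM_PG_INIT_DELAY_MSECS.
 */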
32
33 /* Path properties */
34 struct pgpath {
35         struct list_head list;
36
37         struct priority_group *pg;      /* Owning PG */
38         unsigned fail_count;            /* Cumulative failure count */
39
40         struct dm_path path;
41         struct delayed_work activate_path;
42
43         bool is_active:1;               /* Path status */
44 };
45
46 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
47
48 /*
49  * Paths are grouped into Priority Groups and numbered from 1 upwards.
50  * Each has a path selector which controls which path gets used.
51  */
52 struct priority_group {
53         struct list_head list;
54
55         struct multipath *m;            /* Owning multipath instance */
56         struct path_selector ps;
57
58         unsigned pg_num;                /* Reference number */
59         unsigned nr_pgpaths;            /* Number of paths in PG */
60         struct list_head pgpaths;
61
62         bool bypassed:1;                /* Temporarily bypass this PG? */
63 };
64
65 /* Multipath context */
66 struct multipath {
67         struct list_head list;
68         struct dm_target *ti;
69
70         const char *hw_handler_name;
71         char *hw_handler_params;
72
73         spinlock_t lock;
74
75         unsigned nr_priority_groups;
76         struct list_head priority_groups;
77
78         wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
79
80         struct pgpath *current_pgpath;
81         struct priority_group *current_pg;
82         struct priority_group *next_pg; /* Switch to this PG if set */
83
84         unsigned long flags;            /* Multipath state flags */
85
86         unsigned pg_init_retries;       /* Number of times to retry pg_init */
87         unsigned pg_init_delay_msecs;   /* Number of msecs before pg_init retry */
88
89         atomic_t nr_valid_paths;        /* Total number of usable paths */
90         atomic_t pg_init_in_progress;   /* Only one pg_init allowed at once */
91         atomic_t pg_init_count;         /* Number of times pg_init called */
92
93         unsigned queue_mode;
94
95         /*
96          * We must use a mempool of dm_mpath_io structs so that we
97          * can resubmit bios on error.
98          */
99         mempool_t *mpio_pool;
100
101         struct mutex work_mutex;
102         struct work_struct trigger_event;
103
104         struct work_struct process_queued_bios;
105         struct bio_list queued_bios;
106 };
107
108 /*
109  * Context information attached to each io we process.
110  */
111 struct dm_mpath_io {
112         struct pgpath *pgpath;
113         size_t nr_bytes;
114 };
115
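/*
 * Per-path action applied by action_dev(); fail_path() and
 * reinstate_path() match this signature.
 */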
116 typedef int (*action_fn) (struct pgpath *pgpath);
117
118 static struct kmem_cache *_mpio_cache;
119
120 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
121 static void trigger_event(struct work_struct *work);
122 static void activate_path(struct work_struct *work);
123 static void process_queued_bios(struct work_struct *work);
124
125 /*-----------------------------------------------
126  * Multipath state flags.
127  *-----------------------------------------------*/
128
129 #define MPATHF_QUEUE_IO 0                       /* Must we queue all I/O? */
130 #define MPATHF_QUEUE_IF_NO_PATH 1               /* Queue I/O if last path fails? */
131 #define MPATHF_SAVED_QUEUE_IF_NO_PATH 2         /* Saved state during suspension */
132 #define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3     /* If there's already a hw_handler present, don't change it. */
133 #define MPATHF_PG_INIT_DISABLED 4               /* pg_init is not currently allowed */
134 #define MPATHF_PG_INIT_REQUIRED 5               /* pg_init needs calling? */
135 #define MPATHF_PG_INIT_DELAY_RETRY 6            /* Delay pg_init retry? */
136
137 /*-----------------------------------------------
138  * Allocation routines
139  *-----------------------------------------------*/
140
141 static struct pgpath *alloc_pgpath(void)
142 {
143         struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
144
145         if (pgpath) {
146                 pgpath->is_active = true;
147                 INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
148         }
149
150         return pgpath;
151 }
152
153 static void free_pgpath(struct pgpath *pgpath)
154 {
155         kfree(pgpath);
156 }
157
158 static struct priority_group *alloc_priority_group(void)
159 {
160         struct priority_group *pg;
161
162         pg = kzalloc(sizeof(*pg), GFP_KERNEL);
163
164         if (pg)
165                 INIT_LIST_HEAD(&pg->pgpaths);
166
167         return pg;
168 }
169
170 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
171 {
172         struct pgpath *pgpath, *tmp;
173
174         list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
175                 list_del(&pgpath->list);
176                 dm_put_device(ti, pgpath->path.dev);
177                 free_pgpath(pgpath);
178         }
179 }
180
181 static void free_priority_group(struct priority_group *pg,
182                                 struct dm_target *ti)
183 {
184         struct path_selector *ps = &pg->ps;
185
186         if (ps->type) {
187                 ps->type->destroy(ps);
188                 dm_put_path_selector(ps->type);
189         }
190
191         free_pgpaths(&pg->pgpaths, ti);
192         kfree(pg);
193 }
194
195 static struct multipath *alloc_multipath(struct dm_target *ti)
196 {
197         struct multipath *m;
198
199         m = kzalloc(sizeof(*m), GFP_KERNEL);
200         if (m) {
201                 INIT_LIST_HEAD(&m->priority_groups);
202                 spin_lock_init(&m->lock);
203                 set_bit(MPATHF_QUEUE_IO, &m->flags);
204                 atomic_set(&m->nr_valid_paths, 0);
205                 atomic_set(&m->pg_init_in_progress, 0);
206                 atomic_set(&m->pg_init_count, 0);
207                 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
208                 INIT_WORK(&m->trigger_event, trigger_event);
209                 init_waitqueue_head(&m->pg_init_wait);
210                 mutex_init(&m->work_mutex);
211
212                 m->mpio_pool = NULL;
213                 m->queue_mode = DM_TYPE_NONE;
214
215                 m->ti = ti;
216                 ti->private = m;
217         }
218
219         return m;
220 }
221
222 static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
223 {
224         if (m->queue_mode == DM_TYPE_NONE) {
225                 /*
226                  * Default to request-based.
227                  */
228                 if (dm_use_blk_mq(dm_table_get_md(ti->table)))
229                         m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
230                 else
231                         m->queue_mode = DM_TYPE_REQUEST_BASED;
232         }
233
234         if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
235                 unsigned min_ios = dm_get_reserved_rq_based_ios();
236
237                 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
238                 if (!m->mpio_pool)
239                         return -ENOMEM;
240         }
241         else if (m->queue_mode == DM_TYPE_BIO_BASED) {
242                 INIT_WORK(&m->process_queued_bios, process_queued_bios);
243                 /*
244                  * bio-based doesn't support any direct scsi_dh management;
245                  * it just discovers if a scsi_dh is attached.
246                  */
247                 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
248         }
249
250         dm_table_set_type(ti->table, m->queue_mode);
251
252         return 0;
253 }
254
255 static void free_multipath(struct multipath *m)
256 {
257         struct priority_group *pg, *tmp;
258
259         list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
260                 list_del(&pg->list);
261                 free_priority_group(pg, m->ti);
262         }
263
264         kfree(m->hw_handler_name);
265         kfree(m->hw_handler_params);
266         mempool_destroy(m->mpio_pool);
267         kfree(m);
268 }
269
270 static struct dm_mpath_io *get_mpio(union map_info *info)
271 {
272         return info->ptr;
273 }
274
275 static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
276 {
277         struct dm_mpath_io *mpio;
278
279         if (!m->mpio_pool) {
280                 /* Use blk-mq pdu memory requested via per_io_data_size */
281                 mpio = get_mpio(info);
282                 memset(mpio, 0, sizeof(*mpio));
283                 return mpio;
284         }
285
286         mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
287         if (!mpio)
288                 return NULL;
289
290         memset(mpio, 0, sizeof(*mpio));
291         info->ptr = mpio;
292
293         return mpio;
294 }
295
296 static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
297 {
298         /* Only needed for non blk-mq (.request_fn) multipath */
299         if (m->mpio_pool) {
300                 struct dm_mpath_io *mpio = info->ptr;
301
302                 info->ptr = NULL;
303                 mempool_free(mpio, m->mpio_pool);
304         }
305 }
306
307 static size_t multipath_per_bio_data_size(void)
308 {
309         return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
310 }
311
312 static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
313 {
314         return dm_per_bio_data(bio, multipath_per_bio_data_size());
315 }
316
317 static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
318 {
319         /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
320         struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
321         void *bio_details = mpio + 1;
322
323         return bio_details;
324 }
325
326 static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
327                                         struct dm_bio_details **bio_details_p)
328 {
329         struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
330         struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
331
332         memset(mpio, 0, sizeof(*mpio));
333         memset(bio_details, 0, sizeof(*bio_details));
334         dm_bio_record(bio_details, bio);
335
336         if (mpio_p)
337                 *mpio_p = mpio;
338         if (bio_details_p)
339                 *bio_details_p = bio_details;
340 }
341
342 /*-----------------------------------------------
343  * Path selection
344  *-----------------------------------------------*/
345
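/*
 * Must be called with m->lock held; pg_init_all_paths() below is the
 * locking wrapper.
 */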
346 static int __pg_init_all_paths(struct multipath *m)
347 {
348         struct pgpath *pgpath;
349         unsigned long pg_init_delay = 0;
350
351         if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
352                 return 0;
353
354         atomic_inc(&m->pg_init_count);
355         clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
356
357         /* Check here to reset pg_init_required */
358         if (!m->current_pg)
359                 return 0;
360
361         if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
362                 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
363                                                  m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
364         list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
365                 /* Skip failed paths */
366                 if (!pgpath->is_active)
367                         continue;
368                 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
369                                        pg_init_delay))
370                         atomic_inc(&m->pg_init_in_progress);
371         }
372         return atomic_read(&m->pg_init_in_progress);
373 }
374
375 static int pg_init_all_paths(struct multipath *m)
376 {
377         int r;
378         unsigned long flags;
379
380         spin_lock_irqsave(&m->lock, flags);
381         r = __pg_init_all_paths(m);
382         spin_unlock_irqrestore(&m->lock, flags);
383
384         return r;
385 }
386
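/* Called with m->lock held. */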
387 static void __switch_pg(struct multipath *m, struct priority_group *pg)
388 {
389         m->current_pg = pg;
390
391         /* Must we initialise the PG first, and queue I/O till it's ready? */
392         if (m->hw_handler_name) {
393                 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
394                 set_bit(MPATHF_QUEUE_IO, &m->flags);
395         } else {
396                 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
397                 clear_bit(MPATHF_QUEUE_IO, &m->flags);
398         }
399
400         atomic_set(&m->pg_init_count, 0);
401 }
402
403 static struct pgpath *choose_path_in_pg(struct multipath *m,
404                                         struct priority_group *pg,
405                                         size_t nr_bytes)
406 {
407         unsigned long flags;
408         struct dm_path *path;
409         struct pgpath *pgpath;
410
411         path = pg->ps.type->select_path(&pg->ps, nr_bytes);
412         if (!path)
413                 return ERR_PTR(-ENXIO);
414
415         pgpath = path_to_pgpath(path);
416
417         if (unlikely(lockless_dereference(m->current_pg) != pg)) {
418                 /* Only update current_pgpath if pg changed */
419                 spin_lock_irqsave(&m->lock, flags);
420                 m->current_pgpath = pgpath;
421                 __switch_pg(m, pg);
422                 spin_unlock_irqrestore(&m->lock, flags);
423         }
424
425         return pgpath;
426 }
427
428 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
429 {
430         unsigned long flags;
431         struct priority_group *pg;
432         struct pgpath *pgpath;
433         bool bypassed = true;
434
435         if (!atomic_read(&m->nr_valid_paths)) {
436                 clear_bit(MPATHF_QUEUE_IO, &m->flags);
437                 goto failed;
438         }
439
440         /* Were we instructed to switch PG? */
441         if (lockless_dereference(m->next_pg)) {
442                 spin_lock_irqsave(&m->lock, flags);
443                 pg = m->next_pg;
444                 if (!pg) {
445                         spin_unlock_irqrestore(&m->lock, flags);
446                         goto check_current_pg;
447                 }
448                 m->next_pg = NULL;
449                 spin_unlock_irqrestore(&m->lock, flags);
450                 pgpath = choose_path_in_pg(m, pg, nr_bytes);
451                 if (!IS_ERR_OR_NULL(pgpath))
452                         return pgpath;
453         }
454
455         /* Don't change PG until it has no remaining paths */
456 check_current_pg:
457         pg = lockless_dereference(m->current_pg);
458         if (pg) {
459                 pgpath = choose_path_in_pg(m, pg, nr_bytes);
460                 if (!IS_ERR_OR_NULL(pgpath))
461                         return pgpath;
462         }
463
464         /*
465          * Loop through priority groups until we find a valid path.
466          * First time we skip PGs marked 'bypassed'.
467          * Second time we only try the ones we skipped, but set
468          * pg_init_delay_retry so we do not hammer controllers.
469          */
470         do {
471                 list_for_each_entry(pg, &m->priority_groups, list) {
472                         if (pg->bypassed == bypassed)
473                                 continue;
474                         pgpath = choose_path_in_pg(m, pg, nr_bytes);
475                         if (!IS_ERR_OR_NULL(pgpath)) {
476                                 if (!bypassed)
477                                         set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
478                                 return pgpath;
479                         }
480                 }
481         } while (bypassed--);
482
483 failed:
484         spin_lock_irqsave(&m->lock, flags);
485         m->current_pgpath = NULL;
486         m->current_pg = NULL;
487         spin_unlock_irqrestore(&m->lock, flags);
488
489         return NULL;
490 }
491
492 /*
493  * Check whether bios must be queued in the device-mapper core rather
494  * than here in the target.
495  *
496  * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
497  * same value then we are not between multipath_presuspend()
498  * and multipath_resume() calls and we have no need to check
499  * for the DMF_NOFLUSH_SUSPENDING flag.
500  */
501 static bool __must_push_back(struct multipath *m)
502 {
503         return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
504                  test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
505                 dm_noflush_suspending(m->ti));
506 }
507
508 static bool must_push_back_rq(struct multipath *m)
509 {
510         bool r;
511         unsigned long flags;
512
513         spin_lock_irqsave(&m->lock, flags);
514         r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
515              __must_push_back(m));
516         spin_unlock_irqrestore(&m->lock, flags);
517
518         return r;
519 }
520
521 static bool must_push_back_bio(struct multipath *m)
522 {
523         bool r;
524         unsigned long flags;
525
526         spin_lock_irqsave(&m->lock, flags);
527         r = __must_push_back(m);
528         spin_unlock_irqrestore(&m->lock, flags);
529
530         return r;
531 }
532
533 /*
534  * Map cloned requests (request-based multipath)
535  */
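/*
 * Called either with an already-allocated clone (old .request_fn interface)
 * or with rq/__clone, in which case a blk-mq request is allocated here.
 */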
536 static int __multipath_map(struct dm_target *ti, struct request *clone,
537                            union map_info *map_context,
538                            struct request *rq, struct request **__clone)
539 {
540         struct multipath *m = ti->private;
541         int r = DM_MAPIO_REQUEUE;
542         size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
543         struct pgpath *pgpath;
544         struct block_device *bdev;
545         struct dm_mpath_io *mpio;
546
547         /* Do we need to select a new pgpath? */
548         pgpath = lockless_dereference(m->current_pgpath);
549         if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
550                 pgpath = choose_pgpath(m, nr_bytes);
551
552         if (!pgpath) {
553                 if (must_push_back_rq(m))
554                         return DM_MAPIO_DELAY_REQUEUE;
555                 return -EIO;    /* Failed */
556         } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
557                    test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
558                 pg_init_all_paths(m);
559                 return r;
560         }
561
562         mpio = set_mpio(m, map_context);
563         if (!mpio)
564                 /* ENOMEM, requeue */
565                 return r;
566
567         mpio->pgpath = pgpath;
568         mpio->nr_bytes = nr_bytes;
569
570         bdev = pgpath->path.dev->bdev;
571
572         if (clone) {
573                 /*
574                  * Old request-based interface: allocated clone is passed in.
575                  * Used by: .request_fn stacked on .request_fn path(s).
576                  */
577                 clone->q = bdev_get_queue(bdev);
578                 clone->rq_disk = bdev->bd_disk;
579                 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
580         } else {
581                 /*
582                  * blk-mq request-based interface; used by both:
583                  * .request_fn stacked on blk-mq path(s) and
584                  * blk-mq stacked on blk-mq path(s).
585                  */
586                 *__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
587                                                 rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
588                 if (IS_ERR(*__clone)) {
589                         /* ENOMEM, requeue */
590                         clear_request_fn_mpio(m, map_context);
591                         return r;
592                 }
593                 (*__clone)->bio = (*__clone)->biotail = NULL;
594                 (*__clone)->rq_disk = bdev->bd_disk;
595                 (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
596         }
597
598         if (pgpath->pg->ps.type->start_io)
599                 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
600                                               &pgpath->path,
601                                               nr_bytes);
602         return DM_MAPIO_REMAPPED;
603 }
604
605 static int multipath_map(struct dm_target *ti, struct request *clone,
606                          union map_info *map_context)
607 {
608         return __multipath_map(ti, clone, map_context, NULL, NULL);
609 }
610
611 static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
612                                    union map_info *map_context,
613                                    struct request **clone)
614 {
615         return __multipath_map(ti, NULL, map_context, rq, clone);
616 }
617
618 static void multipath_release_clone(struct request *clone)
619 {
620         blk_mq_free_request(clone);
621 }
622
623 /*
624  * Map cloned bios (bio-based multipath)
625  */
626 static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
627 {
628         size_t nr_bytes = bio->bi_iter.bi_size;
629         struct pgpath *pgpath;
630         unsigned long flags;
631         bool queue_io;
632
633         /* Do we need to select a new pgpath? */
634         pgpath = lockless_dereference(m->current_pgpath);
635         queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
636         if (!pgpath || !queue_io)
637                 pgpath = choose_pgpath(m, nr_bytes);
638
639         if ((pgpath && queue_io) ||
640             (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
641                 /* Queue for the daemon to resubmit */
642                 spin_lock_irqsave(&m->lock, flags);
643                 bio_list_add(&m->queued_bios, bio);
644                 spin_unlock_irqrestore(&m->lock, flags);
645                 /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
646                 if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
647                         pg_init_all_paths(m);
648                 else if (!queue_io)
649                         queue_work(kmultipathd, &m->process_queued_bios);
650                 return DM_MAPIO_SUBMITTED;
651         }
652
653         if (!pgpath) {
654                 if (!must_push_back_bio(m))
655                         return -EIO;
656                 return DM_MAPIO_REQUEUE;
657         }
658
659         mpio->pgpath = pgpath;
660         mpio->nr_bytes = nr_bytes;
661
662         bio->bi_error = 0;
663         bio->bi_bdev = pgpath->path.dev->bdev;
664         bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
665
666         if (pgpath->pg->ps.type->start_io)
667                 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
668                                               &pgpath->path,
669                                               nr_bytes);
670         return DM_MAPIO_REMAPPED;
671 }
672
673 static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
674 {
675         struct multipath *m = ti->private;
676         struct dm_mpath_io *mpio = NULL;
677
678         multipath_init_per_bio_data(bio, &mpio, NULL);
679
680         return __multipath_map_bio(m, bio, mpio);
681 }
682
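/*
 * Kick any held-back I/O back into motion: the blk-mq requeue list for
 * request-based multipath, or the process_queued_bios work item for
 * bio-based multipath.
 */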
683 static void process_queued_io_list(struct multipath *m)
684 {
685         if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
686                 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
687         else if (m->queue_mode == DM_TYPE_BIO_BASED)
688                 queue_work(kmultipathd, &m->process_queued_bios);
689 }
690
691 static void process_queued_bios(struct work_struct *work)
692 {
693         int r;
694         unsigned long flags;
695         struct bio *bio;
696         struct bio_list bios;
697         struct blk_plug plug;
698         struct multipath *m =
699                 container_of(work, struct multipath, process_queued_bios);
700
701         bio_list_init(&bios);
702
703         spin_lock_irqsave(&m->lock, flags);
704
705         if (bio_list_empty(&m->queued_bios)) {
706                 spin_unlock_irqrestore(&m->lock, flags);
707                 return;
708         }
709
710         bio_list_merge(&bios, &m->queued_bios);
711         bio_list_init(&m->queued_bios);
712
713         spin_unlock_irqrestore(&m->lock, flags);
714
715         blk_start_plug(&plug);
716         while ((bio = bio_list_pop(&bios))) {
717                 r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
718                 if (r < 0 || r == DM_MAPIO_REQUEUE) {
719                         bio->bi_error = r;
720                         bio_endio(bio);
721                 } else if (r == DM_MAPIO_REMAPPED)
722                         generic_make_request(bio);
723         }
724         blk_finish_plug(&plug);
725 }
726
727 /*
728  * If we run out of usable paths, should we queue I/O or error it?
729  */
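/*
 * When save_old_value is set, the current queue_if_no_path setting is
 * preserved in MPATHF_SAVED_QUEUE_IF_NO_PATH (and restored by
 * multipath_resume()); otherwise the saved bit simply tracks the new value.
 */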
730 static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
731                             bool save_old_value)
732 {
733         unsigned long flags;
734
735         spin_lock_irqsave(&m->lock, flags);
736
737         if (save_old_value) {
738                 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
739                         set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
740                 else
741                         clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
742         } else {
743                 if (queue_if_no_path)
744                         set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
745                 else
746                         clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
747         }
748         if (queue_if_no_path)
749                 set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
750         else
751                 clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
752
753         spin_unlock_irqrestore(&m->lock, flags);
754
755         if (!queue_if_no_path) {
756                 dm_table_run_md_queue_async(m->ti->table);
757                 process_queued_io_list(m);
758         }
759
760         return 0;
761 }
762
763 /*
764  * An event is triggered whenever a path is taken out of use.
765  * Includes path failure and PG bypass.
766  */
767 static void trigger_event(struct work_struct *work)
768 {
769         struct multipath *m =
770                 container_of(work, struct multipath, trigger_event);
771
772         dm_table_event(m->ti->table);
773 }
774
775 /*-----------------------------------------------------------------
776  * Constructor/argument parsing:
777  * <#multipath feature args> [<arg>]*
778  * <#hw_handler args> [hw_handler [<arg>]*]
779  * <#priority groups>
780  * <initial priority group>
781  *     [<selector> <#selector args> [<arg>]*
782  *      <#paths> <#per-path selector args>
783  *         [<path> [<arg>]* ]+ ]+
784  *---------------------------------------------------------------*/
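/*
 * For example (hypothetical device numbers), a table with a single priority
 * group of two paths using the round-robin selector might look like:
 *
 *     0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 */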
785 static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
786                                struct dm_target *ti)
787 {
788         int r;
789         struct path_selector_type *pst;
790         unsigned ps_argc;
791
792         static struct dm_arg _args[] = {
793                 {0, 1024, "invalid number of path selector args"},
794         };
795
796         pst = dm_get_path_selector(dm_shift_arg(as));
797         if (!pst) {
798                 ti->error = "unknown path selector type";
799                 return -EINVAL;
800         }
801
802         r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
803         if (r) {
804                 dm_put_path_selector(pst);
805                 return -EINVAL;
806         }
807
808         r = pst->create(&pg->ps, ps_argc, as->argv);
809         if (r) {
810                 dm_put_path_selector(pst);
811                 ti->error = "path selector constructor failed";
812                 return r;
813         }
814
815         pg->ps.type = pst;
816         dm_consume_args(as, ps_argc);
817
818         return 0;
819 }
820
821 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
822                                struct dm_target *ti)
823 {
824         int r;
825         struct pgpath *p;
826         struct multipath *m = ti->private;
827         struct request_queue *q = NULL;
828         const char *attached_handler_name;
829
830         /* we need at least a path arg */
831         if (as->argc < 1) {
832                 ti->error = "no device given";
833                 return ERR_PTR(-EINVAL);
834         }
835
836         p = alloc_pgpath();
837         if (!p)
838                 return ERR_PTR(-ENOMEM);
839
840         r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
841                           &p->path.dev);
842         if (r) {
843                 ti->error = "error getting device";
844                 goto bad;
845         }
846
847         if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
848                 q = bdev_get_queue(p->path.dev->bdev);
849
850         if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
851 retain:
852                 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
853                 if (attached_handler_name) {
854                         /*
855                          * Reset hw_handler_name to match the attached handler
856                          * and clear any hw_handler_params associated with the
857                          * ignored handler.
858                          *
859                          * NB. This modifies the table line to show the actual
860                          * handler instead of the original table passed in.
861                          */
862                         kfree(m->hw_handler_name);
863                         m->hw_handler_name = attached_handler_name;
864
865                         kfree(m->hw_handler_params);
866                         m->hw_handler_params = NULL;
867                 }
868         }
869
870         if (m->hw_handler_name) {
871                 r = scsi_dh_attach(q, m->hw_handler_name);
872                 if (r == -EBUSY) {
873                         char b[BDEVNAME_SIZE];
874
875                         printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
876                                 bdevname(p->path.dev->bdev, b));
877                         goto retain;
878                 }
879                 if (r < 0) {
880                         ti->error = "error attaching hardware handler";
881                         dm_put_device(ti, p->path.dev);
882                         goto bad;
883                 }
884
885                 if (m->hw_handler_params) {
886                         r = scsi_dh_set_params(q, m->hw_handler_params);
887                         if (r < 0) {
888                                 ti->error = "unable to set hardware "
889                                                         "handler parameters";
890                                 dm_put_device(ti, p->path.dev);
891                                 goto bad;
892                         }
893                 }
894         }
895
896         r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
897         if (r) {
898                 dm_put_device(ti, p->path.dev);
899                 goto bad;
900         }
901
902         return p;
903
904  bad:
905         free_pgpath(p);
906         return ERR_PTR(r);
907 }
908
909 static struct priority_group *parse_priority_group(struct dm_arg_set *as,
910                                                    struct multipath *m)
911 {
912         static struct dm_arg _args[] = {
913                 {1, 1024, "invalid number of paths"},
914                 {0, 1024, "invalid number of selector args"}
915         };
916
917         int r;
918         unsigned i, nr_selector_args, nr_args;
919         struct priority_group *pg;
920         struct dm_target *ti = m->ti;
921
922         if (as->argc < 2) {
923                 as->argc = 0;
924                 ti->error = "not enough priority group arguments";
925                 return ERR_PTR(-EINVAL);
926         }
927
928         pg = alloc_priority_group();
929         if (!pg) {
930                 ti->error = "couldn't allocate priority group";
931                 return ERR_PTR(-ENOMEM);
932         }
933         pg->m = m;
934
935         r = parse_path_selector(as, pg, ti);
936         if (r)
937                 goto bad;
938
939         /*
940          * read the paths
941          */
942         r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
943         if (r)
944                 goto bad;
945
946         r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
947         if (r)
948                 goto bad;
949
950         nr_args = 1 + nr_selector_args;
951         for (i = 0; i < pg->nr_pgpaths; i++) {
952                 struct pgpath *pgpath;
953                 struct dm_arg_set path_args;
954
955                 if (as->argc < nr_args) {
956                         ti->error = "not enough path parameters";
957                         r = -EINVAL;
958                         goto bad;
959                 }
960
961                 path_args.argc = nr_args;
962                 path_args.argv = as->argv;
963
964                 pgpath = parse_path(&path_args, &pg->ps, ti);
965                 if (IS_ERR(pgpath)) {
966                         r = PTR_ERR(pgpath);
967                         goto bad;
968                 }
969
970                 pgpath->pg = pg;
971                 list_add_tail(&pgpath->list, &pg->pgpaths);
972                 dm_consume_args(as, nr_args);
973         }
974
975         return pg;
976
977  bad:
978         free_priority_group(pg, ti);
979         return ERR_PTR(r);
980 }
981
982 static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
983 {
984         unsigned hw_argc;
985         int ret;
986         struct dm_target *ti = m->ti;
987
988         static struct dm_arg _args[] = {
989                 {0, 1024, "invalid number of hardware handler args"},
990         };
991
992         if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
993                 return -EINVAL;
994
995         if (!hw_argc)
996                 return 0;
997
998         if (m->queue_mode == DM_TYPE_BIO_BASED) {
999                 dm_consume_args(as, hw_argc);
1000                 DMERR("bio-based multipath doesn't allow hardware handler args");
1001                 return 0;
1002         }
1003
1004         m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1005
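        /*
         * hw_handler_params is built as a packed list of NUL-terminated
         * strings: the argument count first, then each handler argument.
         */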
1006         if (hw_argc > 1) {
1007                 char *p;
1008                 int i, j, len = 4;
1009
1010                 for (i = 0; i <= hw_argc - 2; i++)
1011                         len += strlen(as->argv[i]) + 1;
1012                 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1013                 if (!p) {
1014                         ti->error = "memory allocation failed";
1015                         ret = -ENOMEM;
1016                         goto fail;
1017                 }
1018                 j = sprintf(p, "%d", hw_argc - 1);
1019                 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
1020                         j = sprintf(p, "%s", as->argv[i]);
1021         }
1022         dm_consume_args(as, hw_argc - 1);
1023
1024         return 0;
1025 fail:
1026         kfree(m->hw_handler_name);
1027         m->hw_handler_name = NULL;
1028         return ret;
1029 }
1030
1031 static int parse_features(struct dm_arg_set *as, struct multipath *m)
1032 {
1033         int r;
1034         unsigned argc;
1035         struct dm_target *ti = m->ti;
1036         const char *arg_name;
1037
1038         static struct dm_arg _args[] = {
1039                 {0, 8, "invalid number of feature args"},
1040                 {1, 50, "pg_init_retries must be between 1 and 50"},
1041                 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1042         };
1043
1044         r = dm_read_arg_group(_args, as, &argc, &ti->error);
1045         if (r)
1046                 return -EINVAL;
1047
1048         if (!argc)
1049                 return 0;
1050
1051         do {
1052                 arg_name = dm_shift_arg(as);
1053                 argc--;
1054
1055                 if (!strcasecmp(arg_name, "queue_if_no_path")) {
1056                         r = queue_if_no_path(m, true, false);
1057                         continue;
1058                 }
1059
1060                 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1061                         set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1062                         continue;
1063                 }
1064
1065                 if (!strcasecmp(arg_name, "pg_init_retries") &&
1066                     (argc >= 1)) {
1067                         r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1068                         argc--;
1069                         continue;
1070                 }
1071
1072                 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1073                     (argc >= 1)) {
1074                         r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1075                         argc--;
1076                         continue;
1077                 }
1078
1079                 if (!strcasecmp(arg_name, "queue_mode") &&
1080                     (argc >= 1)) {
1081                         const char *queue_mode_name = dm_shift_arg(as);
1082
1083                         if (!strcasecmp(queue_mode_name, "bio"))
1084                                 m->queue_mode = DM_TYPE_BIO_BASED;
1085                         else if (!strcasecmp(queue_mode_name, "rq"))
1086                                 m->queue_mode = DM_TYPE_REQUEST_BASED;
1087                         else if (!strcasecmp(queue_mode_name, "mq"))
1088                                 m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
1089                         else {
1090                                 ti->error = "Unknown 'queue_mode' requested";
1091                                 r = -EINVAL;
1092                         }
1093                         argc--;
1094                         continue;
1095                 }
1096
1097                 ti->error = "Unrecognised multipath feature request";
1098                 r = -EINVAL;
1099         } while (argc && !r);
1100
1101         return r;
1102 }
1103
1104 static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1105 {
1106         /* target arguments */
1107         static struct dm_arg _args[] = {
1108                 {0, 1024, "invalid number of priority groups"},
1109                 {0, 1024, "invalid initial priority group number"},
1110         };
1111
1112         int r;
1113         struct multipath *m;
1114         struct dm_arg_set as;
1115         unsigned pg_count = 0;
1116         unsigned next_pg_num;
1117
1118         as.argc = argc;
1119         as.argv = argv;
1120
1121         m = alloc_multipath(ti);
1122         if (!m) {
1123                 ti->error = "can't allocate multipath";
1124                 return -EINVAL;
1125         }
1126
1127         r = parse_features(&as, m);
1128         if (r)
1129                 goto bad;
1130
1131         r = alloc_multipath_stage2(ti, m);
1132         if (r)
1133                 goto bad;
1134
1135         r = parse_hw_handler(&as, m);
1136         if (r)
1137                 goto bad;
1138
1139         r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1140         if (r)
1141                 goto bad;
1142
1143         r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1144         if (r)
1145                 goto bad;
1146
1147         if ((!m->nr_priority_groups && next_pg_num) ||
1148             (m->nr_priority_groups && !next_pg_num)) {
1149                 ti->error = "invalid initial priority group";
1150                 r = -EINVAL;
1151                 goto bad;
1152         }
1153
1154         /* parse the priority groups */
1155         while (as.argc) {
1156                 struct priority_group *pg;
1157                 unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1158
1159                 pg = parse_priority_group(&as, m);
1160                 if (IS_ERR(pg)) {
1161                         r = PTR_ERR(pg);
1162                         goto bad;
1163                 }
1164
1165                 nr_valid_paths += pg->nr_pgpaths;
1166                 atomic_set(&m->nr_valid_paths, nr_valid_paths);
1167
1168                 list_add_tail(&pg->list, &m->priority_groups);
1169                 pg_count++;
1170                 pg->pg_num = pg_count;
1171                 if (!--next_pg_num)
1172                         m->next_pg = pg;
1173         }
1174
1175         if (pg_count != m->nr_priority_groups) {
1176                 ti->error = "priority group count mismatch";
1177                 r = -EINVAL;
1178                 goto bad;
1179         }
1180
1181         ti->num_flush_bios = 1;
1182         ti->num_discard_bios = 1;
1183         ti->num_write_same_bios = 1;
1184         if (m->queue_mode == DM_TYPE_BIO_BASED)
1185                 ti->per_io_data_size = multipath_per_bio_data_size();
1186         else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
1187                 ti->per_io_data_size = sizeof(struct dm_mpath_io);
1188
1189         return 0;
1190
1191  bad:
1192         free_multipath(m);
1193         return r;
1194 }
1195
1196 static void multipath_wait_for_pg_init_completion(struct multipath *m)
1197 {
1198         DEFINE_WAIT(wait);
1199
1200         while (1) {
1201                 prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1202
1203                 if (!atomic_read(&m->pg_init_in_progress))
1204                         break;
1205
1206                 io_schedule();
1207         }
1208         finish_wait(&m->pg_init_wait, &wait);
1209 }
1210
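/*
 * Disable pg_init across the flush so that no new path activation work can
 * be queued behind the work we are waiting to complete.
 */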
1211 static void flush_multipath_work(struct multipath *m)
1212 {
1213         set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1214         smp_mb__after_atomic();
1215
1216         flush_workqueue(kmpath_handlerd);
1217         multipath_wait_for_pg_init_completion(m);
1218         flush_workqueue(kmultipathd);
1219         flush_work(&m->trigger_event);
1220
1221         clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1222         smp_mb__after_atomic();
1223 }
1224
1225 static void multipath_dtr(struct dm_target *ti)
1226 {
1227         struct multipath *m = ti->private;
1228
1229         flush_multipath_work(m);
1230         free_multipath(m);
1231 }
1232
1233 /*
1234  * Take a path out of use.
1235  */
1236 static int fail_path(struct pgpath *pgpath)
1237 {
1238         unsigned long flags;
1239         struct multipath *m = pgpath->pg->m;
1240
1241         spin_lock_irqsave(&m->lock, flags);
1242
1243         if (!pgpath->is_active)
1244                 goto out;
1245
1246         DMWARN("Failing path %s.", pgpath->path.dev->name);
1247
1248         pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1249         pgpath->is_active = false;
1250         pgpath->fail_count++;
1251
1252         atomic_dec(&m->nr_valid_paths);
1253
1254         if (pgpath == m->current_pgpath)
1255                 m->current_pgpath = NULL;
1256
1257         dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1258                        pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1259
1260         schedule_work(&m->trigger_event);
1261
1262 out:
1263         spin_unlock_irqrestore(&m->lock, flags);
1264
1265         return 0;
1266 }
1267
1268 /*
1269  * Reinstate a previously-failed path
1270  */
1271 static int reinstate_path(struct pgpath *pgpath)
1272 {
1273         int r = 0, run_queue = 0;
1274         unsigned long flags;
1275         struct multipath *m = pgpath->pg->m;
1276         unsigned nr_valid_paths;
1277
1278         spin_lock_irqsave(&m->lock, flags);
1279
1280         if (pgpath->is_active)
1281                 goto out;
1282
1283         DMWARN("Reinstating path %s.", pgpath->path.dev->name);
1284
1285         r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1286         if (r)
1287                 goto out;
1288
1289         pgpath->is_active = true;
1290
1291         nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1292         if (nr_valid_paths == 1) {
1293                 m->current_pgpath = NULL;
1294                 run_queue = 1;
1295         } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1296                 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1297                         atomic_inc(&m->pg_init_in_progress);
1298         }
1299
1300         dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1301                        pgpath->path.dev->name, nr_valid_paths);
1302
1303         schedule_work(&m->trigger_event);
1304
1305 out:
1306         spin_unlock_irqrestore(&m->lock, flags);
1307         if (run_queue) {
1308                 dm_table_run_md_queue_async(m->ti->table);
1309                 process_queued_io_list(m);
1310         }
1311
1312         return r;
1313 }
1314
1315 /*
1316  * Fail or reinstate all paths that match the provided struct dm_dev.
1317  */
1318 static int action_dev(struct multipath *m, struct dm_dev *dev,
1319                       action_fn action)
1320 {
1321         int r = -EINVAL;
1322         struct pgpath *pgpath;
1323         struct priority_group *pg;
1324
1325         list_for_each_entry(pg, &m->priority_groups, list) {
1326                 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1327                         if (pgpath->path.dev == dev)
1328                                 r = action(pgpath);
1329                 }
1330         }
1331
1332         return r;
1333 }
1334
1335 /*
1336  * Temporarily try to avoid having to use the specified PG
1337  */
1338 static void bypass_pg(struct multipath *m, struct priority_group *pg,
1339                       bool bypassed)
1340 {
1341         unsigned long flags;
1342
1343         spin_lock_irqsave(&m->lock, flags);
1344
1345         pg->bypassed = bypassed;
1346         m->current_pgpath = NULL;
1347         m->current_pg = NULL;
1348
1349         spin_unlock_irqrestore(&m->lock, flags);
1350
1351         schedule_work(&m->trigger_event);
1352 }
1353
1354 /*
1355  * Switch to using the specified PG from the next I/O that gets mapped
1356  */
1357 static int switch_pg_num(struct multipath *m, const char *pgstr)
1358 {
1359         struct priority_group *pg;
1360         unsigned pgnum;
1361         unsigned long flags;
1362         char dummy;
1363
1364         if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1365             (pgnum > m->nr_priority_groups)) {
1366                 DMWARN("invalid PG number supplied to switch_pg_num");
1367                 return -EINVAL;
1368         }
1369
1370         spin_lock_irqsave(&m->lock, flags);
1371         list_for_each_entry(pg, &m->priority_groups, list) {
1372                 pg->bypassed = false;
1373                 if (--pgnum)
1374                         continue;
1375
1376                 m->current_pgpath = NULL;
1377                 m->current_pg = NULL;
1378                 m->next_pg = pg;
1379         }
1380         spin_unlock_irqrestore(&m->lock, flags);
1381
1382         schedule_work(&m->trigger_event);
1383         return 0;
1384 }
1385
1386 /*
1387  * Set/clear bypassed status of a PG.
1388  * PGs are numbered upwards from 1 in the order they were declared.
1389  */
1390 static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1391 {
1392         struct priority_group *pg;
1393         unsigned pgnum;
1394         char dummy;
1395
1396         if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1397             (pgnum > m->nr_priority_groups)) {
1398                 DMWARN("invalid PG number supplied to bypass_pg");
1399                 return -EINVAL;
1400         }
1401
1402         list_for_each_entry(pg, &m->priority_groups, list) {
1403                 if (!--pgnum)
1404                         break;
1405         }
1406
1407         bypass_pg(m, pg, bypassed);
1408         return 0;
1409 }
1410
1411 /*
1412  * Should we retry pg_init immediately?
1413  */
1414 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1415 {
1416         unsigned long flags;
1417         bool limit_reached = false;
1418
1419         spin_lock_irqsave(&m->lock, flags);
1420
1421         if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1422             !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1423                 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1424         else
1425                 limit_reached = true;
1426
1427         spin_unlock_irqrestore(&m->lock, flags);
1428
1429         return limit_reached;
1430 }
1431
1432 static void pg_init_done(void *data, int errors)
1433 {
1434         struct pgpath *pgpath = data;
1435         struct priority_group *pg = pgpath->pg;
1436         struct multipath *m = pg->m;
1437         unsigned long flags;
1438         bool delay_retry = false;
1439
1440         /* device or driver problems */
1441         switch (errors) {
1442         case SCSI_DH_OK:
1443                 break;
1444         case SCSI_DH_NOSYS:
1445                 if (!m->hw_handler_name) {
1446                         errors = 0;
1447                         break;
1448                 }
1449                 DMERR("Could not failover the device: Handler scsi_dh_%s "
1450                       "Error %d.", m->hw_handler_name, errors);
1451                 /*
1452                  * Fail the path for now, so we do not ping-pong
1453                  */
1454                 fail_path(pgpath);
1455                 break;
1456         case SCSI_DH_DEV_TEMP_BUSY:
1457                 /*
1458                  * Probably doing something like FW upgrade on the
1459                  * controller, so try the other PG.
1460                  */
1461                 bypass_pg(m, pg, true);
1462                 break;
1463         case SCSI_DH_RETRY:
1464                 /* Wait before retrying. */
1465                 delay_retry = true;     /* falls through to the retry handling below */
1466         case SCSI_DH_IMM_RETRY:
1467         case SCSI_DH_RES_TEMP_UNAVAIL:
1468                 if (pg_init_limit_reached(m, pgpath))
1469                         fail_path(pgpath);
1470                 errors = 0;
1471                 break;
1472         case SCSI_DH_DEV_OFFLINED:
1473         default:
1474                 /*
1475                  * We probably do not want to fail the path for a device
1476                  * error, but this is what the old dm did. In future
1477                  * patches we can do more advanced handling.
1478                  */
1479                 fail_path(pgpath);
1480         }
1481
1482         spin_lock_irqsave(&m->lock, flags);
1483         if (errors) {
1484                 if (pgpath == m->current_pgpath) {
1485                         DMERR("Could not failover device. Error %d.", errors);
1486                         m->current_pgpath = NULL;
1487                         m->current_pg = NULL;
1488                 }
1489         } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1490                 pg->bypassed = false;
1491
1492         if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1493                 /* Activations of other paths are still ongoing */
1494                 goto out;
1495
1496         if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1497                 if (delay_retry)
1498                         set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1499                 else
1500                         clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1501
1502                 if (__pg_init_all_paths(m))
1503                         goto out;
1504         }
1505         clear_bit(MPATHF_QUEUE_IO, &m->flags);
1506
1507         process_queued_io_list(m);
1508
1509         /*
1510          * Wake up any thread waiting to suspend.
1511          */
1512         wake_up(&m->pg_init_wait);
1513
1514 out:
1515         spin_unlock_irqrestore(&m->lock, flags);
1516 }
1517
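/*
 * Worker for pgpath->activate_path: ask the attached scsi_dh to activate
 * the path; the result is handled in pg_init_done().
 */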
1518 static void activate_path(struct work_struct *work)
1519 {
1520         struct pgpath *pgpath =
1521                 container_of(work, struct pgpath, activate_path.work);
1522         struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1523
1524         if (pgpath->is_active && !blk_queue_dying(q))
1525                 scsi_dh_activate(q, pg_init_done, pgpath);
1526         else
1527                 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1528 }
1529
1530 static int noretry_error(int error)
1531 {
1532         switch (error) {
1533         case -EBADE:
1534                 /*
1535                  * EBADE signals a reservation conflict.
1536                  * We shouldn't fail the path here as we can communicate with
1537                  * the target.  We should fail over to the next path, but in
1538                  * doing so we might be causing a ping-pong between paths.
1539                  * So just return the reservation conflict error.
1540                  */
1541         case -EOPNOTSUPP:
1542         case -EREMOTEIO:
1543         case -EILSEQ:
1544         case -ENODATA:
1545         case -ENOSPC:
1546                 return 1;
1547         }
1548
1549         /* Anything else could be a path failure, so should be retried */
1550         return 0;
1551 }
1552
1553 /*
1554  * end_io handling
1555  */
1556 static int do_end_io(struct multipath *m, struct request *clone,
1557                      int error, struct dm_mpath_io *mpio)
1558 {
1559         /*
1560          * We don't queue any clone request inside the multipath target
1561          * during end I/O handling, since those clone requests don't have
1562          * bio clones.  If we queue them inside the multipath target,
1563          * we need to make bio clones, that requires memory allocation.
1564          * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1565          *  don't have bio clones.)
1566          * Instead of queueing the clone request here, we queue the original
1567          * request into dm core, which will remake a clone request and
1568          * clone bios for it and resubmit it later.
1569          */
1570         int r = DM_ENDIO_REQUEUE;
1571
1572         if (!error && !clone->errors)
1573                 return 0;       /* I/O complete */
1574
1575         if (noretry_error(error))
1576                 return error;
1577
1578         if (mpio->pgpath)
1579                 fail_path(mpio->pgpath);
1580
1581         if (!atomic_read(&m->nr_valid_paths)) {
1582                 if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1583                         if (!must_push_back_rq(m))
1584                                 r = -EIO;
1585                 }
1586         }
1587
1588         return r;
1589 }
1590
1591 static int multipath_end_io(struct dm_target *ti, struct request *clone,
1592                             int error, union map_info *map_context)
1593 {
1594         struct multipath *m = ti->private;
1595         struct dm_mpath_io *mpio = get_mpio(map_context);
1596         struct pgpath *pgpath;
1597         struct path_selector *ps;
1598         int r;
1599
1600         BUG_ON(!mpio);
1601
1602         r = do_end_io(m, clone, error, mpio);
1603         pgpath = mpio->pgpath;
1604         if (pgpath) {
1605                 ps = &pgpath->pg->ps;
1606                 if (ps->type->end_io)
1607                         ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1608         }
1609         clear_request_fn_mpio(m, map_context);
1610
1611         return r;
1612 }
1613
1614 static int do_end_io_bio(struct multipath *m, struct bio *clone,
1615                          int error, struct dm_mpath_io *mpio)
1616 {
1617         unsigned long flags;
1618
1619         if (!error)
1620                 return 0;       /* I/O complete */
1621
1622         if (noretry_error(error))
1623                 return error;
1624
1625         if (mpio->pgpath)
1626                 fail_path(mpio->pgpath);
1627
1628         if (!atomic_read(&m->nr_valid_paths)) {
1629                 if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1630                         if (!must_push_back_bio(m))
1631                                 return -EIO;
1632                         return DM_ENDIO_REQUEUE;
1633                 }
1634         }
1635
1636         /* Queue for the daemon to resubmit */
1637         dm_bio_restore(get_bio_details_from_bio(clone), clone);
1638
1639         spin_lock_irqsave(&m->lock, flags);
1640         bio_list_add(&m->queued_bios, clone);
1641         spin_unlock_irqrestore(&m->lock, flags);
1642         if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
1643                 queue_work(kmultipathd, &m->process_queued_bios);
1644
1645         return DM_ENDIO_INCOMPLETE;
1646 }
1647
1648 static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
1649 {
1650         struct multipath *m = ti->private;
1651         struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1652         struct pgpath *pgpath;
1653         struct path_selector *ps;
1654         int r;
1655
1656         BUG_ON(!mpio);
1657
1658         r = do_end_io_bio(m, clone, error, mpio);
1659         pgpath = mpio->pgpath;
1660         if (pgpath) {
1661                 ps = &pgpath->pg->ps;
1662                 if (ps->type->end_io)
1663                         ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1664         }
1665
1666         return r;
1667 }
1668
1669 /*
1670  * Suspend can't complete until all the I/O is processed, so if
1671  * the last path fails we must error any remaining I/O.
1672  * Note that if freeze_bdev fails while suspending, the
1673  * queue_if_no_path state is lost - userspace should reset it.
1674  */
1675 static void multipath_presuspend(struct dm_target *ti)
1676 {
1677         struct multipath *m = ti->private;
1678
1679         queue_if_no_path(m, false, true);
1680 }
1681
1682 static void multipath_postsuspend(struct dm_target *ti)
1683 {
1684         struct multipath *m = ti->private;
1685
1686         mutex_lock(&m->work_mutex);
1687         flush_multipath_work(m);
1688         mutex_unlock(&m->work_mutex);
1689 }
1690
1691 /*
1692  * Restore the queue_if_no_path setting.
1693  */
1694 static void multipath_resume(struct dm_target *ti)
1695 {
1696         struct multipath *m = ti->private;
1697         unsigned long flags;
1698
1699         spin_lock_irqsave(&m->lock, flags);
1700         if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
1701                 set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1702         else
1703                 clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1704         spin_unlock_irqrestore(&m->lock, flags);
1705 }
1706
1707 /*
1708  * Info output has the following format:
1709  * num_multipath_feature_args [multipath_feature_args]*
1710  * num_handler_status_args [handler_status_args]*
1711  * num_groups init_group_number
1712  *            [A|D|E num_ps_status_args [ps_status_args]*
1713  *             num_paths num_selector_args
1714  *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1715  *
1716  * Table output has the following format (identical to the constructor string):
1717  * num_feature_args [features_args]*
1718  * num_handler_args hw_handler [hw_handler_args]*
1719  * num_groups init_group_number
1720  *     [priority selector-name num_ps_args [ps_args]*
1721  *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1722  */
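/*
 * As a rough illustration of the table format above (device numbers and
 * repeat counts are made up for this sketch), a two-group round-robin
 * table might be emitted as:
 *
 *   0 0 2 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *           round-robin 0 2 1 65:32 1000 65:48 1000
 *
 * i.e. no feature args, no hardware handler, two priority groups starting
 * with group 1, each using the round-robin selector with two paths and one
 * selector arg (the repeat count) per path.
 */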
1723 static void multipath_status(struct dm_target *ti, status_type_t type,
1724                              unsigned status_flags, char *result, unsigned maxlen)
1725 {
1726         int sz = 0;
1727         unsigned long flags;
1728         struct multipath *m = ti->private;
1729         struct priority_group *pg;
1730         struct pgpath *p;
1731         unsigned pg_num;
1732         char state;
1733
1734         spin_lock_irqsave(&m->lock, flags);
1735
1736         /* Features */
1737         if (type == STATUSTYPE_INFO)
1738                 DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1739                        atomic_read(&m->pg_init_count));
1740         else {
1741                 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1742                               (m->pg_init_retries > 0) * 2 +
1743                               (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1744                               test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1745                               (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1746
1747                 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1748                         DMEMIT("queue_if_no_path ");
1749                 if (m->pg_init_retries)
1750                         DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1751                 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1752                         DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1753                 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1754                         DMEMIT("retain_attached_hw_handler ");
1755                 if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1756                         switch (m->queue_mode) {
1757                         case DM_TYPE_BIO_BASED:
1758                                 DMEMIT("queue_mode bio ");
1759                                 break;
1760                         case DM_TYPE_MQ_REQUEST_BASED:
1761                                 DMEMIT("queue_mode mq ");
1762                                 break;
1763                         }
1764                 }
1765         }
1766
1767         if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1768                 DMEMIT("0 ");
1769         else
1770                 DMEMIT("1 %s ", m->hw_handler_name);
1771
1772         DMEMIT("%u ", m->nr_priority_groups);
1773
1774         if (m->next_pg)
1775                 pg_num = m->next_pg->pg_num;
1776         else if (m->current_pg)
1777                 pg_num = m->current_pg->pg_num;
1778         else
1779                 pg_num = (m->nr_priority_groups ? 1 : 0);
1780
1781         DMEMIT("%u ", pg_num);
1782
1783         switch (type) {
1784         case STATUSTYPE_INFO:
1785                 list_for_each_entry(pg, &m->priority_groups, list) {
1786                         if (pg->bypassed)
1787                                 state = 'D';    /* Disabled */
1788                         else if (pg == m->current_pg)
1789                                 state = 'A';    /* Currently Active */
1790                         else
1791                                 state = 'E';    /* Enabled */
1792
1793                         DMEMIT("%c ", state);
1794
1795                         if (pg->ps.type->status)
1796                                 sz += pg->ps.type->status(&pg->ps, NULL, type,
1797                                                           result + sz,
1798                                                           maxlen - sz);
1799                         else
1800                                 DMEMIT("0 ");
1801
1802                         DMEMIT("%u %u ", pg->nr_pgpaths,
1803                                pg->ps.type->info_args);
1804
1805                         list_for_each_entry(p, &pg->pgpaths, list) {
1806                                 DMEMIT("%s %s %u ", p->path.dev->name,
1807                                        p->is_active ? "A" : "F",
1808                                        p->fail_count);
1809                                 if (pg->ps.type->status)
1810                                         sz += pg->ps.type->status(&pg->ps,
1811                                               &p->path, type, result + sz,
1812                                               maxlen - sz);
1813                         }
1814                 }
1815                 break;
1816
1817         case STATUSTYPE_TABLE:
1818                 list_for_each_entry(pg, &m->priority_groups, list) {
1819                         DMEMIT("%s ", pg->ps.type->name);
1820
1821                         if (pg->ps.type->status)
1822                                 sz += pg->ps.type->status(&pg->ps, NULL, type,
1823                                                           result + sz,
1824                                                           maxlen - sz);
1825                         else
1826                                 DMEMIT("0 ");
1827
1828                         DMEMIT("%u %u ", pg->nr_pgpaths,
1829                                pg->ps.type->table_args);
1830
1831                         list_for_each_entry(p, &pg->pgpaths, list) {
1832                                 DMEMIT("%s ", p->path.dev->name);
1833                                 if (pg->ps.type->status)
1834                                         sz += pg->ps.type->status(&pg->ps,
1835                                               &p->path, type, result + sz,
1836                                               maxlen - sz);
1837                         }
1838                 }
1839                 break;
1840         }
1841
1842         spin_unlock_irqrestore(&m->lock, flags);
1843 }
1844
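/*
 * Messages are normally sent from userspace with dmsetup; for example
 * (the device name and path numbers here are purely illustrative):
 *
 *   dmsetup message mpatha 0 fail_if_no_path
 *   dmsetup message mpatha 0 switch_group 2
 *   dmsetup message mpatha 0 fail_path 8:32
 *   dmsetup message mpatha 0 reinstate_path 8:32
 */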
1845 static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1846 {
1847         int r = -EINVAL;
1848         struct dm_dev *dev;
1849         struct multipath *m = ti->private;
1850         action_fn action;
1851
1852         mutex_lock(&m->work_mutex);
1853
1854         if (dm_suspended(ti)) {
1855                 r = -EBUSY;
1856                 goto out;
1857         }
1858
1859         if (argc == 1) {
1860                 if (!strcasecmp(argv[0], "queue_if_no_path")) {
1861                         r = queue_if_no_path(m, true, false);
1862                         goto out;
1863                 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1864                         r = queue_if_no_path(m, false, false);
1865                         goto out;
1866                 }
1867         }
1868
1869         if (argc != 2) {
1870                 DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1871                 goto out;
1872         }
1873
1874         if (!strcasecmp(argv[0], "disable_group")) {
1875                 r = bypass_pg_num(m, argv[1], true);
1876                 goto out;
1877         } else if (!strcasecmp(argv[0], "enable_group")) {
1878                 r = bypass_pg_num(m, argv[1], false);
1879                 goto out;
1880         } else if (!strcasecmp(argv[0], "switch_group")) {
1881                 r = switch_pg_num(m, argv[1]);
1882                 goto out;
1883         } else if (!strcasecmp(argv[0], "reinstate_path"))
1884                 action = reinstate_path;
1885         else if (!strcasecmp(argv[0], "fail_path"))
1886                 action = fail_path;
1887         else {
1888                 DMWARN("Unrecognised multipath message received: %s", argv[0]);
1889                 goto out;
1890         }
1891
1892         r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1893         if (r) {
1894                 DMWARN("message: error getting device %s",
1895                        argv[1]);
1896                 goto out;
1897         }
1898
1899         r = action_dev(m, dev, action);
1900
1901         dm_put_device(ti, dev);
1902
1903 out:
1904         mutex_unlock(&m->work_mutex);
1905         return r;
1906 }
1907
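/*
 * Pick the block device (and mode) of the currently active path so that
 * ioctls can be passed straight through to it.
 */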
1908 static int multipath_prepare_ioctl(struct dm_target *ti,
1909                 struct block_device **bdev, fmode_t *mode)
1910 {
1911         struct multipath *m = ti->private;
1912         struct pgpath *current_pgpath;
1913         int r;
1914
1915         current_pgpath = lockless_dereference(m->current_pgpath);
1916         if (!current_pgpath)
1917                 current_pgpath = choose_pgpath(m, 0);
1918
1919         if (current_pgpath) {
1920                 if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
1921                         *bdev = current_pgpath->path.dev->bdev;
1922                         *mode = current_pgpath->path.dev->mode;
1923                         r = 0;
1924                 } else {
1925                         /* pg_init has not started or completed */
1926                         r = -ENOTCONN;
1927                 }
1928         } else {
1929                 /* No path is available */
1930                 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1931                         r = -ENOTCONN;
1932                 else
1933                         r = -EIO;
1934         }
1935
1936         if (r == -ENOTCONN) {
1937                 if (!lockless_dereference(m->current_pg)) {
1938                         /* Path status changed, redo selection */
1939                         (void) choose_pgpath(m, 0);
1940                 }
1941                 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1942                         pg_init_all_paths(m);
1943                 dm_table_run_md_queue_async(m->ti->table);
1944                 process_queued_io_list(m);
1945         }
1946
1947         /*
1948          * Only pass ioctls through if the device sizes match exactly.
1949          */
1950         if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1951                 return 1;
1952         return r;
1953 }
1954
1955 static int multipath_iterate_devices(struct dm_target *ti,
1956                                      iterate_devices_callout_fn fn, void *data)
1957 {
1958         struct multipath *m = ti->private;
1959         struct priority_group *pg;
1960         struct pgpath *p;
1961         int ret = 0;
1962
1963         list_for_each_entry(pg, &m->priority_groups, list) {
1964                 list_for_each_entry(p, &pg->pgpaths, list) {
1965                         ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1966                         if (ret)
1967                                 goto out;
1968                 }
1969         }
1970
1971 out:
1972         return ret;
1973 }
1974
1975 static int pgpath_busy(struct pgpath *pgpath)
1976 {
1977         struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1978
1979         return blk_lld_busy(q);
1980 }
1981
1982 /*
1983  * We return "busy" only when we can map I/Os but the underlying
1984  * devices are busy (so even if we mapped I/Os now, they would just
1985  * wait on the underlying queue).
1986  * In other words, if we want to kill I/Os or queue them inside us
1987  * due to map unavailability, we don't return "busy".  Otherwise,
1988  * dm core won't give us the I/Os and we can't kill or queue them.
1989  */
1990 static int multipath_busy(struct dm_target *ti)
1991 {
1992         bool busy = false, has_active = false;
1993         struct multipath *m = ti->private;
1994         struct priority_group *pg, *next_pg;
1995         struct pgpath *pgpath;
1996
1997         /* pg_init in progress */
1998         if (atomic_read(&m->pg_init_in_progress))
1999                 return true;
2000
2001         /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
2002         if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
2003                 return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
2004
2005         /* Guess which priority_group will be used at next mapping time */
2006         pg = lockless_dereference(m->current_pg);
2007         next_pg = lockless_dereference(m->next_pg);
2008         if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
2009                 pg = next_pg;
2010
2011         if (!pg) {
2012                 /*
2013                  * We don't know which pg will be used at next mapping time.
2014                  * We don't call choose_pgpath() here to avoid triggering
2015                  * pg_init just from a busy check.
2016                  * So we don't know whether the underlying devices we would use
2017                  * at next mapping time are busy or not. Just try mapping.
2018                  */
2019                 return busy;
2020         }
2021
2022         /*
2023          * If there is at least one non-busy active path, the path selector
2024          * will be able to select it. So we consider such a pg as not busy.
2025          */
2026         busy = true;
2027         list_for_each_entry(pgpath, &pg->pgpaths, list) {
2028                 if (pgpath->is_active) {
2029                         has_active = true;
2030                         if (!pgpath_busy(pgpath)) {
2031                                 busy = false;
2032                                 break;
2033                         }
2034                 }
2035         }
2036
2037         if (!has_active) {
2038                 /*
2039                  * There is no active path in this pg, so this pg won't be used
2040                  * and current_pg will be changed at next mapping time.
2041                  * We need to try mapping to determine which pg that will be.
2042                  */
2043                 busy = false;
2044         }
2045
2046         return busy;
2047 }
2048
2049 /*-----------------------------------------------------------------
2050  * Module setup
2051  *---------------------------------------------------------------*/
2052 static struct target_type multipath_target = {
2053         .name = "multipath",
2054         .version = {1, 12, 0},
2055         .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
2056         .module = THIS_MODULE,
2057         .ctr = multipath_ctr,
2058         .dtr = multipath_dtr,
2059         .map_rq = multipath_map,
2060         .clone_and_map_rq = multipath_clone_and_map,
2061         .release_clone_rq = multipath_release_clone,
2062         .rq_end_io = multipath_end_io,
2063         .map = multipath_map_bio,
2064         .end_io = multipath_end_io_bio,
2065         .presuspend = multipath_presuspend,
2066         .postsuspend = multipath_postsuspend,
2067         .resume = multipath_resume,
2068         .status = multipath_status,
2069         .message = multipath_message,
2070         .prepare_ioctl = multipath_prepare_ioctl,
2071         .iterate_devices = multipath_iterate_devices,
2072         .busy = multipath_busy,
2073 };
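/*
 * A multipath device is typically assembled from userspace with a table in
 * the constructor format documented above, e.g. (the device size, name and
 * path devices below are illustrative only):
 *
 *   dmsetup create mpatha --table \
 *     "0 71014400 multipath 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000"
 */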
2074
2075 static int __init dm_multipath_init(void)
2076 {
2077         int r;
2078
2079         /* allocate a slab for the dm_mpath_ios */
2080         _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
2081         if (!_mpio_cache)
2082                 return -ENOMEM;
2083
2084         r = dm_register_target(&multipath_target);
2085         if (r < 0) {
2086                 DMERR("request-based register failed %d", r);
2087                 r = -EINVAL;
2088                 goto bad_register_target;
2089         }
2090
2091         kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2092         if (!kmultipathd) {
2093                 DMERR("failed to create workqueue kmpathd");
2094                 r = -ENOMEM;
2095                 goto bad_alloc_kmultipathd;
2096         }
2097
2098         /*
2099          * A separate workqueue is used for the device handlers to avoid
2100          * overloading the existing workqueue. Overloading that workqueue
2101          * would also create a bottleneck in the storage hardware
2102          * device activation path.
2103          */
2104         kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2105                                                   WQ_MEM_RECLAIM);
2106         if (!kmpath_handlerd) {
2107                 DMERR("failed to create workqueue kmpath_handlerd");
2108                 r = -ENOMEM;
2109                 goto bad_alloc_kmpath_handlerd;
2110         }
2111
2112         return 0;
2113
2114 bad_alloc_kmpath_handlerd:
2115         destroy_workqueue(kmultipathd);
2116 bad_alloc_kmultipathd:
2117         dm_unregister_target(&multipath_target);
2118 bad_register_target:
2119         kmem_cache_destroy(_mpio_cache);
2120
2121         return r;
2122 }
2123
2124 static void __exit dm_multipath_exit(void)
2125 {
2126         destroy_workqueue(kmpath_handlerd);
2127         destroy_workqueue(kmultipathd);
2128
2129         dm_unregister_target(&multipath_target);
2130         kmem_cache_destroy(_mpio_cache);
2131 }
2132
2133 module_init(dm_multipath_init);
2134 module_exit(dm_multipath_exit);
2135
2136 MODULE_DESCRIPTION(DM_NAME " multipath target");
2137 MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2138 MODULE_LICENSE("GPL");