diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index ac734e5..e477af8 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -550,9 +550,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
                pgpath = choose_pgpath(m, nr_bytes);
 
        if (!pgpath) {
-               if (!must_push_back_rq(m))
-                       r = -EIO;       /* Failed */
-               return r;
+               if (must_push_back_rq(m))
+                       return DM_MAPIO_DELAY_REQUEUE;
+               return -EIO;    /* Failed */
        } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
                   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
                pg_init_all_paths(m);
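
The first hunk changes the no-path case in __multipath_map(): when queue_if_no_path
is in effect the request is now pushed back with DM_MAPIO_DELAY_REQUEUE, asking the
dm-rq core to requeue it after a delay, and it fails with -EIO otherwise.  A minimal
sketch of that return-value convention (example_map is a made-up name; choose_pgpath()
and must_push_back_rq() are the dm-mpath helpers used above):

	static int example_map(struct dm_target *ti, struct request *clone)
	{
		struct multipath *m = ti->private;
		struct pgpath *pgpath = choose_pgpath(m, blk_rq_bytes(clone));

		if (!pgpath) {
			if (must_push_back_rq(m))
				return DM_MAPIO_DELAY_REQUEUE;	/* retry later */
			return -EIO;				/* hard failure */
		}

		/* ... remap clone to pgpath->path.dev and ... */
		return DM_MAPIO_REMAPPED;
	}
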
@@ -680,9 +680,11 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
        return __multipath_map_bio(m, bio, mpio);
 }
 
-static void process_queued_bios_list(struct multipath *m)
+static void process_queued_io_list(struct multipath *m)
 {
-       if (m->queue_mode == DM_TYPE_BIO_BASED)
+       if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+               dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
+       else if (m->queue_mode == DM_TYPE_BIO_BASED)
                queue_work(kmultipathd, &m->process_queued_bios);
 }
 
@@ -752,7 +754,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 
        if (!queue_if_no_path) {
                dm_table_run_md_queue_async(m->ti->table);
-               process_queued_bios_list(m);
+               process_queued_io_list(m);
        }
 
        return 0;
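
queue_if_no_path() is the first caller converted to the renamed helper: when queueing
is switched off it re-runs the request queue and kicks any I/O that was held back
while no paths were usable.  The helper itself (hunk above) now also covers blk-mq
request-based multipath by calling dm_mq_kick_requeue_list().  That function lives in
the dm-rq core, not in this patch; a hedged sketch of roughly what it is assumed to
do (dm_get_md_queue() as the accessor for the mapped device's request_queue is also
an assumption here):

	#include <linux/blk-mq.h>

	void dm_mq_kick_requeue_list(struct mapped_device *md)
	{
		/* re-dispatch requests parked on the blk-mq requeue list */
		blk_mq_kick_requeue_list(dm_get_md_queue(md));
	}
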
@@ -1193,21 +1195,17 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 static void multipath_wait_for_pg_init_completion(struct multipath *m)
 {
-       DECLARE_WAITQUEUE(wait, current);
-
-       add_wait_queue(&m->pg_init_wait, &wait);
+       DEFINE_WAIT(wait);
 
        while (1) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
 
                if (!atomic_read(&m->pg_init_in_progress))
                        break;
 
                io_schedule();
        }
-       set_current_state(TASK_RUNNING);
-
-       remove_wait_queue(&m->pg_init_wait, &wait);
+       finish_wait(&m->pg_init_wait, &wait);
 }
 
 static void flush_multipath_work(struct multipath *m)
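
In multipath_wait_for_pg_init_completion() the open-coded add_wait_queue() /
set_current_state() / remove_wait_queue() sequence is replaced by the standard
DEFINE_WAIT() + prepare_to_wait() + finish_wait() idiom, which sets the task state
while queueing the waiter and restores TASK_RUNNING on exit.  The same pattern in
isolation, as a generic sketch (example_wait_for_zero, the waitqueue and the counter
are made up; only the wait API calls are real):

	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <linux/atomic.h>

	static void example_wait_for_zero(wait_queue_head_t *wq, atomic_t *counter)
	{
		DEFINE_WAIT(wait);

		while (1) {
			/* queue 'wait' on wq and set the task state */
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(counter))
				break;
			io_schedule();		/* sleep until woken */
		}
		/* back to TASK_RUNNING and off the waitqueue */
		finish_wait(wq, &wait);
	}
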
@@ -1308,7 +1306,7 @@ out:
        spin_unlock_irqrestore(&m->lock, flags);
        if (run_queue) {
                dm_table_run_md_queue_async(m->ti->table);
-               process_queued_bios_list(m);
+               process_queued_io_list(m);
        }
 
        return r;
@@ -1506,7 +1504,7 @@ static void pg_init_done(void *data, int errors)
        }
        clear_bit(MPATHF_QUEUE_IO, &m->flags);
 
-       process_queued_bios_list(m);
+       process_queued_io_list(m);
 
        /*
         * Wake up any thread waiting to suspend.
@@ -1521,10 +1519,10 @@ static void activate_path(struct work_struct *work)
 {
        struct pgpath *pgpath =
                container_of(work, struct pgpath, activate_path.work);
+       struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
 
-       if (pgpath->is_active)
-               scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
-                                pg_init_done, pgpath);
+       if (pgpath->is_active && !blk_queue_dying(q))
+               scsi_dh_activate(q, pg_init_done, pgpath);
        else
                pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
 }
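
activate_path() now checks the path's request_queue before starting SCSI
device-handler activation: if the queue is already marked as dying (the device is
being torn down), pg_init is completed immediately with SCSI_DH_DEV_OFFLINED instead
of queueing work against a disappearing device.  For reference, blk_queue_dying() is
essentially just a queue-flag test, so the extra check is cheap (sketch of the
blkdev.h macro, not necessarily verbatim):

	#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
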
@@ -1532,6 +1530,14 @@ static void activate_path(struct work_struct *work)
 static int noretry_error(int error)
 {
        switch (error) {
+       case -EBADE:
+               /*
+                * EBADE signals a reservation conflict.
+                * We shouldn't fail the path here as we can communicate with
+                * the target.  We should fail over to the next path, but in
+                * doing so we might be causing a ping-pong between paths.
+                * So just return the reservation conflict error.
+                */
        case -EOPNOTSUPP:
        case -EREMOTEIO:
        case -EILSEQ:
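
Adding -EBADE to noretry_error() means a reservation conflict is reported back to
the caller without retrying, which lets the two hunks below drop the old
special-casing of -EBADE in do_end_io() and do_end_io_bio().  A hedged, condensed
sketch of how the request completion path consumes noretry_error() (example_end_io
is a made-up name; fail_path() and struct dm_mpath_io are the existing dm-mpath
pieces):

	static int example_end_io(struct multipath *m, int error,
				  struct dm_mpath_io *mpio)
	{
		if (!error)
			return 0;			/* I/O succeeded */

		if (noretry_error(error))
			return error;			/* -EBADE et al.: pass straight up */

		if (mpio->pgpath)
			fail_path(mpio->pgpath);	/* retryable error: fail this path */

		return DM_ENDIO_REQUEUE;		/* let dm retry on another path */
	}
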
@@ -1576,9 +1582,6 @@ static int do_end_io(struct multipath *m, struct request *clone,
                if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
                        if (!must_push_back_rq(m))
                                r = -EIO;
-               } else {
-                       if (error == -EBADE)
-                               r = error;
                }
        }
 
@@ -1627,9 +1630,6 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
                        if (!must_push_back_bio(m))
                                return -EIO;
                        return DM_ENDIO_REQUEUE;
-               } else {
-                       if (error == -EBADE)
-                               return error;
                }
        }
 
@@ -1941,7 +1941,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
                if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
                        pg_init_all_paths(m);
                dm_table_run_md_queue_async(m->ti->table);
-               process_queued_bios_list(m);
+               process_queued_io_list(m);
        }
 
        /*
@@ -1994,11 +1994,14 @@ static int multipath_busy(struct dm_target *ti)
        struct priority_group *pg, *next_pg;
        struct pgpath *pgpath;
 
-       /* pg_init in progress or no paths available */
-       if (atomic_read(&m->pg_init_in_progress) ||
-           (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)))
+       /* pg_init in progress */
+       if (atomic_read(&m->pg_init_in_progress))
                return true;
 
+       /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
+       if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+               return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
+
        /* Guess which priority_group will be used at next mapping time */
        pg = lockless_dereference(m->current_pg);
        next_pg = lockless_dereference(m->next_pg);