Merge branch 'for-4.9/block-irq' of git://git.kernel.dk/linux-block
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index d1c3645..5eacce1 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -73,43 +73,60 @@ static void dm_old_start_queue(struct request_queue *q)
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+static void dm_mq_start_queue(struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       blk_mq_start_stopped_hw_queues(q, true);
+       blk_mq_kick_requeue_list(q);
+}
+
 void dm_start_queue(struct request_queue *q)
 {
        if (!q->mq_ops)
                dm_old_start_queue(q);
-       else {
-               queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
-               blk_mq_start_stopped_hw_queues(q, true);
-               blk_mq_kick_requeue_list(q);
-       }
+       else
+               dm_mq_start_queue(q);
 }
 
 static void dm_old_stop_queue(struct request_queue *q)
 {
        unsigned long flags;
 
+       spin_lock_irqsave(q->queue_lock, flags);
+       if (!blk_queue_stopped(q))
+               blk_stop_queue(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_mq_stop_queue(struct request_queue *q)
+{
+       unsigned long flags;
+
        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q)) {
                spin_unlock_irqrestore(q->queue_lock, flags);
                return;
        }
 
-       blk_stop_queue(q);
+       queue_flag_set(QUEUE_FLAG_STOPPED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);
+
+       /* Prevent requeuing from restarting the queue. */
+       blk_mq_cancel_requeue_work(q);
+       blk_mq_stop_hw_queues(q);
 }
 
 void dm_stop_queue(struct request_queue *q)
 {
        if (!q->mq_ops)
                dm_old_stop_queue(q);
-       else {
-               spin_lock_irq(q->queue_lock);
-               queue_flag_set(QUEUE_FLAG_STOPPED, q);
-               spin_unlock_irq(q->queue_lock);
-
-               blk_mq_cancel_requeue_work(q);
-               blk_mq_stop_hw_queues(q);
-       }
+       else
+               dm_mq_stop_queue(q);
 }
 
 static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
@@ -319,21 +336,32 @@ static void dm_old_requeue_request(struct request *rq)
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_mq_requeue_request(struct request *rq)
+static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
 {
-       struct request_queue *q = rq->q;
        unsigned long flags;
 
-       blk_mq_requeue_request(rq);
        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
-               blk_mq_kick_requeue_list(q);
+               blk_mq_delay_kick_requeue_list(q, msecs);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_requeue_original_request(struct mapped_device *md,
-                                       struct request *rq)
+void dm_mq_kick_requeue_list(struct mapped_device *md)
+{
+       __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
+}
+EXPORT_SYMBOL(dm_mq_kick_requeue_list);
+
+static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
+{
+       blk_mq_requeue_request(rq);
+       __dm_mq_kick_requeue_list(rq->q, msecs);
+}
+
+static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
 {
+       struct mapped_device *md = tio->md;
+       struct request *rq = tio->orig;
        int rw = rq_data_dir(rq);
 
        rq_end_stats(md, rq);
@@ -342,7 +370,7 @@ static void dm_requeue_original_request(struct mapped_device *md,
        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq);
        else
-               dm_mq_requeue_request(rq);
+               dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
 
        rq_completed(md, rw, false);
 }
@@ -372,7 +400,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
                return;
        else if (r == DM_ENDIO_REQUEUE)
                /* The target wants to requeue the I/O */
-               dm_requeue_original_request(tio->md, tio->orig);
+               dm_requeue_original_request(tio, false);
        else {
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
@@ -612,20 +640,23 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
 
 /*
  * Returns:
- * 0                : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * DM_MAPIO_*       : the request has been processed as indicated
+ * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
  * < 0              : the request was completed due to failure
  */
-static int map_request(struct dm_rq_target_io *tio, struct request *rq,
-                      struct mapped_device *md)
+static int map_request(struct dm_rq_target_io *tio)
 {
        int r;
        struct dm_target *ti = tio->ti;
+       struct mapped_device *md = tio->md;
+       struct request *rq = tio->orig;
        struct request *clone = NULL;
 
        if (tio->clone) {
                clone = tio->clone;
                r = ti->type->map_rq(ti, clone, &tio->info);
+               if (r == DM_MAPIO_DELAY_REQUEUE)
+                       return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
        } else {
                r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
                if (r < 0) {
@@ -633,9 +664,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
-               if (r != DM_MAPIO_REMAPPED)
-                       return r;
-               if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+               if (r == DM_MAPIO_REMAPPED &&
+                   setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
@@ -654,7 +684,10 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
-               dm_requeue_original_request(md, tio->orig);
+               break;
+       case DM_MAPIO_DELAY_REQUEUE:
+               /* The target wants to requeue the I/O after a delay */
+               dm_requeue_original_request(tio, true);
                break;
        default:
                if (r > 0) {
@@ -664,10 +697,9 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 
                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, r);
-               return r;
        }
 
-       return 0;
+       return r;
 }
 
 static void dm_start_request(struct mapped_device *md, struct request *orig)
@@ -706,11 +738,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 static void map_tio_request(struct kthread_work *work)
 {
        struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
-       struct request *rq = tio->orig;
-       struct mapped_device *md = tio->md;
 
-       if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-               dm_requeue_original_request(md, rq);
+       if (map_request(tio) == DM_MAPIO_REQUEUE)
+               dm_requeue_original_request(tio, false);
 }
 
 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
@@ -896,7 +926,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
        tio->ti = ti;
 
        /* Direct call is fine since .queue_rq allows allocations */
-       if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+       if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
@@ -954,7 +984,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
        dm_init_md_queue(md);
 
        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
-       blk_mq_register_disk(md->disk);
+       blk_mq_register_dev(disk_to_dev(md->disk), q);
 
        return 0;