Merge branch 'sunvnet-next'
diff --git a/block/blk-flush.c b/block/blk-flush.c
index b01a86d..20badd7 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -91,7 +91,8 @@ enum {
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
 };
 
-static bool blk_kick_flush(struct request_queue *q);
+static bool blk_kick_flush(struct request_queue *q,
+                          struct blk_flush_queue *fq);
 
 static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
 {
@@ -148,6 +149,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 /**
  * blk_flush_complete_seq - complete flush sequence
  * @rq: FLUSH/FUA request being sequenced
+ * @fq: flush queue
  * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
  * @error: whether an error occurred
  *
@@ -160,11 +162,11 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
  * RETURNS:
  * %true if requests were added to the dispatch queue, %false otherwise.
  */
-static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
-                                  int error)
+static bool blk_flush_complete_seq(struct request *rq,
+                                  struct blk_flush_queue *fq,
+                                  unsigned int seq, int error)
 {
        struct request_queue *q = rq->q;
-       struct blk_flush_queue *fq = blk_get_flush_queue(q);
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        bool queued = false, kicked;
 
@@ -210,7 +212,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
                BUG();
        }
 
-       kicked = blk_kick_flush(q);
+       kicked = blk_kick_flush(q, fq);
        return kicked | queued;
 }
 
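With the flush queue threaded through as an argument, blk_flush_complete_seq() no longer re-derives it from the request_queue on every call. The lookup helper itself is not part of this diff; judging from the two-argument calls that follow, it presumably lives in block/blk.h and reads roughly like the sketch below (q->fq and hctx->fq assumed, per the allocation changes at the end of this patch):

    /*
     * Sketch of the lookup helper implied by the calls in this patch;
     * not taken from this diff. A NULL ctx is safe on the legacy path
     * because q->fq is returned before ctx is dereferenced, which is
     * what flush_data_end_io() below relies on.
     */
    static inline struct blk_flush_queue *
    blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
    {
            struct blk_mq_hw_ctx *hctx;

            if (!q->mq_ops)
                    return q->fq;   /* queue-wide flush queue */

            hctx = q->mq_ops->map_queue(q, ctx->cpu);
            return hctx->fq;        /* per-hw-context flush queue */
    }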
@@ -221,7 +223,7 @@ static void flush_end_io(struct request *flush_rq, int error)
        bool queued = false;
        struct request *rq, *n;
        unsigned long flags = 0;
-       struct blk_flush_queue *fq = blk_get_flush_queue(q);
+       struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
 
        if (q->mq_ops) {
                spin_lock_irqsave(&fq->mq_flush_lock, flags);
@@ -242,7 +244,7 @@ static void flush_end_io(struct request *flush_rq, int error)
                unsigned int seq = blk_flush_cur_seq(rq);
 
                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
-               queued |= blk_flush_complete_seq(rq, seq, error);
+               queued |= blk_flush_complete_seq(rq, fq, seq, error);
        }
 
        /*
@@ -268,6 +270,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
+ * @fq: flush queue
  *
  * Flush related states of @q have changed, consider issuing flush request.
  * Please read the comment at the top of this file for more info.
@@ -278,9 +281,8 @@ static void flush_end_io(struct request *flush_rq, int error)
  * RETURNS:
  * %true if flush was issued, %false otherwise.
  */
-static bool blk_kick_flush(struct request_queue *q)
+static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 {
-       struct blk_flush_queue *fq = blk_get_flush_queue(q);
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);
@@ -303,8 +305,15 @@ static bool blk_kick_flush(struct request_queue *q)
        fq->flush_pending_idx ^= 1;
 
        blk_rq_init(q, flush_rq);
-       if (q->mq_ops)
-               blk_mq_clone_flush_request(flush_rq, first_rq);
+
+       /*
+        * Borrow tag from the first request since they can't
+        * be in flight at the same time.
+        */
+       if (q->mq_ops) {
+               flush_rq->mq_ctx = first_rq->mq_ctx;
+               flush_rq->tag = first_rq->tag;
+       }
 
        flush_rq->cmd_type = REQ_TYPE_FS;
        flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
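These two assignments replace blk_mq_clone_flush_request(), which in the pre-change tree presumably read roughly as below: besides ctx and tag it also memcpy'd the driver pdu from the first request. That copy becomes unnecessary once the flush request gets pdu space of its own (note cmd_size being folded into blk_alloc_flush_queue() further down), so only the two fields tying the flush request to the right hardware queue and tag are carried over. The borrow is safe because first_rq sits parked on the pending list and cannot be in flight while the flush it triggered is.

    /* Rough reconstruction of the removed helper (from the pre-change
     * tree, not this diff), shown only to make clear what the two
     * assignments above no longer need to do:
     */
    static void blk_mq_clone_flush_request(struct request *flush_rq,
            struct request *orig_rq)
    {
            struct blk_mq_hw_ctx *hctx =
                    orig_rq->q->mq_ops->map_queue(orig_rq->q,
                                                  orig_rq->mq_ctx->cpu);

            flush_rq->mq_ctx = orig_rq->mq_ctx;
            flush_rq->tag = orig_rq->tag;
            /* the pdu copy that the new code drops: */
            memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
                   hctx->cmd_size);
    }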
@@ -317,12 +326,13 @@ static bool blk_kick_flush(struct request_queue *q)
 static void flush_data_end_io(struct request *rq, int error)
 {
        struct request_queue *q = rq->q;
+       struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
         */
-       if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+       if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
                blk_run_queue_async(q);
 }
 
@@ -330,11 +340,10 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
-       struct blk_mq_ctx *ctx;
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
-       struct blk_flush_queue *fq = blk_get_flush_queue(q);
+       struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-       ctx = rq->mq_ctx;
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
        /*
@@ -342,7 +351,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
-       if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+       if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
                blk_mq_run_hw_queue(hctx, true);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
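Note that the fq being locked and the hardware queue being kicked are found through the same ctx->cpu mapping, so they necessarily belong together; spelled out under the helper sketch above (an assumed equivalence, not code from this diff):

    /* on the mq path, both lookups resolve through one mapping: */
    hctx = q->mq_ops->map_queue(q, ctx->cpu);
    fq   = hctx->fq;    /* == blk_get_flush_queue(q, ctx) */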
@@ -364,6 +373,7 @@ void blk_insert_flush(struct request *rq)
        struct request_queue *q = rq->q;
        unsigned int fflags = q->flush_flags;   /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
+       struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
        /*
         * @policy now records what operations need to be done.  Adjust
@@ -412,18 +422,16 @@ void blk_insert_flush(struct request *rq)
        rq->cmd_flags |= REQ_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
        if (q->mq_ops) {
-               struct blk_flush_queue *fq = blk_get_flush_queue(q);
-
                rq->end_io = mq_flush_data_end_io;
 
                spin_lock_irq(&fq->mq_flush_lock);
-               blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+               blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
                spin_unlock_irq(&fq->mq_flush_lock);
                return;
        }
        rq->end_io = flush_data_end_io;
 
-       blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+       blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 }
 
 /**
@@ -479,23 +487,22 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-static struct blk_flush_queue *blk_alloc_flush_queue(
-               struct request_queue *q)
+struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
+               int node, int cmd_size)
 {
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);
 
-       fq = kzalloc(sizeof(*fq), GFP_KERNEL);
+       fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
        if (!fq)
                goto fail;
 
        if (q->mq_ops) {
                spin_lock_init(&fq->mq_flush_lock);
-               rq_sz = round_up(rq_sz + q->tag_set->cmd_size,
-                               cache_line_size());
+               rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        }
 
-       fq->flush_rq = kzalloc(rq_sz, GFP_KERNEL);
+       fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
        if (!fq->flush_rq)
                goto fail_rq;
 
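The allocator now takes an explicit node (so a per-hctx flush queue can live on its hardware context's NUMA node) and an explicit cmd_size instead of reaching into q->tag_set, which the legacy path does not have. cmd_size is folded into rq_sz because, as elsewhere in blk-mq, the driver's per-request payload lives immediately behind the request structure, and the round-up keeps the flush request cache-line aligned like the tag-set requests. Compare the standard pdu accessor from include/linux/blk-mq.h:

    static inline void *blk_mq_rq_to_pdu(struct request *rq)
    {
            return rq + 1;  /* pdu starts right after struct request */
    }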
@@ -511,7 +518,7 @@ static struct blk_flush_queue *blk_alloc_flush_queue(
        return NULL;
 }
 
-static void blk_free_flush_queue(struct blk_flush_queue *fq)
+void blk_free_flush_queue(struct blk_flush_queue *fq)
 {
        /* bio-based request queues don't have a flush queue */
        if (!fq)
@@ -520,17 +527,3 @@ static void blk_free_flush_queue(struct blk_flush_queue *fq)
        kfree(fq->flush_rq);
        kfree(fq);
 }
-
-int blk_init_flush(struct request_queue *q)
-{
-       q->fq = blk_alloc_flush_queue(q);
-       if (!q->fq)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void blk_exit_flush(struct request_queue *q)
-{
-       blk_free_flush_queue(q->fq);
-}
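With blk_init_flush()/blk_exit_flush() gone and both helpers made non-static above, allocation and teardown presumably move to the callers: the mq path allocates one flush queue per hardware context, the legacy path keeps a single queue-wide one. A sketch of the assumed call sites (names per this era of the tree, not shown in this diff):

    /* assumed mq-side call site, per hardware context: */
    hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);

    /* assumed legacy-side call site, one queue-wide flush queue: */
    q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);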