diff --git a/block/blk-mq.c b/block/blk-mq.c
index 729169d..c207fa9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
        if (ret)
                return ERR_PTR(ret);
 
+       /*
+        * Check if the hardware context is actually mapped to anything.
+        * If not, tell the caller that it should skip this queue.
+        */
        hctx = q->queue_hw_ctx[hctx_idx];
+       if (!blk_mq_hw_queue_mapped(hctx)) {
+               ret = -EXDEV;
+               goto out_queue_exit;
+       }
        ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
        if (!rq) {
-               blk_queue_exit(q);
-               return ERR_PTR(-EWOULDBLOCK);
+               ret = -EWOULDBLOCK;
+               goto out_queue_exit;
        }
 
        return rq;
+
+out_queue_exit:
+       blk_queue_exit(q);
+       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
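With this hunk, blk_mq_alloc_request_hctx() reports -EXDEV when the requested hardware context is not mapped to any CPU, so a caller that targets a specific hardware queue can skip that queue instead of allocating a request on it. The sketch below is illustrative only; the helper name, the WRITE/BLK_MQ_REQ_NOWAIT arguments and the "skip on -EXDEV" policy are assumptions for explanation, not part of this patch.

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/fs.h>

/*
 * Hypothetical caller (not part of this patch): allocate a request on a
 * specific hardware queue and treat the new -EXDEV return as "this queue
 * is unmapped, skip it" rather than as a hard failure.
 */
static int example_cmd_on_hctx(struct request_queue *q, unsigned int hctx_idx)
{
	struct request *rq;

	/* BLK_MQ_REQ_NOWAIT: this path does not want to block for a tag. */
	rq = blk_mq_alloc_request_hctx(q, WRITE, BLK_MQ_REQ_NOWAIT, hctx_idx);
	if (IS_ERR(rq)) {
		if (PTR_ERR(rq) == -EXDEV)
			return 0;	/* hctx not mapped to any CPU: skip */
		return PTR_ERR(rq);
	}

	/* ... set up and issue the request here ... */

	blk_mq_free_request(rq);
	return 0;
}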
@@ -793,11 +805,12 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
        struct list_head *dptr;
        int queued;
 
-       WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
-
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;
 
+       WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+               cpu_online(hctx->next_cpu));
+
        hctx->run++;
 
        /*
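
The second hunk moves the cross-CPU warning below the BLK_MQ_S_STOPPED check and weakens it: running the queue on a CPU outside hctx->cpumask only warns when hctx->next_cpu, the CPU this run was scheduled on, is still online. If that CPU went offline, the workqueue code may have rerouted the work item to another CPU, which is not a bug. The helper below is an illustrative restatement of the new condition, an assumption for explanation rather than part of the patch.

#include <linux/blk-mq.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/*
 * Returns true in exactly the case the patched WARN_ON() complains about:
 * we are running on a CPU that is not in the hardware context's mask even
 * though the CPU the run was scheduled for is still online, so the
 * mismatch cannot be explained by CPU hotplug.
 */
static inline bool example_unexpected_cpu(struct blk_mq_hw_ctx *hctx)
{
	return !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
	       cpu_online(hctx->next_cpu);
}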