static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
- unsigned int cpu)
-{
- return per_cpu_ptr(q->queue_ctx, cpu);
-}
-
-/*
- * This assumes per-cpu software queueing queues. They could be per-node
- * as well, for instance. For now this is hardcoded as-is. Note that we don't
- * care about preemption, since we know the ctx's are persistent. This does
- * mean that we can't rely on ctx always matching the currently running CPU.
- */
-static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-{
- return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
- put_cpu();
-}
-
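Note: blk_mq_get_ctx()/blk_mq_put_ctx() are still called further down in this diff, so the helpers removed here presumably just move out of blk-mq.c into a shared header (likely block/blk-mq.h) rather than go away. A sketch of the moved helpers, assuming their behavior is unchanged:

/* block/blk-mq.h (assumed destination of the helpers removed above) */
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
		unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());	/* pins the current CPU */
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}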
/*
* Check if any of the ctx's have pending work in this hardware queue
*/
}
static struct request *
-__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
- struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
{
struct request *rq;
unsigned int tag;
- tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
+ tag = blk_mq_get_tag(data);
if (tag != BLK_MQ_TAG_FAIL) {
- rq = hctx->tags->rqs[tag];
+ rq = data->hctx->tags->rqs[tag];
rq->cmd_flags = 0;
- if (blk_mq_tag_busy(hctx)) {
+ if (blk_mq_tag_busy(data->hctx)) {
rq->cmd_flags = REQ_MQ_INFLIGHT;
- atomic_inc(&hctx->nr_active);
+ atomic_inc(&data->hctx->nr_active);
}
rq->tag = tag;
- blk_mq_rq_ctx_init(q, ctx, rq, rw);
+ blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
return rq;
}
struct blk_mq_ctx *ctx;
struct blk_mq_hw_ctx *hctx;
struct request *rq;
+ struct blk_mq_alloc_data alloc_data;
if (blk_mq_queue_enter(q))
return NULL;
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
- reserved);
+ blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
+ reserved, ctx, hctx);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
if (!rq && (gfp & __GFP_WAIT)) {
__blk_mq_run_hw_queue(hctx);
blk_mq_put_ctx(ctx);
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved);
+ blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
+ hctx);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
+ ctx = alloc_data.ctx;
}
blk_mq_put_ctx(ctx);
return rq;
}
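struct blk_mq_alloc_data and blk_mq_set_alloc_data() are not part of this diff; they presumably live in block/blk-mq.h. A minimal sketch inferred from the call sites above (q/gfp/reserved/ctx/hctx are packed into one struct, and ctx/hctx are read back after a blocking allocation may have moved the caller to another CPU):

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	gfp_t gfp;
	bool reserved;

	/* input & output: may change if the allocation blocked and migrated */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, gfp_t gfp, bool reserved,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->gfp = gfp;
	data->reserved = reserved;
	data->ctx = ctx;
	data->hctx = hctx;
}

Carrying the allocation context in one struct keeps the __blk_mq_alloc_request() and blk_mq_get_tag() signatures stable as more per-allocation state is threaded through.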
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
-struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+static inline bool is_flush_request(struct request *rq, unsigned int tag)
{
- struct request_queue *q = hctx->queue;
- if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) &&
- q->flush_rq->tag == tag)
- return q->flush_rq;
- return hctx->tags->rqs[tag];
+ return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
+ rq->q->flush_rq->tag == tag);
+}
+
+struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
+{
+ struct request *rq = tags->rqs[tag];
+ if (!is_flush_request(rq, tag))
+ return rq;
+ return rq->q->flush_rq;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);
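With blk_mq_tag_to_rq() now keyed on the tag set instead of the hardware context, a caller only needs a struct blk_mq_tags pointer to resolve a tag, as the timeout loop below does. A hypothetical driver-side sketch (example_complete_tag() is illustrative only, not from this diff; blk_mq_end_io() is the completion helper of this era):

/* Hypothetical completion path: resolve a finished hardware tag back to its
 * request using only the tag set, then complete it. */
static void example_complete_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct request *rq = blk_mq_tag_to_rq(hctx->tags, tag);

	blk_mq_end_io(rq, 0);
}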
if (tag >= hctx->tags->nr_tags)
break;
- rq = blk_mq_tag_to_rq(hctx, tag++);
+ rq = blk_mq_tag_to_rq(hctx->tags, tag++);
if (rq->q != hctx->queue)
continue;
if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
struct blk_mq_ctx *ctx;
struct request *rq;
int rw = bio_data_dir(bio);
+ struct blk_mq_alloc_data alloc_data;
if (unlikely(blk_mq_queue_enter(q))) {
bio_endio(bio, -EIO);
rw |= REQ_SYNC;
trace_block_getrq(q, bio, rw);
- rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
+ blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
+ hctx);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
if (unlikely(!rq)) {
__blk_mq_run_hw_queue(hctx);
blk_mq_put_ctx(ctx);
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
- __GFP_WAIT|GFP_ATOMIC, false);
+ blk_mq_set_alloc_data(&alloc_data, q,
+ __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
+ ctx = alloc_data.ctx;
+ hctx = alloc_data.hctx;
}
hctx->queued++;
return;
rq = blk_mq_map_request(q, bio, &data);
+ if (unlikely(!rq))
+ return;
if (unlikely(is_flush_fua)) {
blk_mq_bio_to_request(rq, bio);
if (i == nr_queue)
break;
+ blk_mq_tag_idle(hctx);
+
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, i);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
struct blk_mq_hw_ctx **hctxs;
- struct blk_mq_ctx *ctx;
+ struct blk_mq_ctx __percpu *ctx;
struct request_queue *q;
unsigned int *map;
int i;
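The __percpu annotation matches the per-CPU allocation of the software queues done a few lines below this hunk (not shown); presumably along the lines of the following, which lets sparse type-check the per_cpu_ptr() use in __blk_mq_get_ctx():

	ctx = alloc_percpu(struct blk_mq_ctx);	/* one software queue per CPU */
	if (!ctx)
		return ERR_PTR(-ENOMEM);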