/*
 * Per-CPU software queue state.
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
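
/*
 * Example (not part of the original header): a minimal sketch of how a
 * request lands on a software queue. The helper name is hypothetical;
 * in the kernel the insertion path lives in blk-mq.c, but the pattern
 * is the same: take ctx->lock, chain the request onto ctx->rq_list.
 */
static inline void example_ctx_insert(struct blk_mq_ctx *ctx,
				      struct request *rq)
{
	spin_lock(&ctx->lock);
	list_add_tail(&rq->queuelist, &ctx->rq_list);
	spin_unlock(&ctx->lock);
}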
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);
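
/*
 * Example (hypothetical): wiring up a hotplug callback. This sketch
 * assumes the notifier is embedded in the hardware context as
 * hctx->cpu_notifier; the callback receives the opaque data pointer,
 * the hotplug action and the CPU number.
 */
static int example_cpu_notify(void *data, unsigned long action,
			      unsigned int cpu)
{
	/* e.g. drain the software queue of a dead CPU via data (hctx) */
	return NOTIFY_OK;
}

static inline void example_register_notifier(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_init_cpu_notifier(&hctx->cpu_notifier, example_cpu_notify,
				 hctx);
	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
}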
/*
 * CPU -> queue mappings
 */
int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
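
/*
 * Example (hypothetical caller): resolve the hardware context backing
 * the current CPU. mq_map is stable for the lifetime of the queue, so
 * a raw CPU id read is sufficient here.
 */
static inline struct blk_mq_hw_ctx *example_current_hctx(struct request_queue *q)
{
	return blk_mq_map_queue(q, raw_smp_processor_id());
}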
/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}
/*
 * This assumes per-cpu software queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	/* unpin the CPU pinned by blk_mq_get_ctx() */
	put_cpu();
}
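
/*
 * Example (hypothetical): blk_mq_get_ctx() pins the caller to a CPU via
 * get_cpu(), so every call must be paired with blk_mq_put_ctx() once
 * the ctx is no longer needed.
 */
static inline void example_account_merge(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

	ctx->rq_merged++;	/* preemption is disabled in this window */
	blk_mq_put_ctx(ctx);
}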
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}
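
/*
 * Example (hypothetical): typical setup before a request/tag allocation.
 * ctx and hctx may be passed as NULL and filled in by the allocator,
 * which is why they are flagged "input & output" above.
 */
static inline void example_prepare_alloc(struct blk_mq_alloc_data *data,
					 struct request_queue *q,
					 unsigned int flags)
{
	blk_mq_set_alloc_data(data, q, flags, NULL, NULL);
}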
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
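
/*
 * Example (hypothetical): callers typically gate queue runs on the
 * mapping check above, so hardware queues that never got software
 * queues or tags assigned are never kicked.
 */
static inline void example_run_if_mapped(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_hw_queue_mapped(hctx))
		blk_mq_run_hw_queue(hctx, true);
}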