block/blk-mq.c
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23
24 #include <trace/events/block.h>
25
26 #include <linux/blk-mq.h>
27 #include "blk.h"
28 #include "blk-mq.h"
29 #include "blk-mq-tag.h"
30
31 static DEFINE_MUTEX(all_q_mutex);
32 static LIST_HEAD(all_q_list);
33
34 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
35
36 /*
37  * Check if any of the ctx's have pending work in this hardware queue
38  */
39 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
40 {
41         unsigned int i;
42
43         for (i = 0; i < hctx->ctx_map.map_size; i++)
44                 if (hctx->ctx_map.map[i].word)
45                         return true;
46
47         return false;
48 }
49
50 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
51                                               struct blk_mq_ctx *ctx)
52 {
53         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
54 }
55
56 #define CTX_TO_BIT(hctx, ctx)   \
57         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
58
59 /*
60  * Mark this ctx as having pending work in this hardware queue
61  */
62 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
63                                      struct blk_mq_ctx *ctx)
64 {
65         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
66
67         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
68                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
69 }
70
71 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
72                                       struct blk_mq_ctx *ctx)
73 {
74         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
75
76         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
77 }
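
/*
 * Hedged illustration (not part of the original file): how a ctx's index_hw
 * is split into a word index and a bit offset by get_bm() and CTX_TO_BIT()
 * above, assuming the 8-bit words set up in blk_mq_alloc_bitmap() later in
 * this file. The function name is hypothetical.
 */
static inline void ctx_map_split_example(unsigned int index_hw)
{
	unsigned int bits_per_word = 8;			/* see blk_mq_alloc_bitmap() */
	unsigned int word = index_hw / bits_per_word;	/* which map[] entry */
	unsigned int bit = index_hw & (bits_per_word - 1); /* which bit in ->word */

	/* e.g. index_hw == 13 maps to map[1], bit 5 */
	pr_debug("ctx %u -> map[%u], bit %u\n", index_hw, word, bit);
}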
78
79 static int blk_mq_queue_enter(struct request_queue *q)
80 {
81         int ret;
82
83         __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
84         smp_wmb();
85
86         /* we have problems freezing the queue if it's initializing */
87         if (!blk_queue_dying(q) &&
88             (!blk_queue_bypass(q) || !blk_queue_init_done(q)))
89                 return 0;
90
91         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
92
93         spin_lock_irq(q->queue_lock);
94         ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
95                 !blk_queue_bypass(q) || blk_queue_dying(q),
96                 *q->queue_lock);
97         /* inc usage with the lock held so freeze_queue can't run here */
98         if (!ret && !blk_queue_dying(q))
99                 __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
100         else if (blk_queue_dying(q))
101                 ret = -ENODEV;
102         spin_unlock_irq(q->queue_lock);
103
104         return ret;
105 }
106
107 static void blk_mq_queue_exit(struct request_queue *q)
108 {
109         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
110 }
111
112 static void __blk_mq_drain_queue(struct request_queue *q)
113 {
114         while (true) {
115                 s64 count;
116
117                 spin_lock_irq(q->queue_lock);
118                 count = percpu_counter_sum(&q->mq_usage_counter);
119                 spin_unlock_irq(q->queue_lock);
120
121                 if (count == 0)
122                         break;
123                 blk_mq_run_queues(q, false);
124                 msleep(10);
125         }
126 }
127
128 /*
129  * Guarantee no request is in use, so we can change any data structure of
130  * the queue afterward.
131  */
132 static void blk_mq_freeze_queue(struct request_queue *q)
133 {
134         bool drain;
135
136         spin_lock_irq(q->queue_lock);
137         drain = !q->bypass_depth++;
138         queue_flag_set(QUEUE_FLAG_BYPASS, q);
139         spin_unlock_irq(q->queue_lock);
140
141         if (drain)
142                 __blk_mq_drain_queue(q);
143 }
144
145 void blk_mq_drain_queue(struct request_queue *q)
146 {
147         __blk_mq_drain_queue(q);
148 }
149
150 static void blk_mq_unfreeze_queue(struct request_queue *q)
151 {
152         bool wake = false;
153
154         spin_lock_irq(q->queue_lock);
155         if (!--q->bypass_depth) {
156                 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
157                 wake = true;
158         }
159         WARN_ON_ONCE(q->bypass_depth < 0);
160         spin_unlock_irq(q->queue_lock);
161         if (wake)
162                 wake_up_all(&q->mq_freeze_wq);
163 }
164
165 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
166 {
167         return blk_mq_has_free_tags(hctx->tags);
168 }
169 EXPORT_SYMBOL(blk_mq_can_queue);
170
171 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
172                                struct request *rq, unsigned int rw_flags)
173 {
174         if (blk_queue_io_stat(q))
175                 rw_flags |= REQ_IO_STAT;
176
177         INIT_LIST_HEAD(&rq->queuelist);
178         /* csd/requeue_work/fifo_time is initialized before use */
179         rq->q = q;
180         rq->mq_ctx = ctx;
181         rq->cmd_flags |= rw_flags;
182         /* do not touch atomic flags, it needs atomic ops against the timer */
183         rq->cpu = -1;
184         INIT_HLIST_NODE(&rq->hash);
185         RB_CLEAR_NODE(&rq->rb_node);
186         rq->rq_disk = NULL;
187         rq->part = NULL;
188         rq->start_time = jiffies;
189 #ifdef CONFIG_BLK_CGROUP
190         rq->rl = NULL;
191         set_start_time_ns(rq);
192         rq->io_start_time_ns = 0;
193 #endif
194         rq->nr_phys_segments = 0;
195 #if defined(CONFIG_BLK_DEV_INTEGRITY)
196         rq->nr_integrity_segments = 0;
197 #endif
198         rq->special = NULL;
199         /* tag was already set */
200         rq->errors = 0;
201
202         rq->extra_len = 0;
203         rq->sense_len = 0;
204         rq->resid_len = 0;
205         rq->sense = NULL;
206
207         INIT_LIST_HEAD(&rq->timeout_list);
208         rq->timeout = 0;
209
210         rq->end_io = NULL;
211         rq->end_io_data = NULL;
212         rq->next_rq = NULL;
213
214         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
215 }
216
217 static struct request *
218 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
219 {
220         struct request *rq;
221         unsigned int tag;
222
223         tag = blk_mq_get_tag(data);
224         if (tag != BLK_MQ_TAG_FAIL) {
225                 rq = data->hctx->tags->rqs[tag];
226
227                 rq->cmd_flags = 0;
228                 if (blk_mq_tag_busy(data->hctx)) {
229                         rq->cmd_flags = REQ_MQ_INFLIGHT;
230                         atomic_inc(&data->hctx->nr_active);
231                 }
232
233                 rq->tag = tag;
234                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
235                 return rq;
236         }
237
238         return NULL;
239 }
240
241 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
242                 bool reserved)
243 {
244         struct blk_mq_ctx *ctx;
245         struct blk_mq_hw_ctx *hctx;
246         struct request *rq;
247         struct blk_mq_alloc_data alloc_data;
248
249         if (blk_mq_queue_enter(q))
250                 return NULL;
251
252         ctx = blk_mq_get_ctx(q);
253         hctx = q->mq_ops->map_queue(q, ctx->cpu);
254         blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
255                         reserved, ctx, hctx);
256
257         rq = __blk_mq_alloc_request(&alloc_data, rw);
258         if (!rq && (gfp & __GFP_WAIT)) {
259                 __blk_mq_run_hw_queue(hctx);
260                 blk_mq_put_ctx(ctx);
261
262                 ctx = blk_mq_get_ctx(q);
263                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
264                 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
265                                 hctx);
266                 rq = __blk_mq_alloc_request(&alloc_data, rw);
267                 ctx = alloc_data.ctx;
268         }
269         blk_mq_put_ctx(ctx);
270         return rq;
271 }
272 EXPORT_SYMBOL(blk_mq_alloc_request);
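
/*
 * Hedged usage sketch (illustrative, not part of the original file): how a
 * driver might allocate a request for an internal command with this API and
 * release it again. The function name is hypothetical.
 */
static int example_send_internal_cmd(struct request_queue *q)
{
	struct request *rq;

	/* GFP_KERNEL includes __GFP_WAIT, so this may sleep for a free tag */
	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
	if (!rq)
		return -ENODEV;

	/* ... set up and use the driver PDU returned by blk_mq_rq_to_pdu(rq) ... */

	blk_mq_free_request(rq);
	return 0;
}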
273
274 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
275                                   struct blk_mq_ctx *ctx, struct request *rq)
276 {
277         const int tag = rq->tag;
278         struct request_queue *q = rq->q;
279
280         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
281                 atomic_dec(&hctx->nr_active);
282
283         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
284         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
285         blk_mq_queue_exit(q);
286 }
287
288 void blk_mq_free_request(struct request *rq)
289 {
290         struct blk_mq_ctx *ctx = rq->mq_ctx;
291         struct blk_mq_hw_ctx *hctx;
292         struct request_queue *q = rq->q;
293
294         ctx->rq_completed[rq_is_sync(rq)]++;
295
296         hctx = q->mq_ops->map_queue(q, ctx->cpu);
297         __blk_mq_free_request(hctx, ctx, rq);
298 }
299
300 /*
301  * Clone all relevant state from a request that has been put on hold in
302  * the flush state machine into the preallocated flush request that hangs
303  * off the request queue.
304  *
305  * To the driver the flush request should be invisible; that's why we
306  * impersonate the original request here.
307  */
308 void blk_mq_clone_flush_request(struct request *flush_rq,
309                 struct request *orig_rq)
310 {
311         struct blk_mq_hw_ctx *hctx =
312                 orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
313
314         flush_rq->mq_ctx = orig_rq->mq_ctx;
315         flush_rq->tag = orig_rq->tag;
316         memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
317                 hctx->cmd_size);
318 }
319
320 inline void __blk_mq_end_io(struct request *rq, int error)
321 {
322         blk_account_io_done(rq);
323
324         if (rq->end_io) {
325                 rq->end_io(rq, error);
326         } else {
327                 if (unlikely(blk_bidi_rq(rq)))
328                         blk_mq_free_request(rq->next_rq);
329                 blk_mq_free_request(rq);
330         }
331 }
332 EXPORT_SYMBOL(__blk_mq_end_io);
333
334 void blk_mq_end_io(struct request *rq, int error)
335 {
336         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
337                 BUG();
338         __blk_mq_end_io(rq, error);
339 }
340 EXPORT_SYMBOL(blk_mq_end_io);
341
342 static void __blk_mq_complete_request_remote(void *data)
343 {
344         struct request *rq = data;
345
346         rq->q->softirq_done_fn(rq);
347 }
348
349 static void blk_mq_ipi_complete_request(struct request *rq)
350 {
351         struct blk_mq_ctx *ctx = rq->mq_ctx;
352         bool shared = false;
353         int cpu;
354
355         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
356                 rq->q->softirq_done_fn(rq);
357                 return;
358         }
359
360         cpu = get_cpu();
361         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
362                 shared = cpus_share_cache(cpu, ctx->cpu);
363
364         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
365                 rq->csd.func = __blk_mq_complete_request_remote;
366                 rq->csd.info = rq;
367                 rq->csd.flags = 0;
368                 smp_call_function_single_async(ctx->cpu, &rq->csd);
369         } else {
370                 rq->q->softirq_done_fn(rq);
371         }
372         put_cpu();
373 }
374
375 void __blk_mq_complete_request(struct request *rq)
376 {
377         struct request_queue *q = rq->q;
378
379         if (!q->softirq_done_fn)
380                 blk_mq_end_io(rq, rq->errors);
381         else
382                 blk_mq_ipi_complete_request(rq);
383 }
384
385 /**
386  * blk_mq_complete_request - end I/O on a request
387  * @rq:         the request being processed
388  *
389  * Description:
390  *      Ends all I/O on a request. It does not handle partial completions.
391  *      The actual completion happens out-of-order, through an IPI handler.
392  **/
393 void blk_mq_complete_request(struct request *rq)
394 {
395         struct request_queue *q = rq->q;
396
397         if (unlikely(blk_should_fake_timeout(q)))
398                 return;
399         if (!blk_mark_rq_complete(rq))
400                 __blk_mq_complete_request(rq);
401 }
402 EXPORT_SYMBOL(blk_mq_complete_request);
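
/*
 * Hedged sketch (illustrative): a typical driver completion path. The hard
 * irq handler records the outcome and defers to blk_mq_complete_request();
 * the driver's blk_mq_ops ->complete callback (assumed here to be what
 * q->softirq_done_fn points at) then ends the request. Names are
 * hypothetical.
 */
static void example_softirq_done(struct request *rq)
{
	blk_mq_end_io(rq, rq->errors ? -EIO : 0);
}

static void example_hw_irq(struct request *rq, bool failed)
{
	rq->errors = failed ? -EIO : 0;
	blk_mq_complete_request(rq);
}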
403
404 static void blk_mq_start_request(struct request *rq, bool last)
405 {
406         struct request_queue *q = rq->q;
407
408         trace_block_rq_issue(q, rq);
409
410         rq->resid_len = blk_rq_bytes(rq);
411         if (unlikely(blk_bidi_rq(rq)))
412                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
413
414         blk_add_timer(rq);
415
416         /*
417          * Mark us as started and clear complete. Complete might have been
418          * set if requeue raced with timeout, which then marked it as
419          * complete. So be sure to clear complete again when we start
420          * the request, otherwise we'll ignore the completion event.
421          */
422         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
423                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
424         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
425                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
426
427         if (q->dma_drain_size && blk_rq_bytes(rq)) {
428                 /*
429                  * Make sure space for the drain appears.  We know we can do
430                  * this because max_hw_segments has been adjusted to be one
431                  * fewer than the device can handle.
432                  */
433                 rq->nr_phys_segments++;
434         }
435
436         /*
437          * Flag the last request in the series so that drivers know when IO
438          * should be kicked off, if they don't do it on a per-request basis.
439          *
440  * Note: the flag isn't the only condition on which drivers should kick off
441  * IO. If the drive is busy, the last request might not have the bit set.
442          */
443         if (last)
444                 rq->cmd_flags |= REQ_END;
445 }
446
447 static void __blk_mq_requeue_request(struct request *rq)
448 {
449         struct request_queue *q = rq->q;
450
451         trace_block_rq_requeue(q, rq);
452         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
453
454         rq->cmd_flags &= ~REQ_END;
455
456         if (q->dma_drain_size && blk_rq_bytes(rq))
457                 rq->nr_phys_segments--;
458 }
459
460 void blk_mq_requeue_request(struct request *rq)
461 {
462         __blk_mq_requeue_request(rq);
463         blk_clear_rq_complete(rq);
464
465         BUG_ON(blk_queued_rq(rq));
466         blk_mq_add_to_requeue_list(rq, true);
467 }
468 EXPORT_SYMBOL(blk_mq_requeue_request);
469
470 static void blk_mq_requeue_work(struct work_struct *work)
471 {
472         struct request_queue *q =
473                 container_of(work, struct request_queue, requeue_work);
474         LIST_HEAD(rq_list);
475         struct request *rq, *next;
476         unsigned long flags;
477
478         spin_lock_irqsave(&q->requeue_lock, flags);
479         list_splice_init(&q->requeue_list, &rq_list);
480         spin_unlock_irqrestore(&q->requeue_lock, flags);
481
482         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
483                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
484                         continue;
485
486                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
487                 list_del_init(&rq->queuelist);
488                 blk_mq_insert_request(rq, true, false, false);
489         }
490
491         while (!list_empty(&rq_list)) {
492                 rq = list_entry(rq_list.next, struct request, queuelist);
493                 list_del_init(&rq->queuelist);
494                 blk_mq_insert_request(rq, false, false, false);
495         }
496
497         blk_mq_run_queues(q, false);
498 }
499
500 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
501 {
502         struct request_queue *q = rq->q;
503         unsigned long flags;
504
505         /*
506          * We abuse this flag that is otherwise used by the I/O scheduler to
507  * request head insertion from the workqueue.
508          */
509         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
510
511         spin_lock_irqsave(&q->requeue_lock, flags);
512         if (at_head) {
513                 rq->cmd_flags |= REQ_SOFTBARRIER;
514                 list_add(&rq->queuelist, &q->requeue_list);
515         } else {
516                 list_add_tail(&rq->queuelist, &q->requeue_list);
517         }
518         spin_unlock_irqrestore(&q->requeue_lock, flags);
519 }
520 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
521
522 void blk_mq_kick_requeue_list(struct request_queue *q)
523 {
524         kblockd_schedule_work(&q->requeue_work);
525 }
526 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
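
/*
 * Hedged sketch (illustrative): a driver that cannot process a request right
 * now can park it on the requeue list and kick the requeue work, which will
 * re-insert it via blk_mq_requeue_work() above. The function name is
 * hypothetical.
 */
static void example_defer_request(struct request *rq)
{
	blk_mq_requeue_request(rq);
	blk_mq_kick_requeue_list(rq->q);
}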
527
528 static inline bool is_flush_request(struct request *rq, unsigned int tag)
529 {
530         return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
531                         rq->q->flush_rq->tag == tag);
532 }
533
534 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
535 {
536         struct request *rq = tags->rqs[tag];
537
538         if (!is_flush_request(rq, tag))
539                 return rq;
540
541         return rq->q->flush_rq;
542 }
543 EXPORT_SYMBOL(blk_mq_tag_to_rq);
544
545 struct blk_mq_timeout_data {
546         struct blk_mq_hw_ctx *hctx;
547         unsigned long *next;
548         unsigned int *next_set;
549 };
550
551 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
552 {
553         struct blk_mq_timeout_data *data = __data;
554         struct blk_mq_hw_ctx *hctx = data->hctx;
555         unsigned int tag;
556
557         /* It may not be in flight yet (this is where
558          * the REQ_ATOM_STARTED flag comes in). The requests are
559          * statically allocated, so we know it's always safe to access the
560          * memory associated with a bit offset into ->rqs[].
561          */
562         tag = 0;
563         do {
564                 struct request *rq;
565
566                 tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
567                 if (tag >= hctx->tags->nr_tags)
568                         break;
569
570                 rq = blk_mq_tag_to_rq(hctx->tags, tag++);
571                 if (rq->q != hctx->queue)
572                         continue;
573                 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
574                         continue;
575
576                 blk_rq_check_expired(rq, data->next, data->next_set);
577         } while (1);
578 }
579
580 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
581                                         unsigned long *next,
582                                         unsigned int *next_set)
583 {
584         struct blk_mq_timeout_data data = {
585                 .hctx           = hctx,
586                 .next           = next,
587                 .next_set       = next_set,
588         };
589
590         /*
591          * Ask the tagging code to iterate busy requests, so we can
592          * check them for timeout.
593          */
594         blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
595 }
596
597 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
598 {
599         struct request_queue *q = rq->q;
600
601         /*
602          * We know that complete is set at this point. If STARTED isn't set
603          * anymore, then the request isn't active and the "timeout" should
604          * just be ignored. This can happen due to the bitflag ordering.
605          * Timeout first checks if STARTED is set, and if it is, assumes
606          * the request is active. But if we race with completion, then
607          * both flags will get cleared. So check here again, and ignore
608          * a timeout event with a request that isn't active.
609          */
610         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
611                 return BLK_EH_NOT_HANDLED;
612
613         if (!q->mq_ops->timeout)
614                 return BLK_EH_RESET_TIMER;
615
616         return q->mq_ops->timeout(rq);
617 }
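
/*
 * Hedged sketch (illustrative) of a blk_mq_ops ->timeout handler, which is
 * what blk_mq_rq_timed_out() above ends up calling. example_abort_cmd() is a
 * hypothetical driver helper.
 */
static enum blk_eh_timer_return example_timeout(struct request *rq)
{
	/* try to abort the command in hardware; if that worked, we're done */
	if (example_abort_cmd(blk_mq_rq_to_pdu(rq)))
		return BLK_EH_HANDLED;

	/* otherwise give the command more time */
	return BLK_EH_RESET_TIMER;
}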
618
619 static void blk_mq_rq_timer(unsigned long data)
620 {
621         struct request_queue *q = (struct request_queue *) data;
622         struct blk_mq_hw_ctx *hctx;
623         unsigned long next = 0;
624         int i, next_set = 0;
625
626         queue_for_each_hw_ctx(q, hctx, i) {
627                 /*
628                  * If no software queues are currently mapped to this
629                  * hardware queue, there's nothing to check
630                  */
631                 if (!hctx->nr_ctx || !hctx->tags)
632                         continue;
633
634                 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
635         }
636
637         if (next_set) {
638                 next = blk_rq_timeout(round_jiffies_up(next));
639                 mod_timer(&q->timeout, next);
640         } else {
641                 queue_for_each_hw_ctx(q, hctx, i)
642                         blk_mq_tag_idle(hctx);
643         }
644 }
645
646 /*
647  * Reverse check our software queue for entries that we could potentially
648  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
649  * too much time checking for merges.
650  */
651 static bool blk_mq_attempt_merge(struct request_queue *q,
652                                  struct blk_mq_ctx *ctx, struct bio *bio)
653 {
654         struct request *rq;
655         int checked = 8;
656
657         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
658                 int el_ret;
659
660                 if (!checked--)
661                         break;
662
663                 if (!blk_rq_merge_ok(rq, bio))
664                         continue;
665
666                 el_ret = blk_try_merge(rq, bio);
667                 if (el_ret == ELEVATOR_BACK_MERGE) {
668                         if (bio_attempt_back_merge(q, rq, bio)) {
669                                 ctx->rq_merged++;
670                                 return true;
671                         }
672                         break;
673                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
674                         if (bio_attempt_front_merge(q, rq, bio)) {
675                                 ctx->rq_merged++;
676                                 return true;
677                         }
678                         break;
679                 }
680         }
681
682         return false;
683 }
684
685 /*
686  * Process software queues that have been marked busy, splicing them
687  * to the for-dispatch list.
688  */
689 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
690 {
691         struct blk_mq_ctx *ctx;
692         int i;
693
694         for (i = 0; i < hctx->ctx_map.map_size; i++) {
695                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
696                 unsigned int off, bit;
697
698                 if (!bm->word)
699                         continue;
700
701                 bit = 0;
702                 off = i * hctx->ctx_map.bits_per_word;
703                 do {
704                         bit = find_next_bit(&bm->word, bm->depth, bit);
705                         if (bit >= bm->depth)
706                                 break;
707
708                         ctx = hctx->ctxs[bit + off];
709                         clear_bit(bit, &bm->word);
710                         spin_lock(&ctx->lock);
711                         list_splice_tail_init(&ctx->rq_list, list);
712                         spin_unlock(&ctx->lock);
713
714                         bit++;
715                 } while (1);
716         }
717 }
718
719 /*
720  * Run this hardware queue, pulling any software queues mapped to it in.
721  * Note that this function currently has various problems around ordering
722  * of IO. In particular, we'd like FIFO behaviour on handling existing
723  * items on the hctx->dispatch list. Ignore that for now.
724  */
725 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
726 {
727         struct request_queue *q = hctx->queue;
728         struct request *rq;
729         LIST_HEAD(rq_list);
730         int queued;
731
732         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
733
734         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
735                 return;
736
737         hctx->run++;
738
739         /*
740          * Touch any software queue that has pending entries.
741          */
742         flush_busy_ctxs(hctx, &rq_list);
743
744         /*
745          * If we have previous entries on our dispatch list, grab them
746          * and stuff them at the front for more fair dispatch.
747          */
748         if (!list_empty_careful(&hctx->dispatch)) {
749                 spin_lock(&hctx->lock);
750                 if (!list_empty(&hctx->dispatch))
751                         list_splice_init(&hctx->dispatch, &rq_list);
752                 spin_unlock(&hctx->lock);
753         }
754
755         /*
756          * Now process all the entries, sending them to the driver.
757          */
758         queued = 0;
759         while (!list_empty(&rq_list)) {
760                 int ret;
761
762                 rq = list_first_entry(&rq_list, struct request, queuelist);
763                 list_del_init(&rq->queuelist);
764
765                 blk_mq_start_request(rq, list_empty(&rq_list));
766
767                 ret = q->mq_ops->queue_rq(hctx, rq);
768                 switch (ret) {
769                 case BLK_MQ_RQ_QUEUE_OK:
770                         queued++;
771                         continue;
772                 case BLK_MQ_RQ_QUEUE_BUSY:
773                         list_add(&rq->queuelist, &rq_list);
774                         __blk_mq_requeue_request(rq);
775                         break;
776                 default:
777                         pr_err("blk-mq: bad return on queue: %d\n", ret);
778                 case BLK_MQ_RQ_QUEUE_ERROR:
779                         rq->errors = -EIO;
780                         blk_mq_end_io(rq, rq->errors);
781                         break;
782                 }
783
784                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
785                         break;
786         }
787
788         if (!queued)
789                 hctx->dispatched[0]++;
790         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
791                 hctx->dispatched[ilog2(queued) + 1]++;
792
793         /*
794          * Any items that need requeuing? Stuff them into hctx->dispatch,
795          * that is where we will continue on next queue run.
796          */
797         if (!list_empty(&rq_list)) {
798                 spin_lock(&hctx->lock);
799                 list_splice(&rq_list, &hctx->dispatch);
800                 spin_unlock(&hctx->lock);
801         }
802 }
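
/*
 * Hedged sketch (illustrative) of a driver ->queue_rq implementation, showing
 * the return-value contract the dispatch loop above relies on.
 * example_hw_has_room() and example_hw_submit() are hypothetical helpers.
 */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (!example_hw_has_room(hctx->driver_data))
		return BLK_MQ_RQ_QUEUE_BUSY;	/* core requeues and stops the loop */

	if (example_hw_submit(hctx->driver_data, blk_mq_rq_to_pdu(rq)))
		return BLK_MQ_RQ_QUEUE_ERROR;	/* core fails the request with -EIO */

	return BLK_MQ_RQ_QUEUE_OK;
}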
803
804 /*
805  * It'd be great if the workqueue API had a way to pass
806  * in a mask and had some smarts for more clever placement.
807  * For now we just round-robin here, switching for every
808  * BLK_MQ_CPU_WORK_BATCH queued items.
809  */
810 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
811 {
812         int cpu = hctx->next_cpu;
813
814         if (--hctx->next_cpu_batch <= 0) {
815                 int next_cpu;
816
817                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
818                 if (next_cpu >= nr_cpu_ids)
819                         next_cpu = cpumask_first(hctx->cpumask);
820
821                 hctx->next_cpu = next_cpu;
822                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
823         }
824
825         return cpu;
826 }
827
828 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
829 {
830         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
831                 return;
832
833         if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
834                 __blk_mq_run_hw_queue(hctx);
835         else if (hctx->queue->nr_hw_queues == 1)
836                 kblockd_schedule_delayed_work(&hctx->run_work, 0);
837         else {
838                 unsigned int cpu;
839
840                 cpu = blk_mq_hctx_next_cpu(hctx);
841                 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
842         }
843 }
844
845 void blk_mq_run_queues(struct request_queue *q, bool async)
846 {
847         struct blk_mq_hw_ctx *hctx;
848         int i;
849
850         queue_for_each_hw_ctx(q, hctx, i) {
851                 if ((!blk_mq_hctx_has_pending(hctx) &&
852                     list_empty_careful(&hctx->dispatch)) ||
853                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
854                         continue;
855
856                 preempt_disable();
857                 blk_mq_run_hw_queue(hctx, async);
858                 preempt_enable();
859         }
860 }
861 EXPORT_SYMBOL(blk_mq_run_queues);
862
863 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
864 {
865         cancel_delayed_work(&hctx->run_work);
866         cancel_delayed_work(&hctx->delay_work);
867         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
868 }
869 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
870
871 void blk_mq_stop_hw_queues(struct request_queue *q)
872 {
873         struct blk_mq_hw_ctx *hctx;
874         int i;
875
876         queue_for_each_hw_ctx(q, hctx, i)
877                 blk_mq_stop_hw_queue(hctx);
878 }
879 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
880
881 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
882 {
883         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
884
885         preempt_disable();
886         __blk_mq_run_hw_queue(hctx);
887         preempt_enable();
888 }
889 EXPORT_SYMBOL(blk_mq_start_hw_queue);
890
891 void blk_mq_start_hw_queues(struct request_queue *q)
892 {
893         struct blk_mq_hw_ctx *hctx;
894         int i;
895
896         queue_for_each_hw_ctx(q, hctx, i)
897                 blk_mq_start_hw_queue(hctx);
898 }
899 EXPORT_SYMBOL(blk_mq_start_hw_queues);
900
901
902 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
903 {
904         struct blk_mq_hw_ctx *hctx;
905         int i;
906
907         queue_for_each_hw_ctx(q, hctx, i) {
908                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
909                         continue;
910
911                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
912                 preempt_disable();
913                 blk_mq_run_hw_queue(hctx, async);
914                 preempt_enable();
915         }
916 }
917 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
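
/*
 * Hedged sketch (illustrative) of the stop/restart pattern built on the
 * helpers above: stop the hardware queue when the device ring is full, and
 * restart all stopped queues from the completion path once space frees up.
 * example_ring_full() is a hypothetical driver helper.
 */
static int example_queue_rq_or_stop(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (example_ring_full(hctx->driver_data)) {
		blk_mq_stop_hw_queue(hctx);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	/* ... hand the request to the hardware ... */
	return BLK_MQ_RQ_QUEUE_OK;
}

static void example_ring_space_freed(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true);
}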
918
919 static void blk_mq_run_work_fn(struct work_struct *work)
920 {
921         struct blk_mq_hw_ctx *hctx;
922
923         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
924
925         __blk_mq_run_hw_queue(hctx);
926 }
927
928 static void blk_mq_delay_work_fn(struct work_struct *work)
929 {
930         struct blk_mq_hw_ctx *hctx;
931
932         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
933
934         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
935                 __blk_mq_run_hw_queue(hctx);
936 }
937
938 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
939 {
940         unsigned long tmo = msecs_to_jiffies(msecs);
941
942         if (hctx->queue->nr_hw_queues == 1)
943                 kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
944         else {
945                 unsigned int cpu;
946
947                 cpu = blk_mq_hctx_next_cpu(hctx);
948                 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
949         }
950 }
951 EXPORT_SYMBOL(blk_mq_delay_queue);
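
/*
 * Hedged sketch (illustrative): instead of letting the queue be re-run
 * immediately after a BUSY return, a driver can ask for a delayed re-run,
 * e.g. roughly 3 milliseconds later. The function name is hypothetical.
 */
static void example_retry_later(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_delay_queue(hctx, 3);
}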
952
953 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
954                                     struct request *rq, bool at_head)
955 {
956         struct blk_mq_ctx *ctx = rq->mq_ctx;
957
958         trace_block_rq_insert(hctx->queue, rq);
959
960         if (at_head)
961                 list_add(&rq->queuelist, &ctx->rq_list);
962         else
963                 list_add_tail(&rq->queuelist, &ctx->rq_list);
964
965         blk_mq_hctx_mark_pending(hctx, ctx);
966 }
967
968 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
969                 bool async)
970 {
971         struct request_queue *q = rq->q;
972         struct blk_mq_hw_ctx *hctx;
973         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
974
975         current_ctx = blk_mq_get_ctx(q);
976         if (!cpu_online(ctx->cpu))
977                 rq->mq_ctx = ctx = current_ctx;
978
979         hctx = q->mq_ops->map_queue(q, ctx->cpu);
980
981         if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
982             !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
983                 blk_insert_flush(rq);
984         } else {
985                 spin_lock(&ctx->lock);
986                 __blk_mq_insert_request(hctx, rq, at_head);
987                 spin_unlock(&ctx->lock);
988         }
989
990         if (run_queue)
991                 blk_mq_run_hw_queue(hctx, async);
992
993         blk_mq_put_ctx(current_ctx);
994 }
995
996 static void blk_mq_insert_requests(struct request_queue *q,
997                                      struct blk_mq_ctx *ctx,
998                                      struct list_head *list,
999                                      int depth,
1000                                      bool from_schedule)
1001
1002 {
1003         struct blk_mq_hw_ctx *hctx;
1004         struct blk_mq_ctx *current_ctx;
1005
1006         trace_block_unplug(q, depth, !from_schedule);
1007
1008         current_ctx = blk_mq_get_ctx(q);
1009
1010         if (!cpu_online(ctx->cpu))
1011                 ctx = current_ctx;
1012         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1013
1014         /*
1015          * preemption doesn't flush plug list, so it's possible ctx->cpu is
1016          * offline now
1017          */
1018         spin_lock(&ctx->lock);
1019         while (!list_empty(list)) {
1020                 struct request *rq;
1021
1022                 rq = list_first_entry(list, struct request, queuelist);
1023                 list_del_init(&rq->queuelist);
1024                 rq->mq_ctx = ctx;
1025                 __blk_mq_insert_request(hctx, rq, false);
1026         }
1027         spin_unlock(&ctx->lock);
1028
1029         blk_mq_run_hw_queue(hctx, from_schedule);
1030         blk_mq_put_ctx(current_ctx);
1031 }
1032
1033 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1034 {
1035         struct request *rqa = container_of(a, struct request, queuelist);
1036         struct request *rqb = container_of(b, struct request, queuelist);
1037
1038         return !(rqa->mq_ctx < rqb->mq_ctx ||
1039                  (rqa->mq_ctx == rqb->mq_ctx &&
1040                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1041 }
1042
1043 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1044 {
1045         struct blk_mq_ctx *this_ctx;
1046         struct request_queue *this_q;
1047         struct request *rq;
1048         LIST_HEAD(list);
1049         LIST_HEAD(ctx_list);
1050         unsigned int depth;
1051
1052         list_splice_init(&plug->mq_list, &list);
1053
1054         list_sort(NULL, &list, plug_ctx_cmp);
1055
1056         this_q = NULL;
1057         this_ctx = NULL;
1058         depth = 0;
1059
1060         while (!list_empty(&list)) {
1061                 rq = list_entry_rq(list.next);
1062                 list_del_init(&rq->queuelist);
1063                 BUG_ON(!rq->q);
1064                 if (rq->mq_ctx != this_ctx) {
1065                         if (this_ctx) {
1066                                 blk_mq_insert_requests(this_q, this_ctx,
1067                                                         &ctx_list, depth,
1068                                                         from_schedule);
1069                         }
1070
1071                         this_ctx = rq->mq_ctx;
1072                         this_q = rq->q;
1073                         depth = 0;
1074                 }
1075
1076                 depth++;
1077                 list_add_tail(&rq->queuelist, &ctx_list);
1078         }
1079
1080         /*
1081          * If 'this_ctx' is set, we know we have entries to complete
1082          * on 'ctx_list'. Do those.
1083          */
1084         if (this_ctx) {
1085                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1086                                        from_schedule);
1087         }
1088 }
1089
1090 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1091 {
1092         init_request_from_bio(rq, bio);
1093
1094         if (blk_do_io_stat(rq))
1095                 blk_account_io_start(rq, 1);
1096 }
1097
1098 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1099                                          struct blk_mq_ctx *ctx,
1100                                          struct request *rq, struct bio *bio)
1101 {
1102         struct request_queue *q = hctx->queue;
1103
1104         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
1105                 blk_mq_bio_to_request(rq, bio);
1106                 spin_lock(&ctx->lock);
1107 insert_rq:
1108                 __blk_mq_insert_request(hctx, rq, false);
1109                 spin_unlock(&ctx->lock);
1110                 return false;
1111         } else {
1112                 spin_lock(&ctx->lock);
1113                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1114                         blk_mq_bio_to_request(rq, bio);
1115                         goto insert_rq;
1116                 }
1117
1118                 spin_unlock(&ctx->lock);
1119                 __blk_mq_free_request(hctx, ctx, rq);
1120                 return true;
1121         }
1122 }
1123
1124 struct blk_map_ctx {
1125         struct blk_mq_hw_ctx *hctx;
1126         struct blk_mq_ctx *ctx;
1127 };
1128
1129 static struct request *blk_mq_map_request(struct request_queue *q,
1130                                           struct bio *bio,
1131                                           struct blk_map_ctx *data)
1132 {
1133         struct blk_mq_hw_ctx *hctx;
1134         struct blk_mq_ctx *ctx;
1135         struct request *rq;
1136         int rw = bio_data_dir(bio);
1137         struct blk_mq_alloc_data alloc_data;
1138
1139         if (unlikely(blk_mq_queue_enter(q))) {
1140                 bio_endio(bio, -EIO);
1141                 return NULL;
1142         }
1143
1144         ctx = blk_mq_get_ctx(q);
1145         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1146
1147         if (rw_is_sync(bio->bi_rw))
1148                 rw |= REQ_SYNC;
1149
1150         trace_block_getrq(q, bio, rw);
1151         blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1152                         hctx);
1153         rq = __blk_mq_alloc_request(&alloc_data, rw);
1154         if (unlikely(!rq)) {
1155                 __blk_mq_run_hw_queue(hctx);
1156                 blk_mq_put_ctx(ctx);
1157                 trace_block_sleeprq(q, bio, rw);
1158
1159                 ctx = blk_mq_get_ctx(q);
1160                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1161                 blk_mq_set_alloc_data(&alloc_data, q,
1162                                 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1163                 rq = __blk_mq_alloc_request(&alloc_data, rw);
1164                 ctx = alloc_data.ctx;
1165                 hctx = alloc_data.hctx;
1166         }
1167
1168         hctx->queued++;
1169         data->hctx = hctx;
1170         data->ctx = ctx;
1171         return rq;
1172 }
1173
1174 /*
1175  * Multiple hardware queue variant. This will not use per-process plugs,
1176  * but will attempt to bypass the hctx queueing if we can go straight to
1177  * hardware for SYNC IO.
1178  */
1179 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1180 {
1181         const int is_sync = rw_is_sync(bio->bi_rw);
1182         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1183         struct blk_map_ctx data;
1184         struct request *rq;
1185
1186         blk_queue_bounce(q, &bio);
1187
1188         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1189                 bio_endio(bio, -EIO);
1190                 return;
1191         }
1192
1193         rq = blk_mq_map_request(q, bio, &data);
1194         if (unlikely(!rq))
1195                 return;
1196
1197         if (unlikely(is_flush_fua)) {
1198                 blk_mq_bio_to_request(rq, bio);
1199                 blk_insert_flush(rq);
1200                 goto run_queue;
1201         }
1202
1203         if (is_sync) {
1204                 int ret;
1205
1206                 blk_mq_bio_to_request(rq, bio);
1207                 blk_mq_start_request(rq, true);
1208
1209                 /*
1210                  * If the queue returns OK, we are done. On error, kill it. Any
1211                  * other return (busy): just add it to our list, as we previously
1212                  * would have done.
1213                  */
1214                 ret = q->mq_ops->queue_rq(data.hctx, rq);
1215                 if (ret == BLK_MQ_RQ_QUEUE_OK)
1216                         goto done;
1217                 else {
1218                         __blk_mq_requeue_request(rq);
1219
1220                         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1221                                 rq->errors = -EIO;
1222                                 blk_mq_end_io(rq, rq->errors);
1223                                 goto done;
1224                         }
1225                 }
1226         }
1227
1228         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1229                 /*
1230                  * For a SYNC request, send it to the hardware immediately. For
1231                  * an ASYNC request, just ensure that we run it later on. The
1232                  * latter allows for merging opportunities and more efficient
1233                  * dispatching.
1234                  */
1235 run_queue:
1236                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1237         }
1238 done:
1239         blk_mq_put_ctx(data.ctx);
1240 }
1241
1242 /*
1243  * Single hardware queue variant. This will attempt to use any per-process
1244  * plug for merging and IO deferral.
1245  */
1246 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1247 {
1248         const int is_sync = rw_is_sync(bio->bi_rw);
1249         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1250         unsigned int use_plug, request_count = 0;
1251         struct blk_map_ctx data;
1252         struct request *rq;
1253
1254         /*
1255          * Use the per-process plug only for async, non-flush IO; flush/FUA
1256          * and sync requests go straight to the hardware queue.
1257          */
1258         use_plug = !is_flush_fua && !is_sync;
1259
1260         blk_queue_bounce(q, &bio);
1261
1262         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1263                 bio_endio(bio, -EIO);
1264                 return;
1265         }
1266
1267         if (use_plug && !blk_queue_nomerges(q) &&
1268             blk_attempt_plug_merge(q, bio, &request_count))
1269                 return;
1270
1271         rq = blk_mq_map_request(q, bio, &data);
1272         if (unlikely(!rq))
1273                 return;
1274
1275         if (unlikely(is_flush_fua)) {
1276                 blk_mq_bio_to_request(rq, bio);
1277                 blk_insert_flush(rq);
1278                 goto run_queue;
1279         }
1280
1281         /*
1282          * If a task plug currently exists, use it: since this is completely
1283          * lockless, we can temporarily store requests there until the task is
1284          * either done or scheduled away.
1285          */
1286         if (use_plug) {
1287                 struct blk_plug *plug = current->plug;
1288
1289                 if (plug) {
1290                         blk_mq_bio_to_request(rq, bio);
1291                         if (list_empty(&plug->mq_list))
1292                                 trace_block_plug(q);
1293                         else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1294                                 blk_flush_plug_list(plug, false);
1295                                 trace_block_plug(q);
1296                         }
1297                         list_add_tail(&rq->queuelist, &plug->mq_list);
1298                         blk_mq_put_ctx(data.ctx);
1299                         return;
1300                 }
1301         }
1302
1303         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1304                 /*
1305                  * For a SYNC request, send it to the hardware immediately. For
1306                  * an ASYNC request, just ensure that we run it later on. The
1307                  * latter allows for merging opportunities and more efficient
1308                  * dispatching.
1309                  */
1310 run_queue:
1311                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1312         }
1313
1314         blk_mq_put_ctx(data.ctx);
1315 }
1316
1317 /*
1318  * Default mapping to a software queue, since we use one per CPU.
1319  */
1320 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1321 {
1322         return q->queue_hw_ctx[q->mq_map[cpu]];
1323 }
1324 EXPORT_SYMBOL(blk_mq_map_queue);
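
/*
 * Hedged sketch (illustrative): drivers that don't need a custom CPU-to-queue
 * mapping typically plug this default helper straight into their ops table.
 * example_queue_rq refers to the hypothetical handler sketched earlier.
 */
static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,
};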
1325
1326 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1327                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1328 {
1329         struct page *page;
1330
1331         if (tags->rqs && set->ops->exit_request) {
1332                 int i;
1333
1334                 for (i = 0; i < tags->nr_tags; i++) {
1335                         if (!tags->rqs[i])
1336                                 continue;
1337                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1338                                                 hctx_idx, i);
1339                 }
1340         }
1341
1342         while (!list_empty(&tags->page_list)) {
1343                 page = list_first_entry(&tags->page_list, struct page, lru);
1344                 list_del_init(&page->lru);
1345                 __free_pages(page, page->private);
1346         }
1347
1348         kfree(tags->rqs);
1349
1350         blk_mq_free_tags(tags);
1351 }
1352
1353 static size_t order_to_size(unsigned int order)
1354 {
1355         return (size_t)PAGE_SIZE << order;
1356 }
1357
1358 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1359                 unsigned int hctx_idx)
1360 {
1361         struct blk_mq_tags *tags;
1362         unsigned int i, j, entries_per_page, max_order = 4;
1363         size_t rq_size, left;
1364
1365         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1366                                 set->numa_node);
1367         if (!tags)
1368                 return NULL;
1369
1370         INIT_LIST_HEAD(&tags->page_list);
1371
1372         tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
1373                                         GFP_KERNEL, set->numa_node);
1374         if (!tags->rqs) {
1375                 blk_mq_free_tags(tags);
1376                 return NULL;
1377         }
1378
1379         /*
1380          * rq_size is the size of the request plus driver payload, rounded
1381          * to the cacheline size
1382          */
1383         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1384                                 cache_line_size());
1385         left = rq_size * set->queue_depth;
1386
1387         for (i = 0; i < set->queue_depth; ) {
1388                 int this_order = max_order;
1389                 struct page *page;
1390                 int to_do;
1391                 void *p;
1392
1393                 while (left < order_to_size(this_order - 1) && this_order)
1394                         this_order--;
1395
1396                 do {
1397                         page = alloc_pages_node(set->numa_node, GFP_KERNEL,
1398                                                 this_order);
1399                         if (page)
1400                                 break;
1401                         if (!this_order--)
1402                                 break;
1403                         if (order_to_size(this_order) < rq_size)
1404                                 break;
1405                 } while (1);
1406
1407                 if (!page)
1408                         goto fail;
1409
1410                 page->private = this_order;
1411                 list_add_tail(&page->lru, &tags->page_list);
1412
1413                 p = page_address(page);
1414                 entries_per_page = order_to_size(this_order) / rq_size;
1415                 to_do = min(entries_per_page, set->queue_depth - i);
1416                 left -= to_do * rq_size;
1417                 for (j = 0; j < to_do; j++) {
1418                         tags->rqs[i] = p;
1419                         if (set->ops->init_request) {
1420                                 if (set->ops->init_request(set->driver_data,
1421                                                 tags->rqs[i], hctx_idx, i,
1422                                                 set->numa_node))
1423                                         goto fail;
1424                         }
1425
1426                         p += rq_size;
1427                         i++;
1428                 }
1429         }
1430
1431         return tags;
1432
1433 fail:
1434         pr_warn("%s: failed to allocate requests\n", __func__);
1435         blk_mq_free_rq_map(set, tags, hctx_idx);
1436         return NULL;
1437 }
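
/*
 * Hedged sketch (illustrative): the per-request driver payload carved out of
 * the pages above is sized by the tag set's cmd_size and handed to the
 * driver's ->init_request callback once per request, e.g. with
 * set->cmd_size = sizeof(struct example_cmd). Names are hypothetical.
 */
struct example_cmd {
	u64 hw_cookie;
};

static int example_init_request(void *data, struct request *rq,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->hw_cookie = 0;
	return 0;
}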
1438
1439 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1440 {
1441         kfree(bitmap->map);
1442 }
1443
1444 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1445 {
1446         unsigned int bpw = 8, total, num_maps, i;
1447
1448         bitmap->bits_per_word = bpw;
1449
1450         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1451         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1452                                         GFP_KERNEL, node);
1453         if (!bitmap->map)
1454                 return -ENOMEM;
1455
1456         bitmap->map_size = num_maps;
1457
1458         total = nr_cpu_ids;
1459         for (i = 0; i < num_maps; i++) {
1460                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1461                 total -= bitmap->map[i].depth;
1462         }
1463
1464         return 0;
1465 }
1466
1467 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1468 {
1469         struct request_queue *q = hctx->queue;
1470         struct blk_mq_ctx *ctx;
1471         LIST_HEAD(tmp);
1472
1473         /*
1474          * Move ctx entries to new CPU, if this one is going away.
1475          */
1476         ctx = __blk_mq_get_ctx(q, cpu);
1477
1478         spin_lock(&ctx->lock);
1479         if (!list_empty(&ctx->rq_list)) {
1480                 list_splice_init(&ctx->rq_list, &tmp);
1481                 blk_mq_hctx_clear_pending(hctx, ctx);
1482         }
1483         spin_unlock(&ctx->lock);
1484
1485         if (list_empty(&tmp))
1486                 return NOTIFY_OK;
1487
1488         ctx = blk_mq_get_ctx(q);
1489         spin_lock(&ctx->lock);
1490
1491         while (!list_empty(&tmp)) {
1492                 struct request *rq;
1493
1494                 rq = list_first_entry(&tmp, struct request, queuelist);
1495                 rq->mq_ctx = ctx;
1496                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1497         }
1498
1499         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1500         blk_mq_hctx_mark_pending(hctx, ctx);
1501
1502         spin_unlock(&ctx->lock);
1503
1504         blk_mq_run_hw_queue(hctx, true);
1505         blk_mq_put_ctx(ctx);
1506         return NOTIFY_OK;
1507 }
1508
1509 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1510 {
1511         struct request_queue *q = hctx->queue;
1512         struct blk_mq_tag_set *set = q->tag_set;
1513
1514         if (set->tags[hctx->queue_num])
1515                 return NOTIFY_OK;
1516
1517         set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1518         if (!set->tags[hctx->queue_num])
1519                 return NOTIFY_STOP;
1520
1521         hctx->tags = set->tags[hctx->queue_num];
1522         return NOTIFY_OK;
1523 }
1524
1525 static int blk_mq_hctx_notify(void *data, unsigned long action,
1526                               unsigned int cpu)
1527 {
1528         struct blk_mq_hw_ctx *hctx = data;
1529
1530         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1531                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1532         else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1533                 return blk_mq_hctx_cpu_online(hctx, cpu);
1534
1535         return NOTIFY_OK;
1536 }
1537
1538 static void blk_mq_exit_hw_queues(struct request_queue *q,
1539                 struct blk_mq_tag_set *set, int nr_queue)
1540 {
1541         struct blk_mq_hw_ctx *hctx;
1542         unsigned int i;
1543
1544         queue_for_each_hw_ctx(q, hctx, i) {
1545                 if (i == nr_queue)
1546                         break;
1547
1548                 blk_mq_tag_idle(hctx);
1549
1550                 if (set->ops->exit_hctx)
1551                         set->ops->exit_hctx(hctx, i);
1552
1553                 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1554                 kfree(hctx->ctxs);
1555                 blk_mq_free_bitmap(&hctx->ctx_map);
1556         }
1557
1558 }
1559
1560 static void blk_mq_free_hw_queues(struct request_queue *q,
1561                 struct blk_mq_tag_set *set)
1562 {
1563         struct blk_mq_hw_ctx *hctx;
1564         unsigned int i;
1565
1566         queue_for_each_hw_ctx(q, hctx, i) {
1567                 free_cpumask_var(hctx->cpumask);
1568                 kfree(hctx);
1569         }
1570 }
1571
1572 static int blk_mq_init_hw_queues(struct request_queue *q,
1573                 struct blk_mq_tag_set *set)
1574 {
1575         struct blk_mq_hw_ctx *hctx;
1576         unsigned int i;
1577
1578         /*
1579          * Initialize hardware queues
1580          */
1581         queue_for_each_hw_ctx(q, hctx, i) {
1582                 int node;
1583
1584                 node = hctx->numa_node;
1585                 if (node == NUMA_NO_NODE)
1586                         node = hctx->numa_node = set->numa_node;
1587
1588                 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1589                 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1590                 spin_lock_init(&hctx->lock);
1591                 INIT_LIST_HEAD(&hctx->dispatch);
1592                 hctx->queue = q;
1593                 hctx->queue_num = i;
1594                 hctx->flags = set->flags;
1595                 hctx->cmd_size = set->cmd_size;
1596
1597                 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1598                                                 blk_mq_hctx_notify, hctx);
1599                 blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1600
1601                 hctx->tags = set->tags[i];
1602
1603                 /*
1604                  * Allocate space for all possible cpus to avoid allocation at
1605                  * runtime
1606                  */
1607                 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1608                                                 GFP_KERNEL, node);
1609                 if (!hctx->ctxs)
1610                         break;
1611
1612                 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1613                         break;
1614
1615                 hctx->nr_ctx = 0;
1616
1617                 if (set->ops->init_hctx &&
1618                     set->ops->init_hctx(hctx, set->driver_data, i))
1619                         break;
1620         }
1621
1622         if (i == q->nr_hw_queues)
1623                 return 0;
1624
1625         /*
1626          * Init failed
1627          */
1628         blk_mq_exit_hw_queues(q, set, i);
1629
1630         return 1;
1631 }
1632
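/*
 * Editorial note, derived from the code below: initialize the per-CPU
 * software queues and account each online CPU against the hardware
 * queue it maps to via ->map_queue().
 */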
1633 static void blk_mq_init_cpu_queues(struct request_queue *q,
1634                                    unsigned int nr_hw_queues)
1635 {
1636         unsigned int i;
1637
1638         for_each_possible_cpu(i) {
1639                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1640                 struct blk_mq_hw_ctx *hctx;
1641
1642                 memset(__ctx, 0, sizeof(*__ctx));
1643                 __ctx->cpu = i;
1644                 spin_lock_init(&__ctx->lock);
1645                 INIT_LIST_HEAD(&__ctx->rq_list);
1646                 __ctx->queue = q;
1647
1648                 /* If the CPU isn't online, it is mapped to the first hctx */
1649                 if (!cpu_online(i))
1650                         continue;
1651
1652                 hctx = q->mq_ops->map_queue(q, i);
1653                 cpumask_set_cpu(i, hctx->cpumask);
1654                 hctx->nr_ctx++;
1655
1656                 /*
1657                  * Set local node, IFF we have more than one hw queue. If
1658                  * not, we remain on the home node of the device
1659                  */
1660                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1661                         hctx->numa_node = cpu_to_node(i);
1662         }
1663 }
1664
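/*
 * Editorial note, derived from the code below: (re)build the software
 * to hardware queue mapping. Online CPUs get a slot in their hardware
 * queue's ctxs[] array, hardware queues left with no software queues
 * have their tag maps freed, and the round-robin run state is reset
 * for the rest.
 */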
1665 static void blk_mq_map_swqueue(struct request_queue *q)
1666 {
1667         unsigned int i;
1668         struct blk_mq_hw_ctx *hctx;
1669         struct blk_mq_ctx *ctx;
1670
1671         queue_for_each_hw_ctx(q, hctx, i) {
1672                 cpumask_clear(hctx->cpumask);
1673                 hctx->nr_ctx = 0;
1674         }
1675
1676         /*
1677          * Map software to hardware queues
1678          */
1679         queue_for_each_ctx(q, ctx, i) {
1680                 /* If the CPU isn't online, it is mapped to the first hctx */
1681                 if (!cpu_online(i))
1682                         continue;
1683
1684                 hctx = q->mq_ops->map_queue(q, i);
1685                 cpumask_set_cpu(i, hctx->cpumask);
1686                 ctx->index_hw = hctx->nr_ctx;
1687                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1688         }
1689
1690         queue_for_each_hw_ctx(q, hctx, i) {
1691                 /*
1692                  * If no software queues are mapped to this hardware queue,
1693                  * disable it and free the request entries
1694                  */
1695                 if (!hctx->nr_ctx) {
1696                         struct blk_mq_tag_set *set = q->tag_set;
1697
1698                         if (set->tags[i]) {
1699                                 blk_mq_free_rq_map(set, set->tags[i], i);
1700                                 set->tags[i] = NULL;
1701                                 hctx->tags = NULL;
1702                         }
1703                         continue;
1704                 }
1705
1706                 /*
1707                  * Initialize batch round-robin counts
1708                  */
1709                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1710                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1711         }
1712 }
1713
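/*
 * Editorial note, derived from the code below: propagate whether the
 * tag set is shared by more than one request queue into
 * BLK_MQ_F_TAG_SHARED on every hardware queue, freezing each queue
 * while its flags are updated.
 */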
1714 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1715 {
1716         struct blk_mq_hw_ctx *hctx;
1717         struct request_queue *q;
1718         bool shared;
1719         int i;
1720
1721         if (set->tag_list.next == set->tag_list.prev)
1722                 shared = false;
1723         else
1724                 shared = true;
1725
1726         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1727                 blk_mq_freeze_queue(q);
1728
1729                 queue_for_each_hw_ctx(q, hctx, i) {
1730                         if (shared)
1731                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1732                         else
1733                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1734                 }
1735                 blk_mq_unfreeze_queue(q);
1736         }
1737 }
1738
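/*
 * Editorial note, derived from the code below: unlink a queue from its
 * tag set and recompute the shared-tags state for the queues that
 * remain on the set.
 */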
1739 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1740 {
1741         struct blk_mq_tag_set *set = q->tag_set;
1742
1743         blk_mq_freeze_queue(q);
1744
1745         mutex_lock(&set->tag_list_lock);
1746         list_del_init(&q->tag_set_list);
1747         blk_mq_update_tag_set_depth(set);
1748         mutex_unlock(&set->tag_list_lock);
1749
1750         blk_mq_unfreeze_queue(q);
1751 }
1752
1753 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1754                                      struct request_queue *q)
1755 {
1756         q->tag_set = set;
1757
1758         mutex_lock(&set->tag_list_lock);
1759         list_add_tail(&q->tag_set_list, &set->tag_list);
1760         blk_mq_update_tag_set_depth(set);
1761         mutex_unlock(&set->tag_list_lock);
1762 }
1763
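/*
 * Editorial note, derived from the code below: allocate and initialize
 * a request queue on top of an already allocated tag set -- per-CPU
 * software queues, hardware queue contexts, the CPU-to-queue map, the
 * flush machinery and the make_request entry point (single- or
 * multi-queue variant depending on nr_hw_queues). Returns the queue or
 * an ERR_PTR() on failure.
 */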
1764 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1765 {
1766         struct blk_mq_hw_ctx **hctxs;
1767         struct blk_mq_ctx __percpu *ctx;
1768         struct request_queue *q;
1769         unsigned int *map;
1770         int i;
1771
1772         ctx = alloc_percpu(struct blk_mq_ctx);
1773         if (!ctx)
1774                 return ERR_PTR(-ENOMEM);
1775
1776         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1777                         set->numa_node);
1778
1779         if (!hctxs)
1780                 goto err_percpu;
1781
1782         map = blk_mq_make_queue_map(set);
1783         if (!map)
1784                 goto err_map;
1785
1786         for (i = 0; i < set->nr_hw_queues; i++) {
1787                 int node = blk_mq_hw_queue_to_node(map, i);
1788
1789                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1790                                         GFP_KERNEL, node);
1791                 if (!hctxs[i])
1792                         goto err_hctxs;
1793
1794                 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1795                         goto err_hctxs;
1796
1797                 atomic_set(&hctxs[i]->nr_active, 0);
1798                 hctxs[i]->numa_node = node;
1799                 hctxs[i]->queue_num = i;
1800         }
1801
1802         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1803         if (!q)
1804                 goto err_hctxs;
1805
1806         if (percpu_counter_init(&q->mq_usage_counter, 0))
1807                 goto err_map;
1808
1809         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1810         blk_queue_rq_timeout(q, 30000);
1811
1812         q->nr_queues = nr_cpu_ids;
1813         q->nr_hw_queues = set->nr_hw_queues;
1814         q->mq_map = map;
1815
1816         q->queue_ctx = ctx;
1817         q->queue_hw_ctx = hctxs;
1818
1819         q->mq_ops = set->ops;
1820         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1821
1822         if (!(set->flags & BLK_MQ_F_SG_MERGE))
1823                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1824
1825         q->sg_reserved_size = INT_MAX;
1826
1827         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1828         INIT_LIST_HEAD(&q->requeue_list);
1829         spin_lock_init(&q->requeue_lock);
1830
1831         if (q->nr_hw_queues > 1)
1832                 blk_queue_make_request(q, blk_mq_make_request);
1833         else
1834                 blk_queue_make_request(q, blk_sq_make_request);
1835
1836         blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1837         if (set->timeout)
1838                 blk_queue_rq_timeout(q, set->timeout);
1839
1840         /*
1841          * Do this after blk_queue_make_request() overrides it...
1842          */
1843         q->nr_requests = set->queue_depth;
1844
1845         if (set->ops->complete)
1846                 blk_queue_softirq_done(q, set->ops->complete);
1847
1848         blk_mq_init_flush(q);
1849         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1850
1851         q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1852                                 set->cmd_size, cache_line_size()),
1853                                 GFP_KERNEL);
1854         if (!q->flush_rq)
1855                 goto err_hw;
1856
1857         if (blk_mq_init_hw_queues(q, set))
1858                 goto err_flush_rq;
1859
1860         mutex_lock(&all_q_mutex);
1861         list_add_tail(&q->all_q_node, &all_q_list);
1862         mutex_unlock(&all_q_mutex);
1863
1864         blk_mq_add_queue_tag_set(set, q);
1865
1866         blk_mq_map_swqueue(q);
1867
1868         return q;
1869
1870 err_flush_rq:
1871         kfree(q->flush_rq);
1872 err_hw:
1873         blk_cleanup_queue(q);
1874 err_hctxs:
1875         kfree(map);
1876         for (i = 0; i < set->nr_hw_queues; i++) {
1877                 if (!hctxs[i])
1878                         break;
1879                 free_cpumask_var(hctxs[i]->cpumask);
1880                 kfree(hctxs[i]);
1881         }
1882 err_map:
1883         kfree(hctxs);
1884 err_percpu:
1885         free_percpu(ctx);
1886         return ERR_PTR(-ENOMEM);
1887 }
1888 EXPORT_SYMBOL(blk_mq_init_queue);
1889
1890 void blk_mq_free_queue(struct request_queue *q)
1891 {
1892         struct blk_mq_tag_set   *set = q->tag_set;
1893
1894         blk_mq_del_queue_tag_set(q);
1895
1896         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1897         blk_mq_free_hw_queues(q, set);
1898
1899         percpu_counter_destroy(&q->mq_usage_counter);
1900
1901         free_percpu(q->queue_ctx);
1902         kfree(q->queue_hw_ctx);
1903         kfree(q->mq_map);
1904
1905         q->queue_ctx = NULL;
1906         q->queue_hw_ctx = NULL;
1907         q->mq_map = NULL;
1908
1909         mutex_lock(&all_q_mutex);
1910         list_del_init(&q->all_q_node);
1911         mutex_unlock(&all_q_mutex);
1912 }
1913
1914 /* Basically redo blk_mq_init_queue with the queue frozen */
1915 static void blk_mq_queue_reinit(struct request_queue *q)
1916 {
1917         blk_mq_freeze_queue(q);
1918
1919         blk_mq_sysfs_unregister(q);
1920
1921         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1922
1923         /*
1924          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1925          * we should change hctx->numa_node according to the new topology
1926          * (this involves freeing and re-allocating memory; worth doing?)
1927          */
1928
1929         blk_mq_map_swqueue(q);
1930
1931         blk_mq_sysfs_register(q);
1932
1933         blk_mq_unfreeze_queue(q);
1934 }
1935
1936 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1937                                       unsigned long action, void *hcpu)
1938 {
1939         struct request_queue *q;
1940
1941         /*
1942          * Before new mappings are established, a hot-added CPU might already
1943          * start handling requests. This doesn't break anything, as we map
1944          * offline CPUs to the first hardware queue. We will re-init the queue
1945          * below to get optimal settings.
1946          */
1947         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1948             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1949                 return NOTIFY_OK;
1950
1951         mutex_lock(&all_q_mutex);
1952         list_for_each_entry(q, &all_q_list, all_q_node)
1953                 blk_mq_queue_reinit(q);
1954         mutex_unlock(&all_q_mutex);
1955         return NOTIFY_OK;
1956 }
1957
1958 /*
1959  * Alloc a tag set to be associated with one or more request queues.
1960  * May fail with EINVAL for various error conditions. May adjust the
1961  * requested depth down if it is too large. In that case, the adjusted
1962  * value will be stored in set->queue_depth.
1963  */
1964 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1965 {
1966         int i;
1967
1968         if (!set->nr_hw_queues)
1969                 return -EINVAL;
1970         if (!set->queue_depth)
1971                 return -EINVAL;
1972         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1973                 return -EINVAL;
1974
1975         if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
1976                 return -EINVAL;
1977
1978         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
1979                 pr_info("blk-mq: reduced tag depth to %u\n",
1980                         BLK_MQ_MAX_DEPTH);
1981                 set->queue_depth = BLK_MQ_MAX_DEPTH;
1982         }
1983
1984         set->tags = kmalloc_node(set->nr_hw_queues *
1985                                  sizeof(struct blk_mq_tags *),
1986                                  GFP_KERNEL, set->numa_node);
1987         if (!set->tags)
1988                 goto out;
1989
1990         for (i = 0; i < set->nr_hw_queues; i++) {
1991                 set->tags[i] = blk_mq_init_rq_map(set, i);
1992                 if (!set->tags[i])
1993                         goto out_unwind;
1994         }
1995
1996         mutex_init(&set->tag_list_lock);
1997         INIT_LIST_HEAD(&set->tag_list);
1998
1999         return 0;
2000
2001 out_unwind:
2002         while (--i >= 0)
2003                 blk_mq_free_rq_map(set, set->tags[i], i);
2004 out:
2005         return -ENOMEM;
2006 }
2007 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
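/*
 * Editorial note: a minimal usage sketch, not taken from this file. A
 * hypothetical driver "foo" fills in a blk_mq_tag_set, allocates it and
 * then creates a request queue on top of it. foo_mq_ops, foo_queue_rq,
 * foo_map_queue and struct foo_cmd are illustrative names only, and
 * error handling is abbreviated.
 *
 *	static struct blk_mq_ops foo_mq_ops = {
 *		.queue_rq	= foo_queue_rq,
 *		.map_queue	= foo_map_queue,
 *	};
 *
 *	set->ops		= &foo_mq_ops;
 *	set->nr_hw_queues	= 1;
 *	set->queue_depth	= 64;
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->cmd_size		= sizeof(struct foo_cmd);
 *	set->driver_data	= foo;
 *
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 */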
2008
2009 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2010 {
2011         int i;
2012
2013         for (i = 0; i < set->nr_hw_queues; i++) {
2014                 if (set->tags[i])
2015                         blk_mq_free_rq_map(set, set->tags[i], i);
2016         }
2017
2018         kfree(set->tags);
2019 }
2020 EXPORT_SYMBOL(blk_mq_free_tag_set);
2021
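/*
 * Editorial note, derived from the code below: resize the queue depth
 * (e.g. when nr_requests is changed). Every hardware queue's tag depth
 * is updated, and the new value is committed to q->nr_requests only if
 * all of them succeed.
 */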
2022 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2023 {
2024         struct blk_mq_tag_set *set = q->tag_set;
2025         struct blk_mq_hw_ctx *hctx;
2026         int i, ret;
2027
2028         if (!set || nr > set->queue_depth)
2029                 return -EINVAL;
2030
2031         ret = 0;
2032         queue_for_each_hw_ctx(q, hctx, i) {
2033                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2034                 if (ret)
2035                         break;
2036         }
2037
2038         if (!ret)
2039                 q->nr_requests = nr;
2040
2041         return ret;
2042 }
2043
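/*
 * Editorial note, derived from the code below:
 * blk_mq_disable_hotplug()/blk_mq_enable_hotplug() bracket a region
 * with all_q_mutex held, so the CPU hotplug notifier cannot re-map
 * queues in the middle of it.
 */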
2044 void blk_mq_disable_hotplug(void)
2045 {
2046         mutex_lock(&all_q_mutex);
2047 }
2048
2049 void blk_mq_enable_hotplug(void)
2050 {
2051         mutex_unlock(&all_q_mutex);
2052 }
2053
2054 static int __init blk_mq_init(void)
2055 {
2056         blk_mq_cpu_init();
2057
2058         /* Must be called after percpu_counter_hotcpu_callback() */
2059         hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
2060
2061         return 0;
2062 }
2063 subsys_initcall(blk_mq_init);