/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        unsigned int i;

        for (i = 0; i < hctx->ctx_map.map_size; i++)
                if (hctx->ctx_map.map[i].word)
                        return true;

        return false;
}

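/*
 * Note: hctx->ctx_map is a bitmap of software queues (ctxs) with pending
 * work, split into cacheline-aligned words (struct blk_align_bitmap) so
 * that updates from different CPUs are less likely to share a cacheline.
 * The helpers below locate the word and bit for a given ctx's index_hw.
 */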
static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
                                              struct blk_mq_ctx *ctx)
{
        return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
}

#define CTX_TO_BIT(hctx, ctx)   \
        ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        struct blk_align_bitmap *bm = get_bm(hctx, ctx);

        if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
                set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        struct blk_align_bitmap *bm = get_bm(hctx, ctx);

        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

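/*
 * Grab a reference on the queue's usage counter before submitting I/O.
 * Sleeps while the queue is frozen, and returns -ENODEV once the queue
 * is marked dying.
 */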
static int blk_mq_queue_enter(struct request_queue *q)
{
        while (true) {
                int ret;

                if (percpu_ref_tryget_live(&q->mq_usage_counter))
                        return 0;

                ret = wait_event_interruptible(q->mq_freeze_wq,
                                !q->mq_freeze_depth || blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
                if (ret)
                        return ret;
        }
}

static void blk_mq_queue_exit(struct request_queue *q)
{
        percpu_ref_put(&q->mq_usage_counter);
}

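/*
 * Called when the last reference to mq_usage_counter is dropped after a
 * percpu_ref_kill(): wake up anyone waiting in blk_mq_freeze_queue().
 */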
static void blk_mq_usage_counter_release(struct percpu_ref *ref)
{
        struct request_queue *q =
                container_of(ref, struct request_queue, mq_usage_counter);

        wake_up_all(&q->mq_freeze_wq);
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_mq_freeze_queue(struct request_queue *q)
{
        bool freeze;

        spin_lock_irq(q->queue_lock);
        freeze = !q->mq_freeze_depth++;
        spin_unlock_irq(q->queue_lock);

        if (freeze) {
                percpu_ref_kill(&q->mq_usage_counter);
                blk_mq_run_queues(q, false);
        }
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}

static void blk_mq_unfreeze_queue(struct request_queue *q)
{
        bool wake;

        spin_lock_irq(q->queue_lock);
        wake = !--q->mq_freeze_depth;
        WARN_ON_ONCE(q->mq_freeze_depth < 0);
        spin_unlock_irq(q->queue_lock);
        if (wake) {
                percpu_ref_reinit(&q->mq_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

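/*
 * Initialize the fields of a freshly allocated request for the given
 * software queue. The tag and atomic flags are left untouched; everything
 * else is reset so the request can be reused safely.
 */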
static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
                               struct request *rq, unsigned int rw_flags)
{
        if (blk_queue_io_stat(q))
                rw_flags |= REQ_IO_STAT;

        INIT_LIST_HEAD(&rq->queuelist);
        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = q;
        rq->mq_ctx = ctx;
        rq->cmd_flags |= rw_flags;
        /* do not touch atomic flags, it needs atomic ops against the timer */
        rq->cpu = -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
        set_start_time_ns(rq);
        rq->io_start_time_ns = 0;
#endif
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->special = NULL;
        /* tag was already set */
        rq->errors = 0;

        rq->cmd = rq->__cmd;

        rq->extra_len = 0;
        rq->sense_len = 0;
        rq->resid_len = 0;
        rq->sense = NULL;

        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;

        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;

        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}

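/*
 * Try to grab a tag from the hardware queue's tag map. On success, the
 * tag indexes a preallocated request, which is (re)initialized and
 * returned; on failure, NULL is returned and the caller decides whether
 * to retry or give up.
 */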
static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
{
        struct request *rq;
        unsigned int tag;

        tag = blk_mq_get_tag(data);
        if (tag != BLK_MQ_TAG_FAIL) {
                rq = data->hctx->tags->rqs[tag];

                if (blk_mq_tag_busy(data->hctx)) {
                        rq->cmd_flags = REQ_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
                }

                rq->tag = tag;
                blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
                return rq;
        }

        return NULL;
}

struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                bool reserved)
{
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
        struct blk_mq_alloc_data alloc_data;
        int ret;

        ret = blk_mq_queue_enter(q);
        if (ret)
                return ERR_PTR(ret);

        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
                        reserved, ctx, hctx);

        rq = __blk_mq_alloc_request(&alloc_data, rw);
        if (!rq && (gfp & __GFP_WAIT)) {
                __blk_mq_run_hw_queue(hctx);
                blk_mq_put_ctx(ctx);

                ctx = blk_mq_get_ctx(q);
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
                                hctx);
                rq = __blk_mq_alloc_request(&alloc_data, rw);
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
        return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx, struct request *rq)
{
        const int tag = rq->tag;
        struct request_queue *q = rq->q;

        if (rq->cmd_flags & REQ_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);
        rq->cmd_flags = 0;

        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
        blk_mq_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q = rq->q;

        ctx->rq_completed[rq_is_sync(rq)]++;

        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        __blk_mq_free_request(hctx, ctx, rq);
}

inline void __blk_mq_end_request(struct request *rq, int error)
{
        blk_account_io_done(rq);

        if (rq->end_io) {
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}

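/*
 * Complete the request either on the local CPU or, if the submitting CPU
 * does not share a cache with us (and QUEUE_FLAG_SAME_COMP is set), via an
 * asynchronous IPI to the submitting CPU.
 */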
static void blk_mq_ipi_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}

void __blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (!q->softirq_done_fn)
                blk_mq_end_request(rq, rq->errors);
        else
                blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:         the request being processed
 *
 * Description:
 *      Ends all I/O on a request. It does not handle partial completions.
 *      The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (unlikely(blk_should_fake_timeout(q)))
                return;
        if (!blk_mark_rq_complete(rq))
                __blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

void blk_mq_start_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_issue(q, rq);

        rq->resid_len = blk_rq_bytes(rq);
        if (unlikely(blk_bidi_rq(rq)))
                rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

        blk_add_timer(rq);

        /*
         * Ensure that ->deadline is visible before we set the started
         * flag and clear the completed flag.
         */
        smp_mb__before_atomic();

        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
         * complete. So be sure to clear complete again when we start
         * the request, otherwise we'll ignore the completion event.
         */
        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
                clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
                /*
                 * Make sure space for the drain appears.  We know we can do
                 * this because max_hw_segments has been adjusted to be one
                 * fewer than the device can handle.
                 */
                rq->nr_phys_segments++;
        }
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_requeue(q, rq);

        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
}

void blk_mq_requeue_request(struct request *rq)
{
        __blk_mq_requeue_request(rq);

        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

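/*
 * Requeue worker: splice the requeue list off the queue, then re-insert
 * the requests. REQ_SOFTBARRIER marks requests that must be re-inserted
 * at the head; they are handled first, then the remainder are added at
 * the tail.
 */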
static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;
        unsigned long flags;

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
                if (!(rq->cmd_flags & REQ_SOFTBARRIER))
                        continue;

                rq->cmd_flags &= ~REQ_SOFTBARRIER;
                list_del_init(&rq->queuelist);
                blk_mq_insert_request(rq, true, false, false);
        }

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_insert_request(rq, false, false, false);
        }

        /*
         * Use the start variant of queue running here, so that running
         * the requeue work will kick stopped queues.
         */
        blk_mq_start_hw_queues(q);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
         * request head insertion from the workqueue.
         */
        BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

        spin_lock_irqsave(&q->requeue_lock, flags);
        if (at_head) {
                rq->cmd_flags |= REQ_SOFTBARRIER;
                list_add(&rq->queuelist, &q->requeue_list);
        } else {
                list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

static inline bool is_flush_request(struct request *rq,
                struct blk_flush_queue *fq, unsigned int tag)
{
        return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
                        fq->flush_rq->tag == tag);
}

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
        struct request *rq = tags->rqs[tag];
        /* mq_ctx of flush rq is always cloned from the corresponding req */
        struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);

        if (!is_flush_request(rq, fq, tag))
                return rq;

        return fq->flush_rq;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
        unsigned long next;
        unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
        struct blk_mq_ops *ops = req->q->mq_ops;
        enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

        /*
         * We know that complete is set at this point. If STARTED isn't set
         * anymore, then the request isn't active and the "timeout" should
         * just be ignored. This can happen due to the bitflag ordering.
         * Timeout first checks if STARTED is set, and if it is, assumes
         * the request is active. But if we race with completion, then
         * both flags will get cleared. So check here again, and ignore
         * a timeout event with a request that isn't active.
         */
        if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
                return;

        if (ops->timeout)
                ret = ops->timeout(req, reserved);

        switch (ret) {
        case BLK_EH_HANDLED:
                __blk_mq_complete_request(req);
                break;
        case BLK_EH_RESET_TIMER:
                blk_add_timer(req);
                blk_clear_rq_complete(req);
                break;
        case BLK_EH_NOT_HANDLED:
                break;
        default:
                printk(KERN_ERR "block: bad eh return: %d\n", ret);
                break;
        }
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
{
        struct blk_mq_timeout_data *data = priv;

        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                return;

        if (time_after_eq(jiffies, rq->deadline)) {
                if (!blk_mark_rq_complete(rq))
                        blk_mq_rq_timed_out(rq, reserved);
        } else if (!data->next_set || time_after(data->next, rq->deadline)) {
                data->next = rq->deadline;
                data->next_set = 1;
        }
}

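/*
 * Queue timeout handler: walk all busy tags on every hardware queue,
 * expiring requests whose deadline has passed and tracking the earliest
 * upcoming deadline so the timer can be re-armed for it.
 */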
static void blk_mq_rq_timer(unsigned long priv)
{
        struct request_queue *q = (struct request_queue *)priv;
        struct blk_mq_timeout_data data = {
                .next           = 0,
                .next_set       = 0,
        };
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!hctx->nr_ctx || !hctx->tags)
                        continue;

                blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
        }

        if (data.next_set) {
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
                queue_for_each_hw_ctx(q, hctx, i)
                        blk_mq_tag_idle(hctx);
        }
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
                                 struct blk_mq_ctx *ctx, struct bio *bio)
{
        struct request *rq;
        int checked = 8;

        list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
                int el_ret;

                if (!checked--)
                        break;

                if (!blk_rq_merge_ok(rq, bio))
                        continue;

                el_ret = blk_try_merge(rq, bio);
                if (el_ret == ELEVATOR_BACK_MERGE) {
                        if (bio_attempt_back_merge(q, rq, bio)) {
                                ctx->rq_merged++;
                                return true;
                        }
                        break;
                } else if (el_ret == ELEVATOR_FRONT_MERGE) {
                        if (bio_attempt_front_merge(q, rq, bio)) {
                                ctx->rq_merged++;
                                return true;
                        }
                        break;
                }
        }

        return false;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
        struct blk_mq_ctx *ctx;
        int i;

        for (i = 0; i < hctx->ctx_map.map_size; i++) {
                struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
                unsigned int off, bit;

                if (!bm->word)
                        continue;

                bit = 0;
                off = i * hctx->ctx_map.bits_per_word;
                do {
                        bit = find_next_bit(&bm->word, bm->depth, bit);
                        if (bit >= bm->depth)
                                break;

                        ctx = hctx->ctxs[bit + off];
                        clear_bit(bit, &bm->word);
                        spin_lock(&ctx->lock);
                        list_splice_tail_init(&ctx->rq_list, list);
                        spin_unlock(&ctx->lock);

                        bit++;
                } while (1);
        }
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct request *rq;
        LIST_HEAD(rq_list);
        int queued;

        WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));

        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;

        hctx->run++;

        /*
         * Touch any software queue that has pending entries.
         */
        flush_busy_ctxs(hctx, &rq_list);

        /*
         * If we have previous entries on our dispatch list, grab them
         * and stuff them at the front for more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Now process all the entries, sending them to the driver.
         */
        queued = 0;
        while (!list_empty(&rq_list)) {
                int ret;

                rq = list_first_entry(&rq_list, struct request, queuelist);
                list_del_init(&rq->queuelist);

                ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list));
                switch (ret) {
                case BLK_MQ_RQ_QUEUE_OK:
                        queued++;
                        continue;
                case BLK_MQ_RQ_QUEUE_BUSY:
                        list_add(&rq->queuelist, &rq_list);
                        __blk_mq_requeue_request(rq);
                        break;
                default:
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
                case BLK_MQ_RQ_QUEUE_ERROR:
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                        break;
                }

                if (ret == BLK_MQ_RQ_QUEUE_BUSY)
                        break;
        }

        if (!queued)
                hctx->dispatched[0]++;
        else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
                hctx->dispatched[ilog2(queued) + 1]++;

        /*
         * Any items that need requeuing? Stuff them into hctx->dispatch;
         * that is where we will continue on the next queue run.
         */
        if (!list_empty(&rq_list)) {
                spin_lock(&hctx->lock);
                list_splice(&rq_list, &hctx->dispatch);
                spin_unlock(&hctx->lock);
        }
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
        int cpu = hctx->next_cpu;

        if (--hctx->next_cpu_batch <= 0) {
                int next_cpu;

                next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
                if (next_cpu >= nr_cpu_ids)
                        next_cpu = cpumask_first(hctx->cpumask);

                hctx->next_cpu = next_cpu;
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }

        return cpu;
}

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;

        if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
                __blk_mq_run_hw_queue(hctx);
        else if (hctx->queue->nr_hw_queues == 1)
                kblockd_schedule_delayed_work(&hctx->run_work, 0);
        else {
                unsigned int cpu;

                cpu = blk_mq_hctx_next_cpu(hctx);
                kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
        }
}

void blk_mq_run_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if ((!blk_mq_hctx_has_pending(hctx) &&
                    list_empty_careful(&hctx->dispatch)) ||
                    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                        continue;

                preempt_disable();
                blk_mq_run_hw_queue(hctx, async);
                preempt_enable();
        }
}
EXPORT_SYMBOL(blk_mq_run_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        cancel_delayed_work(&hctx->run_work);
        cancel_delayed_work(&hctx->delay_work);
        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

        preempt_disable();
        blk_mq_run_hw_queue(hctx, false);
        preempt_enable();
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                        continue;

                clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
                preempt_disable();
                blk_mq_run_hw_queue(hctx, async);
                preempt_enable();
        }
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

        __blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

        if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
                __blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
        unsigned long tmo = msecs_to_jiffies(msecs);

        if (hctx->queue->nr_hw_queues == 1)
                kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
        else {
                unsigned int cpu;

                cpu = blk_mq_hctx_next_cpu(hctx);
                kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
        }
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
                                    struct request *rq, bool at_head)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        trace_block_rq_insert(hctx->queue, rq);

        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);

        blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
                bool async)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;

        current_ctx = blk_mq_get_ctx(q);
        if (!cpu_online(ctx->cpu))
                rq->mq_ctx = ctx = current_ctx;

        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        spin_lock(&ctx->lock);
        __blk_mq_insert_request(hctx, rq, at_head);
        spin_unlock(&ctx->lock);

        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);

        blk_mq_put_ctx(current_ctx);
}

static void blk_mq_insert_requests(struct request_queue *q,
                                     struct blk_mq_ctx *ctx,
                                     struct list_head *list,
                                     int depth,
                                     bool from_schedule)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *current_ctx;

        trace_block_unplug(q, depth, !from_schedule);

        current_ctx = blk_mq_get_ctx(q);

        if (!cpu_online(ctx->cpu))
                ctx = current_ctx;
        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        /*
         * Preemption doesn't flush the plug list, so it's possible that
         * ctx->cpu is offline by now.
         */
        spin_lock(&ctx->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->mq_ctx = ctx;
                __blk_mq_insert_request(hctx, rq, false);
        }
        spin_unlock(&ctx->lock);

        blk_mq_run_hw_queue(hctx, from_schedule);
        blk_mq_put_ctx(current_ctx);
}

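/*
 * list_sort() comparator for plugged requests: order by software queue
 * first, then by sector within the same ctx, so that requests can be
 * batched per ctx and stay merge-friendly.
 */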
static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return !(rqa->mq_ctx < rqb->mq_ctx ||
                 (rqa->mq_ctx == rqb->mq_ctx &&
                  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
        struct blk_mq_ctx *this_ctx;
        struct request_queue *this_q;
        struct request *rq;
        LIST_HEAD(list);
        LIST_HEAD(ctx_list);
        unsigned int depth;

        list_splice_init(&plug->mq_list, &list);

        list_sort(NULL, &list, plug_ctx_cmp);

        this_q = NULL;
        this_ctx = NULL;
        depth = 0;

        while (!list_empty(&list)) {
                rq = list_entry_rq(list.next);
                list_del_init(&rq->queuelist);
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
                                blk_mq_insert_requests(this_q, this_ctx,
                                                        &ctx_list, depth,
                                                        from_schedule);
                        }

                        this_ctx = rq->mq_ctx;
                        this_q = rq->q;
                        depth = 0;
                }

                depth++;
                list_add_tail(&rq->queuelist, &ctx_list);
        }

        /*
         * If 'this_ctx' is set, we know we have entries to complete
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
                blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
                                       from_schedule);
        }
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
        init_request_from_bio(rq, bio);

        if (blk_do_io_stat(rq))
                blk_account_io_start(rq, 1);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
        return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
                !blk_queue_nomerges(hctx->queue);
}

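/*
 * Try to merge the bio into an existing request on the software queue;
 * if that succeeds, the pre-allocated request is freed and true is
 * returned. Otherwise the bio is turned into the request and inserted,
 * and false is returned so the caller runs the hardware queue.
 */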
static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
{
        if (!hctx_allow_merges(hctx)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
insert_rq:
                __blk_mq_insert_request(hctx, rq, false);
                spin_unlock(&ctx->lock);
                return false;
        } else {
                struct request_queue *q = hctx->queue;

                spin_lock(&ctx->lock);
                if (!blk_mq_attempt_merge(q, ctx, bio)) {
                        blk_mq_bio_to_request(rq, bio);
                        goto insert_rq;
                }

                spin_unlock(&ctx->lock);
                __blk_mq_free_request(hctx, ctx, rq);
                return true;
        }
}

struct blk_map_ctx {
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
};

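/*
 * Map a bio to a software/hardware queue pair and allocate a request for
 * it. The first allocation attempt is atomic; if it fails, the hardware
 * queue is kicked to free up tags and we retry, this time allowing the
 * allocation to sleep.
 */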
static struct request *blk_mq_map_request(struct request_queue *q,
                                          struct bio *bio,
                                          struct blk_map_ctx *data)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct request *rq;
        int rw = bio_data_dir(bio);
        struct blk_mq_alloc_data alloc_data;

        if (unlikely(blk_mq_queue_enter(q))) {
                bio_endio(bio, -EIO);
                return NULL;
        }

        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        if (rw_is_sync(bio->bi_rw))
                rw |= REQ_SYNC;

        trace_block_getrq(q, bio, rw);
        blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
                        hctx);
        rq = __blk_mq_alloc_request(&alloc_data, rw);
        if (unlikely(!rq)) {
                __blk_mq_run_hw_queue(hctx);
                blk_mq_put_ctx(ctx);
                trace_block_sleeprq(q, bio, rw);

                ctx = blk_mq_get_ctx(q);
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q,
                                __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
                rq = __blk_mq_alloc_request(&alloc_data, rw);
                ctx = alloc_data.ctx;
                hctx = alloc_data.hctx;
        }

        hctx->queued++;
        data->hctx = hctx;
        data->ctx = ctx;
        return rq;
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
        const int is_sync = rw_is_sync(bio->bi_rw);
        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
        struct blk_map_ctx data;
        struct request *rq;

        blk_queue_bounce(q, &bio);

        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_endio(bio, -EIO);
                return;
        }

        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
                return;

        if (unlikely(is_flush_fua)) {
                blk_mq_bio_to_request(rq, bio);
                blk_insert_flush(rq);
                goto run_queue;
        }

        if (is_sync) {
                int ret;

                blk_mq_bio_to_request(rq, bio);

                /*
                 * For an OK return we are done. For an error, kill the
                 * request. Any other return (busy) just adds it to our
                 * list, as we previously would have done.
                 */
                ret = q->mq_ops->queue_rq(data.hctx, rq, true);
                if (ret == BLK_MQ_RQ_QUEUE_OK)
                        goto done;
                else {
                        __blk_mq_requeue_request(rq);

                        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
                                rq->errors = -EIO;
                                blk_mq_end_request(rq, rq->errors);
                                goto done;
                        }
                }
        }

        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
                /*
                 * For a SYNC request, send it to the hardware immediately. For
                 * an ASYNC request, just ensure that we run it later on. The
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
done:
        blk_mq_put_ctx(data.ctx);
}

/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
        const int is_sync = rw_is_sync(bio->bi_rw);
        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
        unsigned int use_plug, request_count = 0;
        struct blk_map_ctx data;
        struct request *rq;

        /*
         * Only use a plug for async, non-flush IO; sync and flush/FUA
         * requests bypass the plug and go straight to the hardware queue.
         */
        use_plug = !is_flush_fua && !is_sync;

        blk_queue_bounce(q, &bio);

        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_endio(bio, -EIO);
                return;
        }

        if (use_plug && !blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count))
                return;

        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
                return;

        if (unlikely(is_flush_fua)) {
                blk_mq_bio_to_request(rq, bio);
                blk_insert_flush(rq);
                goto run_queue;
        }

        /*
         * If a task plug exists, use it: since plugging is completely
         * lockless, we can temporarily store requests there until the
         * task is either done or scheduled away.
         */
        if (use_plug) {
                struct blk_plug *plug = current->plug;

                if (plug) {
                        blk_mq_bio_to_request(rq, bio);
                        if (list_empty(&plug->mq_list))
                                trace_block_plug(q);
                        else if (request_count >= BLK_MAX_REQUEST_COUNT) {
                                blk_flush_plug_list(plug, false);
                                trace_block_plug(q);
                        }
                        list_add_tail(&rq->queuelist, &plug->mq_list);
                        blk_mq_put_ctx(data.ctx);
                        return;
                }
        }

        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
                /*
                 * For a SYNC request, send it to the hardware immediately. For
                 * an ASYNC request, just ensure that we run it later on. The
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }

        blk_mq_put_ctx(data.ctx);
}

/*
 * Default mapping to a software queue, since we use one per CPU.
 */
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
{
        return q->queue_hw_ctx[q->mq_map[cpu]];
}
EXPORT_SYMBOL(blk_mq_map_queue);

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                struct blk_mq_tags *tags, unsigned int hctx_idx)
{
        struct page *page;

        if (tags->rqs && set->ops->exit_request) {
                int i;

                for (i = 0; i < tags->nr_tags; i++) {
                        if (!tags->rqs[i])
                                continue;
                        set->ops->exit_request(set->driver_data, tags->rqs[i],
                                                hctx_idx, i);
                        tags->rqs[i] = NULL;
                }
        }

        while (!list_empty(&tags->page_list)) {
                page = list_first_entry(&tags->page_list, struct page, lru);
                list_del_init(&page->lru);
                __free_pages(page, page->private);
        }

        kfree(tags->rqs);

        blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
        return (size_t)PAGE_SIZE << order;
}

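/*
 * Allocate the tag map and the request structures for one hardware queue.
 * Requests are carved out of bulk page allocations (starting at order 4
 * and falling back to smaller orders under memory pressure), one rq_size
 * slot per tag.
 */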
static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                unsigned int hctx_idx)
{
        struct blk_mq_tags *tags;
        unsigned int i, j, entries_per_page, max_order = 4;
        size_t rq_size, left;

        tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
                                set->numa_node);
        if (!tags)
                return NULL;

        INIT_LIST_HEAD(&tags->page_list);

        tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
                                 set->numa_node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
                return NULL;
        }

        /*
         * rq_size is the size of the request plus driver payload, rounded
         * to the cacheline size
         */
        rq_size = round_up(sizeof(struct request) + set->cmd_size,
                                cache_line_size());
        left = rq_size * set->queue_depth;

        for (i = 0; i < set->queue_depth; ) {
                int this_order = max_order;
                struct page *page;
                int to_do;
                void *p;

                while (left < order_to_size(this_order - 1) && this_order)
                        this_order--;

                do {
                        page = alloc_pages_node(set->numa_node,
                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
                                this_order);
                        if (page)
                                break;
                        if (!this_order--)
                                break;
                        if (order_to_size(this_order) < rq_size)
                                break;
                } while (1);

                if (!page)
                        goto fail;

                page->private = this_order;
                list_add_tail(&page->lru, &tags->page_list);

                p = page_address(page);
                entries_per_page = order_to_size(this_order) / rq_size;
                to_do = min(entries_per_page, set->queue_depth - i);
                left -= to_do * rq_size;
                for (j = 0; j < to_do; j++) {
                        tags->rqs[i] = p;
                        tags->rqs[i]->atomic_flags = 0;
                        tags->rqs[i]->cmd_flags = 0;
                        if (set->ops->init_request) {
                                if (set->ops->init_request(set->driver_data,
                                                tags->rqs[i], hctx_idx, i,
                                                set->numa_node)) {
                                        tags->rqs[i] = NULL;
                                        goto fail;
                                }
                        }

                        p += rq_size;
                        i++;
                }
        }

        return tags;

fail:
        blk_mq_free_rq_map(set, tags, hctx_idx);
        return NULL;
}

static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
{
        kfree(bitmap->map);
}

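/*
 * Allocate the pending-work bitmap for a hardware queue: one
 * blk_align_bitmap word per bits_per_word (8) software queues, sized to
 * cover nr_cpu_ids ctxs, with each word's depth set to the number of
 * bits it actually covers.
 */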
static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
{
        unsigned int bpw = 8, total, num_maps, i;

        bitmap->bits_per_word = bpw;

        num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
        bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
                                        GFP_KERNEL, node);
        if (!bitmap->map)
                return -ENOMEM;

        bitmap->map_size = num_maps;

        total = nr_cpu_ids;
        for (i = 0; i < num_maps; i++) {
                bitmap->map[i].depth = min(total, bitmap->bits_per_word);
                total -= bitmap->map[i].depth;
        }

        return 0;
}

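/*
 * CPU hotplug (CPU_DEAD) handler: move any requests still pending on the
 * dead CPU's software queue over to the current CPU's ctx so they are
 * not lost, then kick the hardware queue.
 */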
static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        LIST_HEAD(tmp);

        /*
         * Move ctx entries to new CPU, if this one is going away.
         */
        ctx = __blk_mq_get_ctx(q, cpu);

        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_list)) {
                list_splice_init(&ctx->rq_list, &tmp);
                blk_mq_hctx_clear_pending(hctx, ctx);
        }
        spin_unlock(&ctx->lock);

        if (list_empty(&tmp))
                return NOTIFY_OK;

        ctx = blk_mq_get_ctx(q);
        spin_lock(&ctx->lock);

        while (!list_empty(&tmp)) {
                struct request *rq;

                rq = list_first_entry(&tmp, struct request, queuelist);
                rq->mq_ctx = ctx;
                list_move_tail(&rq->queuelist, &ctx->rq_list);
        }

        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        blk_mq_hctx_mark_pending(hctx, ctx);

        spin_unlock(&ctx->lock);

        blk_mq_run_hw_queue(hctx, true);
        blk_mq_put_ctx(ctx);
        return NOTIFY_OK;
}

static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_tag_set *set = q->tag_set;

        if (set->tags[hctx->queue_num])
                return NOTIFY_OK;

        set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
        if (!set->tags[hctx->queue_num])
                return NOTIFY_STOP;

        hctx->tags = set->tags[hctx->queue_num];
        return NOTIFY_OK;
}

static int blk_mq_hctx_notify(void *data, unsigned long action,
                              unsigned int cpu)
{
        struct blk_mq_hw_ctx *hctx = data;

        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                return blk_mq_hctx_cpu_offline(hctx, cpu);
        else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
                return blk_mq_hctx_cpu_online(hctx, cpu);

        return NOTIFY_OK;
}

static void blk_mq_exit_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        unsigned flush_start_tag = set->queue_depth;

        blk_mq_tag_idle(hctx);

        if (set->ops->exit_request)
                set->ops->exit_request(set->driver_data,
                                       hctx->fq->flush_rq, hctx_idx,
                                       flush_start_tag + hctx_idx);

        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);

        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
        blk_free_flush_queue(hctx->fq);
        kfree(hctx->ctxs);
        blk_mq_free_bitmap(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
                struct blk_mq_tag_set *set, int nr_queue)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (i == nr_queue)
                        break;
                blk_mq_exit_hctx(q, set, hctx, i);
        }
}

static void blk_mq_free_hw_queues(struct request_queue *q,
                struct blk_mq_tag_set *set)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                free_cpumask_var(hctx->cpumask);
                kfree(hctx);
        }
}

static int blk_mq_init_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
        int node;
        unsigned flush_start_tag = set->queue_depth;

        node = hctx->numa_node;
        if (node == NUMA_NO_NODE)
                node = hctx->numa_node = set->numa_node;

        INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
        INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
        spin_lock_init(&hctx->lock);
        INIT_LIST_HEAD(&hctx->dispatch);
        hctx->queue = q;
        hctx->queue_num = hctx_idx;
        hctx->flags = set->flags;
        hctx->cmd_size = set->cmd_size;

        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                        blk_mq_hctx_notify, hctx);
        blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

        hctx->tags = set->tags[hctx_idx];

        /*
         * Allocate space for all possible cpus to avoid allocation at
         * runtime
         */
        hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
                                        GFP_KERNEL, node);
        if (!hctx->ctxs)
                goto unregister_cpu_notifier;

        if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
                goto free_ctxs;

        hctx->nr_ctx = 0;

        if (set->ops->init_hctx &&
            set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
                goto free_bitmap;

        hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
        if (!hctx->fq)
                goto exit_hctx;

        if (set->ops->init_request &&
            set->ops->init_request(set->driver_data,
                                   hctx->fq->flush_rq, hctx_idx,
                                   flush_start_tag + hctx_idx, node))
                goto free_fq;

        return 0;

 free_fq:
        kfree(hctx->fq);
 exit_hctx:
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
        blk_mq_free_bitmap(&hctx->ctx_map);
 free_ctxs:
        kfree(hctx->ctxs);
 unregister_cpu_notifier:
        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);

        return -1;
}

static int blk_mq_init_hw_queues(struct request_queue *q,
                struct blk_mq_tag_set *set)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        /*
         * Initialize hardware queues
         */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_init_hctx(q, set, hctx, i))
                        break;
        }

        if (i == q->nr_hw_queues)
                return 0;

        /*
         * Init failed
         */
        blk_mq_exit_hw_queues(q, set, i);

        return 1;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
                                   unsigned int nr_hw_queues)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
                struct blk_mq_hw_ctx *hctx;

                memset(__ctx, 0, sizeof(*__ctx));
                __ctx->cpu = i;
                spin_lock_init(&__ctx->lock);
                INIT_LIST_HEAD(&__ctx->rq_list);
                __ctx->queue = q;

1653                 /* If the CPU isn't online, it is mapped to the first hctx */
1654                 if (!cpu_online(i))
1655                         continue;
1656
1657                 hctx = q->mq_ops->map_queue(q, i);
1658                 cpumask_set_cpu(i, hctx->cpumask);
1659                 hctx->nr_ctx++;
1660
1661                 /*
1662                  * Set the local node only if we have more than one hw
1663                  * queue; if not, we remain on the home node of the device.
1664                  */
1665                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1666                         hctx->numa_node = cpu_to_node(i);
1667         }
1668 }
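/*
 * For illustration: the per-cpu software queues initialized above are what
 * the submit path later picks up, roughly as below (cf. blk_mq_get_ctx() in
 * blk-mq.h). "rq" is a request to queue; a real insert also marks the ctx
 * pending in the hctx bitmap via blk_mq_hctx_mark_pending(). A sketch, not
 * the actual submission code.
 */
#if 0
static void my_queue_on_this_cpu(struct request_queue *q, struct request *rq)
{
	struct blk_mq_ctx *ctx = per_cpu_ptr(q->queue_ctx, get_cpu());

	spin_lock(&ctx->lock);
	list_add_tail(&rq->queuelist, &ctx->rq_list);
	spin_unlock(&ctx->lock);
	put_cpu();
}
#endif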
1669
1670 static void blk_mq_map_swqueue(struct request_queue *q)
1671 {
1672         unsigned int i;
1673         struct blk_mq_hw_ctx *hctx;
1674         struct blk_mq_ctx *ctx;
1675
1676         queue_for_each_hw_ctx(q, hctx, i) {
1677                 cpumask_clear(hctx->cpumask);
1678                 hctx->nr_ctx = 0;
1679         }
1680
1681         /*
1682          * Map software to hardware queues
1683          */
1684         queue_for_each_ctx(q, ctx, i) {
1685                 /* If the CPU isn't online, it is mapped to the first hctx */
1686                 if (!cpu_online(i))
1687                         continue;
1688
1689                 hctx = q->mq_ops->map_queue(q, i);
1690                 cpumask_set_cpu(i, hctx->cpumask);
1691                 ctx->index_hw = hctx->nr_ctx;
1692                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1693         }
1694
1695         queue_for_each_hw_ctx(q, hctx, i) {
1696                 /*
1697                  * If no software queues are mapped to this hardware queue,
1698                  * disable it and free the request entries.
1699                  */
1700                 if (!hctx->nr_ctx) {
1701                         struct blk_mq_tag_set *set = q->tag_set;
1702
1703                         if (set->tags[i]) {
1704                                 blk_mq_free_rq_map(set, set->tags[i], i);
1705                                 set->tags[i] = NULL;
1706                                 hctx->tags = NULL;
1707                         }
1708                         continue;
1709                 }
1710
1711                 /*
1712                  * Initialize the batch round-robin counts.
1713                  */
1714                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1715                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1716         }
1717 }
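/*
 * Worked example (illustrative, assuming the default queue map): with four
 * online CPUs and two hardware queues, map_queue() typically sends CPUs 0-1
 * to hctx 0 and CPUs 2-3 to hctx 1, so the loop above produces:
 *
 *	ctx(cpu 0): index_hw = 0  ->  hctx 0, ctxs[0]
 *	ctx(cpu 1): index_hw = 1  ->  hctx 0, ctxs[1]
 *	ctx(cpu 2): index_hw = 0  ->  hctx 1, ctxs[0]
 *	ctx(cpu 3): index_hw = 1  ->  hctx 1, ctxs[1]
 *
 * index_hw is what the per-hctx pending bitmap is keyed on, which is why it
 * must be re-derived here as a dense, per-hctx index rather than reusing the
 * (possibly sparse) CPU number.
 */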
1718
1719 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1720 {
1721         struct blk_mq_hw_ctx *hctx;
1722         struct request_queue *q;
1723         bool shared;
1724         int i;
1725
1726         if (set->tag_list.next == set->tag_list.prev)
1727                 shared = false;
1728         else
1729                 shared = true;
1730
1731         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1732                 blk_mq_freeze_queue(q);
1733
1734                 queue_for_each_hw_ctx(q, hctx, i) {
1735                         if (shared)
1736                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1737                         else
1738                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1739                 }
1740                 blk_mq_unfreeze_queue(q);
1741         }
1742 }
1743
1744 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1745 {
1746         struct blk_mq_tag_set *set = q->tag_set;
1747
1748         mutex_lock(&set->tag_list_lock);
1749         list_del_init(&q->tag_set_list);
1750         blk_mq_update_tag_set_depth(set);
1751         mutex_unlock(&set->tag_list_lock);
1752 }
1753
1754 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1755                                      struct request_queue *q)
1756 {
1757         q->tag_set = set;
1758
1759         mutex_lock(&set->tag_list_lock);
1760         list_add_tail(&q->tag_set_list, &set->tag_list);
1761         blk_mq_update_tag_set_depth(set);
1762         mutex_unlock(&set->tag_list_lock);
1763 }
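/*
 * Illustrative scenario: a driver that carves several request queues out of
 * one tag set (say, two namespaces behind one controller) calls
 * blk_mq_init_queue() twice with the same set. The second add above flips
 * every hctx of both queues to BLK_MQ_F_TAG_SHARED, telling the tag
 * allocator to start fair-sharing tags between the active users.
 */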
1764
1765 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1766 {
1767         struct blk_mq_hw_ctx **hctxs;
1768         struct blk_mq_ctx __percpu *ctx;
1769         struct request_queue *q;
1770         unsigned int *map;
1771         int i;
1772
1773         ctx = alloc_percpu(struct blk_mq_ctx);
1774         if (!ctx)
1775                 return ERR_PTR(-ENOMEM);
1776
1777         /*
1778          * If a crashdump is active, then we are potentially in a very
1779          * memory constrained environment. Limit us to 1 queue and
1780          * 64 tags to prevent using too much memory.
1781          */
1782         if (is_kdump_kernel()) {
1783                 set->nr_hw_queues = 1;
1784                 set->queue_depth = min(64U, set->queue_depth);
1785         }
1786
1787         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1788                         set->numa_node);
1789
1790         if (!hctxs)
1791                 goto err_percpu;
1792
1793         map = blk_mq_make_queue_map(set);
1794         if (!map)
1795                 goto err_map;
1796
1797         for (i = 0; i < set->nr_hw_queues; i++) {
1798                 int node = blk_mq_hw_queue_to_node(map, i);
1799
1800                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1801                                         GFP_KERNEL, node);
1802                 if (!hctxs[i])
1803                         goto err_hctxs;
1804
1805                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1806                                                 node))
1807                         goto err_hctxs;
1808
1809                 atomic_set(&hctxs[i]->nr_active, 0);
1810                 hctxs[i]->numa_node = node;
1811                 hctxs[i]->queue_num = i;
1812         }
1813
1814         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1815         if (!q)
1816                 goto err_hctxs;
1817
1818         /*
1819          * Init percpu_ref in atomic mode so that it's faster to shutdown.
1820          * See blk_register_queue() for details.
1821          */
	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) {
		/*
		 * q->mq_ops is not set yet, so blk_cleanup_queue() takes the
		 * legacy (non-mq) teardown path and simply frees the queue;
		 * err_hctxs then releases the map and the hardware contexts.
		 */
		blk_cleanup_queue(q);
		goto err_hctxs;
	}
1825
1826         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1827         blk_queue_rq_timeout(q, 30000);
1828
1829         q->nr_queues = nr_cpu_ids;
1830         q->nr_hw_queues = set->nr_hw_queues;
1831         q->mq_map = map;
1832
1833         q->queue_ctx = ctx;
1834         q->queue_hw_ctx = hctxs;
1835
1836         q->mq_ops = set->ops;
1837         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1838
1839         if (!(set->flags & BLK_MQ_F_SG_MERGE))
1840                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1841
1842         q->sg_reserved_size = INT_MAX;
1843
1844         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1845         INIT_LIST_HEAD(&q->requeue_list);
1846         spin_lock_init(&q->requeue_lock);
1847
1848         if (q->nr_hw_queues > 1)
1849                 blk_queue_make_request(q, blk_mq_make_request);
1850         else
1851                 blk_queue_make_request(q, blk_sq_make_request);
1852
1853         if (set->timeout)
1854                 blk_queue_rq_timeout(q, set->timeout);
1855
1856         /*
1857          * Do this after blk_queue_make_request() overrides it...
1858          */
1859         q->nr_requests = set->queue_depth;
1860
1861         if (set->ops->complete)
1862                 blk_queue_softirq_done(q, set->ops->complete);
1863
1864         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1865
1866         if (blk_mq_init_hw_queues(q, set))
1867                 goto err_hw;
1868
1869         mutex_lock(&all_q_mutex);
1870         list_add_tail(&q->all_q_node, &all_q_list);
1871         mutex_unlock(&all_q_mutex);
1872
1873         blk_mq_add_queue_tag_set(set, q);
1874
1875         blk_mq_map_swqueue(q);
1876
1877         return q;
1878
1879 err_hw:
1880         blk_cleanup_queue(q);
1881 err_hctxs:
1882         kfree(map);
1883         for (i = 0; i < set->nr_hw_queues; i++) {
1884                 if (!hctxs[i])
1885                         break;
1886                 free_cpumask_var(hctxs[i]->cpumask);
1887                 kfree(hctxs[i]);
1888         }
1889 err_map:
1890         kfree(hctxs);
1891 err_percpu:
1892         free_percpu(ctx);
1893         return ERR_PTR(-ENOMEM);
1894 }
1895 EXPORT_SYMBOL(blk_mq_init_queue);
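/*
 * For illustration: the typical driver-side bring-up sequence for this API.
 * The ops-table entries and all my_* names are hypothetical and error
 * handling is abbreviated; a sketch assuming a simple single-queue device,
 * not a complete driver.
 */
#if 0
struct my_cmd {
	/* per-request driver state, sized via cmd_size below */
};

struct my_device {
	struct blk_mq_tag_set tag_set;
	struct request_queue *queue;
};

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,		/* required */
	.map_queue	= blk_mq_map_queue,	/* required; default mapping */
	.init_hctx	= my_init_hctx,		/* optional, see sketch above */
	.complete	= my_softirq_done,	/* optional */
};

static int my_setup_queue(struct my_device *dev)
{
	struct request_queue *q;
	int ret;

	dev->tag_set.ops = &my_mq_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.cmd_size = sizeof(struct my_cmd);	/* per-request PDU */
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.driver_data = dev;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	q = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(q);
	}
	dev->queue = q;
	return 0;
}
#endif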
1896
1897 void blk_mq_free_queue(struct request_queue *q)
1898 {
1899         struct blk_mq_tag_set   *set = q->tag_set;
1900
1901         blk_mq_del_queue_tag_set(q);
1902
1903         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1904         blk_mq_free_hw_queues(q, set);
1905
1906         percpu_ref_exit(&q->mq_usage_counter);
1907
1908         free_percpu(q->queue_ctx);
1909         kfree(q->queue_hw_ctx);
1910         kfree(q->mq_map);
1911
1912         q->queue_ctx = NULL;
1913         q->queue_hw_ctx = NULL;
1914         q->mq_map = NULL;
1915
1916         mutex_lock(&all_q_mutex);
1917         list_del_init(&q->all_q_node);
1918         mutex_unlock(&all_q_mutex);
1919 }
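/*
 * Teardown counterpart to the bring-up sketch above (same hypothetical my_*
 * names): drivers do not call blk_mq_free_queue() directly -- it is reached
 * via blk_cleanup_queue() -- and the tag set must outlive every queue
 * created from it.
 */
#if 0
static void my_teardown_queue(struct my_device *dev)
{
	blk_cleanup_queue(dev->queue);		/* ends up in blk_mq_free_queue() */
	blk_mq_free_tag_set(&dev->tag_set);	/* only after all queues are gone */
}
#endif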
1920
1921 /* Basically redo blk_mq_init_queue with queue frozen */
1922 static void blk_mq_queue_reinit(struct request_queue *q)
1923 {
1924         blk_mq_freeze_queue(q);
1925
1926         blk_mq_sysfs_unregister(q);
1927
1928         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1929
1930         /*
1931          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1932          * we should change hctx->numa_node according to the new topology (this
1933          * involves freeing and re-allocating memory; is it worth doing?)
1934          */
1935
1936         blk_mq_map_swqueue(q);
1937
1938         blk_mq_sysfs_register(q);
1939
1940         blk_mq_unfreeze_queue(q);
1941 }
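/*
 * Illustrative: in the four-CPU/two-hctx example above, while CPU 2 is
 * offline the queue map parks it on the first hardware queue; once it comes
 * back online, the hotplug notifier below re-runs this reinit and the remap
 * moves CPU 2's ctx onto hctx 1 again.
 */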
1942
1943 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1944                                       unsigned long action, void *hcpu)
1945 {
1946         struct request_queue *q;
1947
1948         /*
1949          * Before new mappings are established, a hot-added CPU might
1950          * already start handling requests. This doesn't break anything
1951          * as we map offline CPUs to the first hardware queue. We will
1952          * re-init the queue below to get optimal settings.
1953          */
1954         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1955             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1956                 return NOTIFY_OK;
1957
1958         mutex_lock(&all_q_mutex);
1959         list_for_each_entry(q, &all_q_list, all_q_node)
1960                 blk_mq_queue_reinit(q);
1961         mutex_unlock(&all_q_mutex);
1962         return NOTIFY_OK;
1963 }
1964
1965 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1966 {
1967         int i;
1968
1969         for (i = 0; i < set->nr_hw_queues; i++) {
1970                 set->tags[i] = blk_mq_init_rq_map(set, i);
1971                 if (!set->tags[i])
1972                         goto out_unwind;
1973         }
1974
1975         return 0;
1976
1977 out_unwind:
1978         while (--i >= 0)
1979                 blk_mq_free_rq_map(set, set->tags[i], i);
1980
1981         return -ENOMEM;
1982 }
1983
1984 /*
1985  * Allocate the request maps associated with this tag_set. Note that this
1986  * may reduce the depth asked for, if memory is tight. set->queue_depth
1987  * will be updated to reflect the allocated depth.
1988  */
1989 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1990 {
1991         unsigned int depth;
1992         int err;
1993
1994         depth = set->queue_depth;
1995         do {
1996                 err = __blk_mq_alloc_rq_maps(set);
1997                 if (!err)
1998                         break;
1999
2000                 set->queue_depth >>= 1;
2001                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2002                         err = -ENOMEM;
2003                         break;
2004                 }
2005         } while (set->queue_depth);
2006
2007         if (!set->queue_depth || err) {
2008                 pr_err("blk-mq: failed to allocate request map\n");
2009                 return -ENOMEM;
2010         }
2011
2012         if (depth != set->queue_depth)
2013                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2014                                                 depth, set->queue_depth);
2015
2016         return 0;
2017 }
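/*
 * Worked example (illustrative): starting from queue_depth = 256, if each
 * per-hctx allocation attempt fails, the loop above retries with
 * 256 -> 128 -> 64 -> ..., giving up once the depth would drop below
 * reserved_tags + BLK_MQ_TAG_MIN. If it finally succeeds at 64, the caller
 * sees set->queue_depth = 64 and the log reads
 * "blk-mq: reduced tag depth (256 -> 64)".
 */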
2018
2019 /*
2020  * Alloc a tag set to be associated with one or more request queues.
2021  * May fail with EINVAL for various error conditions. May adjust the
2022  * requested depth down, if it is too large. In that case, the set
2023  * value will be stored in set->queue_depth.
2024  */
2025 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2026 {
2027         if (!set->nr_hw_queues)
2028                 return -EINVAL;
2029         if (!set->queue_depth)
2030                 return -EINVAL;
2031         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2032                 return -EINVAL;
2033
2034         if (!set->ops->queue_rq || !set->ops->map_queue)
2035                 return -EINVAL;
2036
2037         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2038                 pr_info("blk-mq: reduced tag depth to %u\n",
2039                         BLK_MQ_MAX_DEPTH);
2040                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2041         }
2042
2043         set->tags = kmalloc_node(set->nr_hw_queues *
2044                                  sizeof(struct blk_mq_tags *),
2045                                  GFP_KERNEL, set->numa_node);
2046         if (!set->tags)
2047                 return -ENOMEM;
2048
2049         if (blk_mq_alloc_rq_maps(set))
2050                 goto enomem;
2051
2052         mutex_init(&set->tag_list_lock);
2053         INIT_LIST_HEAD(&set->tag_list);
2054
2055         return 0;
2056 enomem:
2057         kfree(set->tags);
2058         set->tags = NULL;
2059         return -ENOMEM;
2060 }
2061 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2062
2063 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2064 {
2065         int i;
2066
2067         for (i = 0; i < set->nr_hw_queues; i++) {
2068                 if (set->tags[i])
2069                         blk_mq_free_rq_map(set, set->tags[i], i);
2070         }
2071
2072         kfree(set->tags);
2073         set->tags = NULL;
2074 }
2075 EXPORT_SYMBOL(blk_mq_free_tag_set);
2076
2077 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2078 {
2079         struct blk_mq_tag_set *set = q->tag_set;
2080         struct blk_mq_hw_ctx *hctx;
2081         int i, ret;
2082
2083         if (!set || nr > set->queue_depth)
2084                 return -EINVAL;
2085
2086         ret = 0;
2087         queue_for_each_hw_ctx(q, hctx, i) {
2088                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2089                 if (ret)
2090                         break;
2091         }
2092
2093         if (!ret)
2094                 q->nr_requests = nr;
2095
2096         return ret;
2097 }
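/*
 * Illustrative note: this is the blk-mq backend for writing the sysfs
 * nr_requests attribute (queue_requests_store() in blk-sysfs.c), e.g.:
 *
 *	# echo 32 > /sys/block/<dev>/queue/nr_requests
 *
 * The new depth can only shrink (or restore) the tag space; anything above
 * the tag set's queue_depth is rejected with -EINVAL above.
 */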
2098
2099 void blk_mq_disable_hotplug(void)
2100 {
2101         mutex_lock(&all_q_mutex);
2102 }
2103
2104 void blk_mq_enable_hotplug(void)
2105 {
2106         mutex_unlock(&all_q_mutex);
2107 }
2108
2109 static int __init blk_mq_init(void)
2110 {
2111         blk_mq_cpu_init();
2112
2113         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2114
2115         return 0;
2116 }
2117 subsys_initcall(blk_mq_init);