block/blk-mq.c
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23 #include <linux/crash_dump.h>
24
25 #include <trace/events/block.h>
26
27 #include <linux/blk-mq.h>
28 #include "blk.h"
29 #include "blk-mq.h"
30 #include "blk-mq-tag.h"
31
32 static DEFINE_MUTEX(all_q_mutex);
33 static LIST_HEAD(all_q_list);
34
35 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
36
37 /*
38  * Check if any of the ctx's have pending work in this hardware queue
39  */
40 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
41 {
42         unsigned int i;
43
44         for (i = 0; i < hctx->ctx_map.map_size; i++)
45                 if (hctx->ctx_map.map[i].word)
46                         return true;
47
48         return false;
49 }
50
51 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
52                                               struct blk_mq_ctx *ctx)
53 {
54         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
55 }
56
57 #define CTX_TO_BIT(hctx, ctx)   \
58         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
59
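/*
 * Worked example (illustrative only): with bits_per_word == 8, the
 * software queue with index_hw == 11 lives in ctx_map.map[11 / 8], i.e.
 * map[1], at bit 11 & (8 - 1) == 3.  get_bm() and CTX_TO_BIT() simply
 * split index_hw into that word/bit pair.
 */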
60 /*
61  * Mark this ctx as having pending work in this hardware queue
62  */
63 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
64                                      struct blk_mq_ctx *ctx)
65 {
66         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
67
68         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
69                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
70 }
71
72 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
73                                       struct blk_mq_ctx *ctx)
74 {
75         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
76
77         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
78 }
79
80 static int blk_mq_queue_enter(struct request_queue *q)
81 {
82         while (true) {
83                 int ret;
84
85                 if (percpu_ref_tryget_live(&q->mq_usage_counter))
86                         return 0;
87
88                 ret = wait_event_interruptible(q->mq_freeze_wq,
89                                 !q->mq_freeze_depth || blk_queue_dying(q));
90                 if (blk_queue_dying(q))
91                         return -ENODEV;
92                 if (ret)
93                         return ret;
94         }
95 }
96
97 static void blk_mq_queue_exit(struct request_queue *q)
98 {
99         percpu_ref_put(&q->mq_usage_counter);
100 }
101
102 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
103 {
104         struct request_queue *q =
105                 container_of(ref, struct request_queue, mq_usage_counter);
106
107         wake_up_all(&q->mq_freeze_wq);
108 }
109
110 static void blk_mq_freeze_queue_start(struct request_queue *q)
111 {
112         bool freeze;
113
114         spin_lock_irq(q->queue_lock);
115         freeze = !q->mq_freeze_depth++;
116         spin_unlock_irq(q->queue_lock);
117
118         if (freeze) {
119                 percpu_ref_kill(&q->mq_usage_counter);
120                 blk_mq_run_queues(q, false);
121         }
122 }
123
124 static void blk_mq_freeze_queue_wait(struct request_queue *q)
125 {
126         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
127 }
128
129 /*
130  * Guarantee no request is in use, so we can change any data structure of
131  * the queue afterward.
132  */
133 void blk_mq_freeze_queue(struct request_queue *q)
134 {
135         blk_mq_freeze_queue_start(q);
136         blk_mq_freeze_queue_wait(q);
137 }
138
139 static void blk_mq_unfreeze_queue(struct request_queue *q)
140 {
141         bool wake;
142
143         spin_lock_irq(q->queue_lock);
144         wake = !--q->mq_freeze_depth;
145         WARN_ON_ONCE(q->mq_freeze_depth < 0);
146         spin_unlock_irq(q->queue_lock);
147         if (wake) {
148                 percpu_ref_reinit(&q->mq_usage_counter);
149                 wake_up_all(&q->mq_freeze_wq);
150         }
151 }
152
153 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
154 {
155         return blk_mq_has_free_tags(hctx->tags);
156 }
157 EXPORT_SYMBOL(blk_mq_can_queue);
158
159 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
160                                struct request *rq, unsigned int rw_flags)
161 {
162         if (blk_queue_io_stat(q))
163                 rw_flags |= REQ_IO_STAT;
164
165         INIT_LIST_HEAD(&rq->queuelist);
166         /* csd/requeue_work/fifo_time is initialized before use */
167         rq->q = q;
168         rq->mq_ctx = ctx;
169         rq->cmd_flags |= rw_flags;
170         /* do not touch atomic flags, it needs atomic ops against the timer */
171         rq->cpu = -1;
172         INIT_HLIST_NODE(&rq->hash);
173         RB_CLEAR_NODE(&rq->rb_node);
174         rq->rq_disk = NULL;
175         rq->part = NULL;
176         rq->start_time = jiffies;
177 #ifdef CONFIG_BLK_CGROUP
178         rq->rl = NULL;
179         set_start_time_ns(rq);
180         rq->io_start_time_ns = 0;
181 #endif
182         rq->nr_phys_segments = 0;
183 #if defined(CONFIG_BLK_DEV_INTEGRITY)
184         rq->nr_integrity_segments = 0;
185 #endif
186         rq->special = NULL;
187         /* tag was already set */
188         rq->errors = 0;
189
190         rq->cmd = rq->__cmd;
191
192         rq->extra_len = 0;
193         rq->sense_len = 0;
194         rq->resid_len = 0;
195         rq->sense = NULL;
196
197         INIT_LIST_HEAD(&rq->timeout_list);
198         rq->timeout = 0;
199
200         rq->end_io = NULL;
201         rq->end_io_data = NULL;
202         rq->next_rq = NULL;
203
204         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
205 }
206
207 static struct request *
208 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
209 {
210         struct request *rq;
211         unsigned int tag;
212
213         tag = blk_mq_get_tag(data);
214         if (tag != BLK_MQ_TAG_FAIL) {
215                 rq = data->hctx->tags->rqs[tag];
216
217                 if (blk_mq_tag_busy(data->hctx)) {
218                         rq->cmd_flags = REQ_MQ_INFLIGHT;
219                         atomic_inc(&data->hctx->nr_active);
220                 }
221
222                 rq->tag = tag;
223                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
224                 return rq;
225         }
226
227         return NULL;
228 }
229
230 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
231                 bool reserved)
232 {
233         struct blk_mq_ctx *ctx;
234         struct blk_mq_hw_ctx *hctx;
235         struct request *rq;
236         struct blk_mq_alloc_data alloc_data;
237         int ret;
238
239         ret = blk_mq_queue_enter(q);
240         if (ret)
241                 return ERR_PTR(ret);
242
243         ctx = blk_mq_get_ctx(q);
244         hctx = q->mq_ops->map_queue(q, ctx->cpu);
245         blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
246                         reserved, ctx, hctx);
247
248         rq = __blk_mq_alloc_request(&alloc_data, rw);
249         if (!rq && (gfp & __GFP_WAIT)) {
250                 __blk_mq_run_hw_queue(hctx);
251                 blk_mq_put_ctx(ctx);
252
253                 ctx = blk_mq_get_ctx(q);
254                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
255                 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
256                                 hctx);
257                 rq = __blk_mq_alloc_request(&alloc_data, rw);
258                 ctx = alloc_data.ctx;
259         }
260         blk_mq_put_ctx(ctx);
261         if (!rq)
262                 return ERR_PTR(-EWOULDBLOCK);
263         return rq;
264 }
265 EXPORT_SYMBOL(blk_mq_alloc_request);
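/*
 * Illustrative sketch, not part of this file: how a driver might use
 * blk_mq_alloc_request() above for an internal command.  The completion
 * callback "my_done" is hypothetical.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... fill the driver payload via blk_mq_rq_to_pdu(rq) ...
 *	blk_execute_rq_nowait(q, NULL, rq, 0, my_done);
 *
 * With __GFP_WAIT set in gfp the allocation runs the hardware queue and
 * retries (possibly sleeping for a tag); without it, failure is reported
 * immediately as ERR_PTR(-EWOULDBLOCK).
 */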
266
267 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
268                                   struct blk_mq_ctx *ctx, struct request *rq)
269 {
270         const int tag = rq->tag;
271         struct request_queue *q = rq->q;
272
273         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
274                 atomic_dec(&hctx->nr_active);
275         rq->cmd_flags = 0;
276
277         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
278         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
279         blk_mq_queue_exit(q);
280 }
281
282 void blk_mq_free_request(struct request *rq)
283 {
284         struct blk_mq_ctx *ctx = rq->mq_ctx;
285         struct blk_mq_hw_ctx *hctx;
286         struct request_queue *q = rq->q;
287
288         ctx->rq_completed[rq_is_sync(rq)]++;
289
290         hctx = q->mq_ops->map_queue(q, ctx->cpu);
291         __blk_mq_free_request(hctx, ctx, rq);
292 }
293
294 inline void __blk_mq_end_request(struct request *rq, int error)
295 {
296         blk_account_io_done(rq);
297
298         if (rq->end_io) {
299                 rq->end_io(rq, error);
300         } else {
301                 if (unlikely(blk_bidi_rq(rq)))
302                         blk_mq_free_request(rq->next_rq);
303                 blk_mq_free_request(rq);
304         }
305 }
306 EXPORT_SYMBOL(__blk_mq_end_request);
307
308 void blk_mq_end_request(struct request *rq, int error)
309 {
310         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
311                 BUG();
312         __blk_mq_end_request(rq, error);
313 }
314 EXPORT_SYMBOL(blk_mq_end_request);
315
316 static void __blk_mq_complete_request_remote(void *data)
317 {
318         struct request *rq = data;
319
320         rq->q->softirq_done_fn(rq);
321 }
322
323 static void blk_mq_ipi_complete_request(struct request *rq)
324 {
325         struct blk_mq_ctx *ctx = rq->mq_ctx;
326         bool shared = false;
327         int cpu;
328
329         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
330                 rq->q->softirq_done_fn(rq);
331                 return;
332         }
333
334         cpu = get_cpu();
335         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
336                 shared = cpus_share_cache(cpu, ctx->cpu);
337
338         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
339                 rq->csd.func = __blk_mq_complete_request_remote;
340                 rq->csd.info = rq;
341                 rq->csd.flags = 0;
342                 smp_call_function_single_async(ctx->cpu, &rq->csd);
343         } else {
344                 rq->q->softirq_done_fn(rq);
345         }
346         put_cpu();
347 }
348
349 void __blk_mq_complete_request(struct request *rq)
350 {
351         struct request_queue *q = rq->q;
352
353         if (!q->softirq_done_fn)
354                 blk_mq_end_request(rq, rq->errors);
355         else
356                 blk_mq_ipi_complete_request(rq);
357 }
358
359 /**
360  * blk_mq_complete_request - end I/O on a request
361  * @rq:         the request being processed
362  *
363  * Description:
364  *      Ends all I/O on a request. It does not handle partial completions.
365  *      The actual completion happens out-of-order, through an IPI handler.
366  **/
367 void blk_mq_complete_request(struct request *rq)
368 {
369         struct request_queue *q = rq->q;
370
371         if (unlikely(blk_should_fake_timeout(q)))
372                 return;
373         if (!blk_mark_rq_complete(rq))
374                 __blk_mq_complete_request(rq);
375 }
376 EXPORT_SYMBOL(blk_mq_complete_request);
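/*
 * Illustrative sketch, not part of this file: a driver's interrupt
 * handler typically looks the request up by tag and hands it to
 * blk_mq_complete_request() above, which bounces the completion to the
 * submitting CPU when QUEUE_FLAG_SAME_COMP is set.  "my_dev" and its
 * fields are hypothetical.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		unsigned int tag = readl(dev->regs + MY_COMPLETED_TAG_REG);
 *		struct request *rq = blk_mq_tag_to_rq(dev->tags, tag);
 *
 *		rq->errors = 0;
 *		blk_mq_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 *
 * The request is then ended either directly or from the queue's
 * softirq_done_fn, which usually just calls blk_mq_end_request().
 */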
377
378 void blk_mq_start_request(struct request *rq)
379 {
380         struct request_queue *q = rq->q;
381
382         trace_block_rq_issue(q, rq);
383
384         rq->resid_len = blk_rq_bytes(rq);
385         if (unlikely(blk_bidi_rq(rq)))
386                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
387
388         blk_add_timer(rq);
389
390         /*
391          * Ensure that ->deadline is visible before we set the started
392          * flag and clear the completed flag.
393          */
394         smp_mb__before_atomic();
395
396         /*
397          * Mark us as started and clear complete. Complete might have been
398          * set if requeue raced with timeout, which then marked it as
399          * complete. So be sure to clear complete again when we start
400          * the request, otherwise we'll ignore the completion event.
401          */
402         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
403                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
404         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
405                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
406
407         if (q->dma_drain_size && blk_rq_bytes(rq)) {
408                 /*
409                  * Make sure space for the drain appears.  We know we can do
410                  * this because max_hw_segments has been adjusted to be one
411                  * fewer than the device can handle.
412                  */
413                 rq->nr_phys_segments++;
414         }
415 }
416 EXPORT_SYMBOL(blk_mq_start_request);
417
418 static void __blk_mq_requeue_request(struct request *rq)
419 {
420         struct request_queue *q = rq->q;
421
422         trace_block_rq_requeue(q, rq);
423
424         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
425                 if (q->dma_drain_size && blk_rq_bytes(rq))
426                         rq->nr_phys_segments--;
427         }
428 }
429
430 void blk_mq_requeue_request(struct request *rq)
431 {
432         __blk_mq_requeue_request(rq);
433
434         BUG_ON(blk_queued_rq(rq));
435         blk_mq_add_to_requeue_list(rq, true);
436 }
437 EXPORT_SYMBOL(blk_mq_requeue_request);
438
439 static void blk_mq_requeue_work(struct work_struct *work)
440 {
441         struct request_queue *q =
442                 container_of(work, struct request_queue, requeue_work);
443         LIST_HEAD(rq_list);
444         struct request *rq, *next;
445         unsigned long flags;
446
447         spin_lock_irqsave(&q->requeue_lock, flags);
448         list_splice_init(&q->requeue_list, &rq_list);
449         spin_unlock_irqrestore(&q->requeue_lock, flags);
450
451         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
452                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
453                         continue;
454
455                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
456                 list_del_init(&rq->queuelist);
457                 blk_mq_insert_request(rq, true, false, false);
458         }
459
460         while (!list_empty(&rq_list)) {
461                 rq = list_entry(rq_list.next, struct request, queuelist);
462                 list_del_init(&rq->queuelist);
463                 blk_mq_insert_request(rq, false, false, false);
464         }
465
466         /*
467          * Use the start variant of queue running here, so that running
468          * the requeue work will kick stopped queues.
469          */
470         blk_mq_start_hw_queues(q);
471 }
472
473 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
474 {
475         struct request_queue *q = rq->q;
476         unsigned long flags;
477
478         /*
479          * We abuse this flag that is otherwise used by the I/O scheduler to
480  * request head insertion from the workqueue.
481          */
482         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
483
484         spin_lock_irqsave(&q->requeue_lock, flags);
485         if (at_head) {
486                 rq->cmd_flags |= REQ_SOFTBARRIER;
487                 list_add(&rq->queuelist, &q->requeue_list);
488         } else {
489                 list_add_tail(&rq->queuelist, &q->requeue_list);
490         }
491         spin_unlock_irqrestore(&q->requeue_lock, flags);
492 }
493 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
494
495 void blk_mq_kick_requeue_list(struct request_queue *q)
496 {
497         kblockd_schedule_work(&q->requeue_work);
498 }
499 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
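/*
 * Illustrative sketch, not part of this file: a driver that has already
 * started a request but hits a transient error usually puts it back with
 * the helpers above and then kicks the requeue work:
 *
 *	blk_mq_requeue_request(rq);
 *	blk_mq_kick_requeue_list(rq->q);
 *
 * blk_mq_requeue_request() clears the STARTED state and adds the request
 * to the head of the requeue list; the kick schedules blk_mq_requeue_work()
 * to reinsert it and restart the hardware queues.
 */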
500
501 static inline bool is_flush_request(struct request *rq,
502                 struct blk_flush_queue *fq, unsigned int tag)
503 {
504         return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
505                         fq->flush_rq->tag == tag);
506 }
507
508 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
509 {
510         struct request *rq = tags->rqs[tag];
511         /* mq_ctx of flush rq is always cloned from the corresponding req */
512         struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);
513
514         if (!is_flush_request(rq, fq, tag))
515                 return rq;
516
517         return fq->flush_rq;
518 }
519 EXPORT_SYMBOL(blk_mq_tag_to_rq);
520
521 struct blk_mq_timeout_data {
522         unsigned long next;
523         unsigned int next_set;
524 };
525
526 void blk_mq_rq_timed_out(struct request *req, bool reserved)
527 {
528         struct blk_mq_ops *ops = req->q->mq_ops;
529         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
530
531         /*
532          * We know that complete is set at this point. If STARTED isn't set
533          * anymore, then the request isn't active and the "timeout" should
534          * just be ignored. This can happen due to the bitflag ordering.
535          * Timeout first checks if STARTED is set, and if it is, assumes
536          * the request is active. But if we race with completion, then
537  * both flags will get cleared. So check here again, and ignore
538          * a timeout event with a request that isn't active.
539          */
540         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
541                 return;
542
543         if (ops->timeout)
544                 ret = ops->timeout(req, reserved);
545
546         switch (ret) {
547         case BLK_EH_HANDLED:
548                 __blk_mq_complete_request(req);
549                 break;
550         case BLK_EH_RESET_TIMER:
551                 blk_add_timer(req);
552                 blk_clear_rq_complete(req);
553                 break;
554         case BLK_EH_NOT_HANDLED:
555                 break;
556         default:
557                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
558                 break;
559         }
560 }
561
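/*
 * Illustrative sketch, not part of this file: the ->timeout callback
 * invoked above decides what happens to a stuck request.  "my_dev" and
 * "my_abort_cmd" are hypothetical.
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq,
 *						   bool reserved)
 *	{
 *		struct my_dev *dev = rq->q->queuedata;
 *
 *		if (my_abort_cmd(dev, rq->tag) == 0) {
 *			rq->errors = -ETIMEDOUT;
 *			return BLK_EH_HANDLED;
 *		}
 *		return BLK_EH_RESET_TIMER;
 *	}
 *
 * BLK_EH_HANDLED completes the request, BLK_EH_RESET_TIMER re-arms the
 * timer and clears the complete flag, and BLK_EH_NOT_HANDLED tells the
 * core to do nothing further here.
 */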
562 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
563                 struct request *rq, void *priv, bool reserved)
564 {
565         struct blk_mq_timeout_data *data = priv;
566
567         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
568                 return;
569
570         if (time_after_eq(jiffies, rq->deadline)) {
571                 if (!blk_mark_rq_complete(rq))
572                         blk_mq_rq_timed_out(rq, reserved);
573         } else if (!data->next_set || time_after(data->next, rq->deadline)) {
574                 data->next = rq->deadline;
575                 data->next_set = 1;
576         }
577 }
578
579 static void blk_mq_rq_timer(unsigned long priv)
580 {
581         struct request_queue *q = (struct request_queue *)priv;
582         struct blk_mq_timeout_data data = {
583                 .next           = 0,
584                 .next_set       = 0,
585         };
586         struct blk_mq_hw_ctx *hctx;
587         int i;
588
589         queue_for_each_hw_ctx(q, hctx, i) {
590                 /*
591                  * If no software queues are currently mapped to this
592                  * hardware queue, there's nothing to check
593                  */
594                 if (!hctx->nr_ctx || !hctx->tags)
595                         continue;
596
597                 blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
598         }
599
600         if (data.next_set) {
601                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
602                 mod_timer(&q->timeout, data.next);
603         } else {
604                 queue_for_each_hw_ctx(q, hctx, i)
605                         blk_mq_tag_idle(hctx);
606         }
607 }
608
609 /*
610  * Reverse check our software queue for entries that we could potentially
611  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
612  * too much time checking for merges.
613  */
614 static bool blk_mq_attempt_merge(struct request_queue *q,
615                                  struct blk_mq_ctx *ctx, struct bio *bio)
616 {
617         struct request *rq;
618         int checked = 8;
619
620         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
621                 int el_ret;
622
623                 if (!checked--)
624                         break;
625
626                 if (!blk_rq_merge_ok(rq, bio))
627                         continue;
628
629                 el_ret = blk_try_merge(rq, bio);
630                 if (el_ret == ELEVATOR_BACK_MERGE) {
631                         if (bio_attempt_back_merge(q, rq, bio)) {
632                                 ctx->rq_merged++;
633                                 return true;
634                         }
635                         break;
636                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
637                         if (bio_attempt_front_merge(q, rq, bio)) {
638                                 ctx->rq_merged++;
639                                 return true;
640                         }
641                         break;
642                 }
643         }
644
645         return false;
646 }
647
648 /*
649  * Process software queues that have been marked busy, splicing them
650  * to the for-dispatch list.
651  */
652 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
653 {
654         struct blk_mq_ctx *ctx;
655         int i;
656
657         for (i = 0; i < hctx->ctx_map.map_size; i++) {
658                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
659                 unsigned int off, bit;
660
661                 if (!bm->word)
662                         continue;
663
664                 bit = 0;
665                 off = i * hctx->ctx_map.bits_per_word;
666                 do {
667                         bit = find_next_bit(&bm->word, bm->depth, bit);
668                         if (bit >= bm->depth)
669                                 break;
670
671                         ctx = hctx->ctxs[bit + off];
672                         clear_bit(bit, &bm->word);
673                         spin_lock(&ctx->lock);
674                         list_splice_tail_init(&ctx->rq_list, list);
675                         spin_unlock(&ctx->lock);
676
677                         bit++;
678                 } while (1);
679         }
680 }
681
682 /*
683  * Run this hardware queue, pulling any software queues mapped to it in.
684  * Note that this function currently has various problems around ordering
685  * of IO. In particular, we'd like FIFO behaviour on handling existing
686  * items on the hctx->dispatch list. Ignore that for now.
687  */
688 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
689 {
690         struct request_queue *q = hctx->queue;
691         struct request *rq;
692         LIST_HEAD(rq_list);
693         int queued;
694
695         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
696
697         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
698                 return;
699
700         hctx->run++;
701
702         /*
703          * Touch any software queue that has pending entries.
704          */
705         flush_busy_ctxs(hctx, &rq_list);
706
707         /*
708          * If we have previous entries on our dispatch list, grab them
709          * and stuff them at the front for more fair dispatch.
710          */
711         if (!list_empty_careful(&hctx->dispatch)) {
712                 spin_lock(&hctx->lock);
713                 if (!list_empty(&hctx->dispatch))
714                         list_splice_init(&hctx->dispatch, &rq_list);
715                 spin_unlock(&hctx->lock);
716         }
717
718         /*
719          * Now process all the entries, sending them to the driver.
720          */
721         queued = 0;
722         while (!list_empty(&rq_list)) {
723                 int ret;
724
725                 rq = list_first_entry(&rq_list, struct request, queuelist);
726                 list_del_init(&rq->queuelist);
727
728                 ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list));
729                 switch (ret) {
730                 case BLK_MQ_RQ_QUEUE_OK:
731                         queued++;
732                         continue;
733                 case BLK_MQ_RQ_QUEUE_BUSY:
734                         list_add(&rq->queuelist, &rq_list);
735                         __blk_mq_requeue_request(rq);
736                         break;
737                 default:
738                         pr_err("blk-mq: bad return on queue: %d\n", ret);
739                 case BLK_MQ_RQ_QUEUE_ERROR:
740                         rq->errors = -EIO;
741                         blk_mq_end_request(rq, rq->errors);
742                         break;
743                 }
744
745                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
746                         break;
747         }
748
749         if (!queued)
750                 hctx->dispatched[0]++;
751         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
752                 hctx->dispatched[ilog2(queued) + 1]++;
753
754         /*
755          * Any items that need requeuing? Stuff them into hctx->dispatch,
756          * which is where we will continue on the next queue run.
757          */
758         if (!list_empty(&rq_list)) {
759                 spin_lock(&hctx->lock);
760                 list_splice(&rq_list, &hctx->dispatch);
761                 spin_unlock(&hctx->lock);
762         }
763 }
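/*
 * Illustrative sketch, not part of this file: the dispatch loop above
 * understands three ->queue_rq() return codes.  A minimal driver
 * implementation might look like this; "my_dev", "my_dev_has_room" and
 * "my_submit" are hypothetical.
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
 *			       bool last)
 *	{
 *		struct my_dev *dev = hctx->driver_data;
 *
 *		blk_mq_start_request(rq);
 *
 *		if (!my_dev_has_room(dev))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		if (my_submit(dev, rq, last))
 *			return BLK_MQ_RQ_QUEUE_ERROR;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 * BUSY puts the request back on hctx->dispatch and stops this dispatch
 * run, ERROR ends the request with -EIO, and OK means the driver now
 * owns it.
 */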
764
765 /*
766  * It'd be great if the workqueue API had a way to pass
767  * in a mask and had some smarts for more clever placement.
768  * For now we just round-robin here, switching for every
769  * BLK_MQ_CPU_WORK_BATCH queued items.
770  */
771 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
772 {
773         int cpu = hctx->next_cpu;
774
775         if (--hctx->next_cpu_batch <= 0) {
776                 int next_cpu;
777
778                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
779                 if (next_cpu >= nr_cpu_ids)
780                         next_cpu = cpumask_first(hctx->cpumask);
781
782                 hctx->next_cpu = next_cpu;
783                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
784         }
785
786         return cpu;
787 }
788
789 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
790 {
791         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
792                 return;
793
794         if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
795                 __blk_mq_run_hw_queue(hctx);
796         else if (hctx->queue->nr_hw_queues == 1)
797                 kblockd_schedule_delayed_work(&hctx->run_work, 0);
798         else {
799                 unsigned int cpu;
800
801                 cpu = blk_mq_hctx_next_cpu(hctx);
802                 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
803         }
804 }
805
806 void blk_mq_run_queues(struct request_queue *q, bool async)
807 {
808         struct blk_mq_hw_ctx *hctx;
809         int i;
810
811         queue_for_each_hw_ctx(q, hctx, i) {
812                 if ((!blk_mq_hctx_has_pending(hctx) &&
813                     list_empty_careful(&hctx->dispatch)) ||
814                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
815                         continue;
816
817                 preempt_disable();
818                 blk_mq_run_hw_queue(hctx, async);
819                 preempt_enable();
820         }
821 }
822 EXPORT_SYMBOL(blk_mq_run_queues);
823
824 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
825 {
826         cancel_delayed_work(&hctx->run_work);
827         cancel_delayed_work(&hctx->delay_work);
828         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
829 }
830 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
831
832 void blk_mq_stop_hw_queues(struct request_queue *q)
833 {
834         struct blk_mq_hw_ctx *hctx;
835         int i;
836
837         queue_for_each_hw_ctx(q, hctx, i)
838                 blk_mq_stop_hw_queue(hctx);
839 }
840 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
841
842 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
843 {
844         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
845
846         preempt_disable();
847         blk_mq_run_hw_queue(hctx, false);
848         preempt_enable();
849 }
850 EXPORT_SYMBOL(blk_mq_start_hw_queue);
851
852 void blk_mq_start_hw_queues(struct request_queue *q)
853 {
854         struct blk_mq_hw_ctx *hctx;
855         int i;
856
857         queue_for_each_hw_ctx(q, hctx, i)
858                 blk_mq_start_hw_queue(hctx);
859 }
860 EXPORT_SYMBOL(blk_mq_start_hw_queues);
861
862
863 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
864 {
865         struct blk_mq_hw_ctx *hctx;
866         int i;
867
868         queue_for_each_hw_ctx(q, hctx, i) {
869                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
870                         continue;
871
872                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
873                 preempt_disable();
874                 blk_mq_run_hw_queue(hctx, async);
875                 preempt_enable();
876         }
877 }
878 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
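/*
 * Illustrative sketch, not part of this file: a common pattern built on
 * the helpers above is to stop a hardware queue when the device runs out
 * of resources and to restart the stopped queues once a completion frees
 * space ("dev->queue" is a hypothetical request_queue pointer):
 *
 *	In ->queue_rq(), when the device ring is full:
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_MQ_RQ_QUEUE_BUSY;
 *
 *	In the completion path, after freeing a slot:
 *		blk_mq_start_stopped_hw_queues(dev->queue, true);
 */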
879
880 static void blk_mq_run_work_fn(struct work_struct *work)
881 {
882         struct blk_mq_hw_ctx *hctx;
883
884         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
885
886         __blk_mq_run_hw_queue(hctx);
887 }
888
889 static void blk_mq_delay_work_fn(struct work_struct *work)
890 {
891         struct blk_mq_hw_ctx *hctx;
892
893         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
894
895         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
896                 __blk_mq_run_hw_queue(hctx);
897 }
898
899 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
900 {
901         unsigned long tmo = msecs_to_jiffies(msecs);
902
903         if (hctx->queue->nr_hw_queues == 1)
904                 kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
905         else {
906                 unsigned int cpu;
907
908                 cpu = blk_mq_hctx_next_cpu(hctx);
909                 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
910         }
911 }
912 EXPORT_SYMBOL(blk_mq_delay_queue);
913
914 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
915                                     struct request *rq, bool at_head)
916 {
917         struct blk_mq_ctx *ctx = rq->mq_ctx;
918
919         trace_block_rq_insert(hctx->queue, rq);
920
921         if (at_head)
922                 list_add(&rq->queuelist, &ctx->rq_list);
923         else
924                 list_add_tail(&rq->queuelist, &ctx->rq_list);
925
926         blk_mq_hctx_mark_pending(hctx, ctx);
927 }
928
929 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
930                 bool async)
931 {
932         struct request_queue *q = rq->q;
933         struct blk_mq_hw_ctx *hctx;
934         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
935
936         current_ctx = blk_mq_get_ctx(q);
937         if (!cpu_online(ctx->cpu))
938                 rq->mq_ctx = ctx = current_ctx;
939
940         hctx = q->mq_ops->map_queue(q, ctx->cpu);
941
942         spin_lock(&ctx->lock);
943         __blk_mq_insert_request(hctx, rq, at_head);
944         spin_unlock(&ctx->lock);
945
946         if (run_queue)
947                 blk_mq_run_hw_queue(hctx, async);
948
949         blk_mq_put_ctx(current_ctx);
950 }
951
952 static void blk_mq_insert_requests(struct request_queue *q,
953                                      struct blk_mq_ctx *ctx,
954                                      struct list_head *list,
955                                      int depth,
956                                      bool from_schedule)
957
958 {
959         struct blk_mq_hw_ctx *hctx;
960         struct blk_mq_ctx *current_ctx;
961
962         trace_block_unplug(q, depth, !from_schedule);
963
964         current_ctx = blk_mq_get_ctx(q);
965
966         if (!cpu_online(ctx->cpu))
967                 ctx = current_ctx;
968         hctx = q->mq_ops->map_queue(q, ctx->cpu);
969
970         /*
971          * Preemption doesn't flush the plug list, so it's possible that
972          * ctx->cpu is offline now.
973          */
974         spin_lock(&ctx->lock);
975         while (!list_empty(list)) {
976                 struct request *rq;
977
978                 rq = list_first_entry(list, struct request, queuelist);
979                 list_del_init(&rq->queuelist);
980                 rq->mq_ctx = ctx;
981                 __blk_mq_insert_request(hctx, rq, false);
982         }
983         spin_unlock(&ctx->lock);
984
985         blk_mq_run_hw_queue(hctx, from_schedule);
986         blk_mq_put_ctx(current_ctx);
987 }
988
989 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
990 {
991         struct request *rqa = container_of(a, struct request, queuelist);
992         struct request *rqb = container_of(b, struct request, queuelist);
993
994         return !(rqa->mq_ctx < rqb->mq_ctx ||
995                  (rqa->mq_ctx == rqb->mq_ctx &&
996                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
997 }
998
999 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1000 {
1001         struct blk_mq_ctx *this_ctx;
1002         struct request_queue *this_q;
1003         struct request *rq;
1004         LIST_HEAD(list);
1005         LIST_HEAD(ctx_list);
1006         unsigned int depth;
1007
1008         list_splice_init(&plug->mq_list, &list);
1009
1010         list_sort(NULL, &list, plug_ctx_cmp);
1011
1012         this_q = NULL;
1013         this_ctx = NULL;
1014         depth = 0;
1015
1016         while (!list_empty(&list)) {
1017                 rq = list_entry_rq(list.next);
1018                 list_del_init(&rq->queuelist);
1019                 BUG_ON(!rq->q);
1020                 if (rq->mq_ctx != this_ctx) {
1021                         if (this_ctx) {
1022                                 blk_mq_insert_requests(this_q, this_ctx,
1023                                                         &ctx_list, depth,
1024                                                         from_schedule);
1025                         }
1026
1027                         this_ctx = rq->mq_ctx;
1028                         this_q = rq->q;
1029                         depth = 0;
1030                 }
1031
1032                 depth++;
1033                 list_add_tail(&rq->queuelist, &ctx_list);
1034         }
1035
1036         /*
1037          * If 'this_ctx' is set, we know we have entries to complete
1038          * on 'ctx_list'. Do those.
1039          */
1040         if (this_ctx) {
1041                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1042                                        from_schedule);
1043         }
1044 }
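/*
 * Illustrative sketch, not part of this file: blk_mq_flush_plug_list()
 * above is driven by the generic plugging interface.  A submitter batches
 * bios like this, and the queued requests are sorted by ctx and inserted
 * in bulk when the plug is flushed (on blk_finish_plug() or when the task
 * schedules):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... submit a batch of bios with submit_bio() ...
 *	blk_finish_plug(&plug);
 */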
1045
1046 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1047 {
1048         init_request_from_bio(rq, bio);
1049
1050         if (blk_do_io_stat(rq))
1051                 blk_account_io_start(rq, 1);
1052 }
1053
1054 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1055 {
1056         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1057                 !blk_queue_nomerges(hctx->queue);
1058 }
1059
1060 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1061                                          struct blk_mq_ctx *ctx,
1062                                          struct request *rq, struct bio *bio)
1063 {
1064         if (!hctx_allow_merges(hctx)) {
1065                 blk_mq_bio_to_request(rq, bio);
1066                 spin_lock(&ctx->lock);
1067 insert_rq:
1068                 __blk_mq_insert_request(hctx, rq, false);
1069                 spin_unlock(&ctx->lock);
1070                 return false;
1071         } else {
1072                 struct request_queue *q = hctx->queue;
1073
1074                 spin_lock(&ctx->lock);
1075                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1076                         blk_mq_bio_to_request(rq, bio);
1077                         goto insert_rq;
1078                 }
1079
1080                 spin_unlock(&ctx->lock);
1081                 __blk_mq_free_request(hctx, ctx, rq);
1082                 return true;
1083         }
1084 }
1085
1086 struct blk_map_ctx {
1087         struct blk_mq_hw_ctx *hctx;
1088         struct blk_mq_ctx *ctx;
1089 };
1090
1091 static struct request *blk_mq_map_request(struct request_queue *q,
1092                                           struct bio *bio,
1093                                           struct blk_map_ctx *data)
1094 {
1095         struct blk_mq_hw_ctx *hctx;
1096         struct blk_mq_ctx *ctx;
1097         struct request *rq;
1098         int rw = bio_data_dir(bio);
1099         struct blk_mq_alloc_data alloc_data;
1100
1101         if (unlikely(blk_mq_queue_enter(q))) {
1102                 bio_endio(bio, -EIO);
1103                 return NULL;
1104         }
1105
1106         ctx = blk_mq_get_ctx(q);
1107         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1108
1109         if (rw_is_sync(bio->bi_rw))
1110                 rw |= REQ_SYNC;
1111
1112         trace_block_getrq(q, bio, rw);
1113         blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1114                         hctx);
1115         rq = __blk_mq_alloc_request(&alloc_data, rw);
1116         if (unlikely(!rq)) {
1117                 __blk_mq_run_hw_queue(hctx);
1118                 blk_mq_put_ctx(ctx);
1119                 trace_block_sleeprq(q, bio, rw);
1120
1121                 ctx = blk_mq_get_ctx(q);
1122                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1123                 blk_mq_set_alloc_data(&alloc_data, q,
1124                                 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1125                 rq = __blk_mq_alloc_request(&alloc_data, rw);
1126                 ctx = alloc_data.ctx;
1127                 hctx = alloc_data.hctx;
1128         }
1129
1130         hctx->queued++;
1131         data->hctx = hctx;
1132         data->ctx = ctx;
1133         return rq;
1134 }
1135
1136 /*
1137  * Multiple hardware queue variant. This will not use per-process plugs,
1138  * but will attempt to bypass the hctx queueing if we can go straight to
1139  * hardware for SYNC IO.
1140  */
1141 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1142 {
1143         const int is_sync = rw_is_sync(bio->bi_rw);
1144         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1145         struct blk_map_ctx data;
1146         struct request *rq;
1147
1148         blk_queue_bounce(q, &bio);
1149
1150         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1151                 bio_endio(bio, -EIO);
1152                 return;
1153         }
1154
1155         rq = blk_mq_map_request(q, bio, &data);
1156         if (unlikely(!rq))
1157                 return;
1158
1159         if (unlikely(is_flush_fua)) {
1160                 blk_mq_bio_to_request(rq, bio);
1161                 blk_insert_flush(rq);
1162                 goto run_queue;
1163         }
1164
1165         if (is_sync) {
1166                 int ret;
1167
1168                 blk_mq_bio_to_request(rq, bio);
1169
1170                 /*
1171                  * If the driver returns OK, we are done. On error, kill the
1172                  * request. For anything else (busy), just add it to our list
1173                  * as we previously would have done.
1174                  */
1175                 ret = q->mq_ops->queue_rq(data.hctx, rq, true);
1176                 if (ret == BLK_MQ_RQ_QUEUE_OK)
1177                         goto done;
1178                 else {
1179                         __blk_mq_requeue_request(rq);
1180
1181                         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1182                                 rq->errors = -EIO;
1183                                 blk_mq_end_request(rq, rq->errors);
1184                                 goto done;
1185                         }
1186                 }
1187         }
1188
1189         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1190                 /*
1191                  * For a SYNC request, send it to the hardware immediately. For
1192                  * an ASYNC request, just ensure that we run it later on. The
1193                  * latter allows for merging opportunities and more efficient
1194                  * dispatching.
1195                  */
1196 run_queue:
1197                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1198         }
1199 done:
1200         blk_mq_put_ctx(data.ctx);
1201 }
1202
1203 /*
1204  * Single hardware queue variant. This will attempt to use any per-process
1205  * plug for merging and IO deferral.
1206  */
1207 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1208 {
1209         const int is_sync = rw_is_sync(bio->bi_rw);
1210         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1211         unsigned int use_plug, request_count = 0;
1212         struct blk_map_ctx data;
1213         struct request *rq;
1214
1215         /*
1216          * Only use the per-process plug for async, non-flush requests;
1217          * sync and flush/FUA IO is sent straight to the hardware queue.
1218          */
1219         use_plug = !is_flush_fua && !is_sync;
1220
1221         blk_queue_bounce(q, &bio);
1222
1223         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1224                 bio_endio(bio, -EIO);
1225                 return;
1226         }
1227
1228         if (use_plug && !blk_queue_nomerges(q) &&
1229             blk_attempt_plug_merge(q, bio, &request_count))
1230                 return;
1231
1232         rq = blk_mq_map_request(q, bio, &data);
1233         if (unlikely(!rq))
1234                 return;
1235
1236         if (unlikely(is_flush_fua)) {
1237                 blk_mq_bio_to_request(rq, bio);
1238                 blk_insert_flush(rq);
1239                 goto run_queue;
1240         }
1241
1242         /*
1243          * If a task plug exists, use it to temporarily store requests
1244          * until the task is either done or scheduled away. This is
1245          * completely lockless.
1246          */
1247         if (use_plug) {
1248                 struct blk_plug *plug = current->plug;
1249
1250                 if (plug) {
1251                         blk_mq_bio_to_request(rq, bio);
1252                         if (list_empty(&plug->mq_list))
1253                                 trace_block_plug(q);
1254                         else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1255                                 blk_flush_plug_list(plug, false);
1256                                 trace_block_plug(q);
1257                         }
1258                         list_add_tail(&rq->queuelist, &plug->mq_list);
1259                         blk_mq_put_ctx(data.ctx);
1260                         return;
1261                 }
1262         }
1263
1264         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1265                 /*
1266                  * For a SYNC request, send it to the hardware immediately. For
1267                  * an ASYNC request, just ensure that we run it later on. The
1268                  * latter allows for merging opportunities and more efficient
1269                  * dispatching.
1270                  */
1271 run_queue:
1272                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1273         }
1274
1275         blk_mq_put_ctx(data.ctx);
1276 }
1277
1278 /*
1279  * Default mapping to a software queue, since we use one per CPU.
1280  */
1281 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1282 {
1283         return q->queue_hw_ctx[q->mq_map[cpu]];
1284 }
1285 EXPORT_SYMBOL(blk_mq_map_queue);
1286
1287 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1288                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1289 {
1290         struct page *page;
1291
1292         if (tags->rqs && set->ops->exit_request) {
1293                 int i;
1294
1295                 for (i = 0; i < tags->nr_tags; i++) {
1296                         if (!tags->rqs[i])
1297                                 continue;
1298                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1299                                                 hctx_idx, i);
1300                         tags->rqs[i] = NULL;
1301                 }
1302         }
1303
1304         while (!list_empty(&tags->page_list)) {
1305                 page = list_first_entry(&tags->page_list, struct page, lru);
1306                 list_del_init(&page->lru);
1307                 __free_pages(page, page->private);
1308         }
1309
1310         kfree(tags->rqs);
1311
1312         blk_mq_free_tags(tags);
1313 }
1314
1315 static size_t order_to_size(unsigned int order)
1316 {
1317         return (size_t)PAGE_SIZE << order;
1318 }
1319
1320 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1321                 unsigned int hctx_idx)
1322 {
1323         struct blk_mq_tags *tags;
1324         unsigned int i, j, entries_per_page, max_order = 4;
1325         size_t rq_size, left;
1326
1327         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1328                                 set->numa_node);
1329         if (!tags)
1330                 return NULL;
1331
1332         INIT_LIST_HEAD(&tags->page_list);
1333
1334         tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1335                                  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1336                                  set->numa_node);
1337         if (!tags->rqs) {
1338                 blk_mq_free_tags(tags);
1339                 return NULL;
1340         }
1341
1342         /*
1343          * rq_size is the size of the request plus driver payload, rounded
1344          * to the cacheline size
1345          */
1346         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1347                                 cache_line_size());
1348         left = rq_size * set->queue_depth;
1349
1350         for (i = 0; i < set->queue_depth; ) {
1351                 int this_order = max_order;
1352                 struct page *page;
1353                 int to_do;
1354                 void *p;
1355
1356                 while (left < order_to_size(this_order - 1) && this_order)
1357                         this_order--;
1358
1359                 do {
1360                         page = alloc_pages_node(set->numa_node,
1361                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1362                                 this_order);
1363                         if (page)
1364                                 break;
1365                         if (!this_order--)
1366                                 break;
1367                         if (order_to_size(this_order) < rq_size)
1368                                 break;
1369                 } while (1);
1370
1371                 if (!page)
1372                         goto fail;
1373
1374                 page->private = this_order;
1375                 list_add_tail(&page->lru, &tags->page_list);
1376
1377                 p = page_address(page);
1378                 entries_per_page = order_to_size(this_order) / rq_size;
1379                 to_do = min(entries_per_page, set->queue_depth - i);
1380                 left -= to_do * rq_size;
1381                 for (j = 0; j < to_do; j++) {
1382                         tags->rqs[i] = p;
1383                         tags->rqs[i]->atomic_flags = 0;
1384                         tags->rqs[i]->cmd_flags = 0;
1385                         if (set->ops->init_request) {
1386                                 if (set->ops->init_request(set->driver_data,
1387                                                 tags->rqs[i], hctx_idx, i,
1388                                                 set->numa_node)) {
1389                                         tags->rqs[i] = NULL;
1390                                         goto fail;
1391                                 }
1392                         }
1393
1394                         p += rq_size;
1395                         i++;
1396                 }
1397         }
1398
1399         return tags;
1400
1401 fail:
1402         blk_mq_free_rq_map(set, tags, hctx_idx);
1403         return NULL;
1404 }
1405
1406 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1407 {
1408         kfree(bitmap->map);
1409 }
1410
1411 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1412 {
1413         unsigned int bpw = 8, total, num_maps, i;
1414
1415         bitmap->bits_per_word = bpw;
1416
1417         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1418         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1419                                         GFP_KERNEL, node);
1420         if (!bitmap->map)
1421                 return -ENOMEM;
1422
1423         bitmap->map_size = num_maps;
1424
1425         total = nr_cpu_ids;
1426         for (i = 0; i < num_maps; i++) {
1427                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1428                 total -= bitmap->map[i].depth;
1429         }
1430
1431         return 0;
1432 }
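/*
 * Worked example (illustrative only): with nr_cpu_ids == 12 and a
 * bits_per_word of 8, blk_mq_alloc_bitmap() above allocates two words,
 * map[0] with depth 8 and map[1] with depth 4, so every possible CPU has
 * exactly one bit.
 */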
1433
1434 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1435 {
1436         struct request_queue *q = hctx->queue;
1437         struct blk_mq_ctx *ctx;
1438         LIST_HEAD(tmp);
1439
1440         /*
1441          * Move ctx entries to new CPU, if this one is going away.
1442          */
1443         ctx = __blk_mq_get_ctx(q, cpu);
1444
1445         spin_lock(&ctx->lock);
1446         if (!list_empty(&ctx->rq_list)) {
1447                 list_splice_init(&ctx->rq_list, &tmp);
1448                 blk_mq_hctx_clear_pending(hctx, ctx);
1449         }
1450         spin_unlock(&ctx->lock);
1451
1452         if (list_empty(&tmp))
1453                 return NOTIFY_OK;
1454
1455         ctx = blk_mq_get_ctx(q);
1456         spin_lock(&ctx->lock);
1457
1458         while (!list_empty(&tmp)) {
1459                 struct request *rq;
1460
1461                 rq = list_first_entry(&tmp, struct request, queuelist);
1462                 rq->mq_ctx = ctx;
1463                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1464         }
1465
1466         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1467         blk_mq_hctx_mark_pending(hctx, ctx);
1468
1469         spin_unlock(&ctx->lock);
1470
1471         blk_mq_run_hw_queue(hctx, true);
1472         blk_mq_put_ctx(ctx);
1473         return NOTIFY_OK;
1474 }
1475
1476 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1477 {
1478         struct request_queue *q = hctx->queue;
1479         struct blk_mq_tag_set *set = q->tag_set;
1480
1481         if (set->tags[hctx->queue_num])
1482                 return NOTIFY_OK;
1483
1484         set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1485         if (!set->tags[hctx->queue_num])
1486                 return NOTIFY_STOP;
1487
1488         hctx->tags = set->tags[hctx->queue_num];
1489         return NOTIFY_OK;
1490 }
1491
1492 static int blk_mq_hctx_notify(void *data, unsigned long action,
1493                               unsigned int cpu)
1494 {
1495         struct blk_mq_hw_ctx *hctx = data;
1496
1497         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1498                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1499         else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1500                 return blk_mq_hctx_cpu_online(hctx, cpu);
1501
1502         return NOTIFY_OK;
1503 }
1504
1505 static void blk_mq_exit_hctx(struct request_queue *q,
1506                 struct blk_mq_tag_set *set,
1507                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1508 {
1509         unsigned flush_start_tag = set->queue_depth;
1510
1511         blk_mq_tag_idle(hctx);
1512
1513         if (set->ops->exit_request)
1514                 set->ops->exit_request(set->driver_data,
1515                                        hctx->fq->flush_rq, hctx_idx,
1516                                        flush_start_tag + hctx_idx);
1517
1518         if (set->ops->exit_hctx)
1519                 set->ops->exit_hctx(hctx, hctx_idx);
1520
1521         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1522         blk_free_flush_queue(hctx->fq);
1523         kfree(hctx->ctxs);
1524         blk_mq_free_bitmap(&hctx->ctx_map);
1525 }
1526
1527 static void blk_mq_exit_hw_queues(struct request_queue *q,
1528                 struct blk_mq_tag_set *set, int nr_queue)
1529 {
1530         struct blk_mq_hw_ctx *hctx;
1531         unsigned int i;
1532
1533         queue_for_each_hw_ctx(q, hctx, i) {
1534                 if (i == nr_queue)
1535                         break;
1536                 blk_mq_exit_hctx(q, set, hctx, i);
1537         }
1538 }
1539
1540 static void blk_mq_free_hw_queues(struct request_queue *q,
1541                 struct blk_mq_tag_set *set)
1542 {
1543         struct blk_mq_hw_ctx *hctx;
1544         unsigned int i;
1545
1546         queue_for_each_hw_ctx(q, hctx, i) {
1547                 free_cpumask_var(hctx->cpumask);
1548                 kfree(hctx);
1549         }
1550 }
1551
1552 static int blk_mq_init_hctx(struct request_queue *q,
1553                 struct blk_mq_tag_set *set,
1554                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1555 {
1556         int node;
1557         unsigned flush_start_tag = set->queue_depth;
1558
1559         node = hctx->numa_node;
1560         if (node == NUMA_NO_NODE)
1561                 node = hctx->numa_node = set->numa_node;
1562
1563         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1564         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1565         spin_lock_init(&hctx->lock);
1566         INIT_LIST_HEAD(&hctx->dispatch);
1567         hctx->queue = q;
1568         hctx->queue_num = hctx_idx;
1569         hctx->flags = set->flags;
1570         hctx->cmd_size = set->cmd_size;
1571
1572         blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1573                                         blk_mq_hctx_notify, hctx);
1574         blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1575
1576         hctx->tags = set->tags[hctx_idx];
1577
1578         /*
1579          * Allocate space for all possible cpus to avoid allocation at
1580          * runtime
1581          */
1582         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1583                                         GFP_KERNEL, node);
1584         if (!hctx->ctxs)
1585                 goto unregister_cpu_notifier;
1586
1587         if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1588                 goto free_ctxs;
1589
1590         hctx->nr_ctx = 0;
1591
1592         if (set->ops->init_hctx &&
1593             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1594                 goto free_bitmap;
1595
1596         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1597         if (!hctx->fq)
1598                 goto exit_hctx;
1599
1600         if (set->ops->init_request &&
1601             set->ops->init_request(set->driver_data,
1602                                    hctx->fq->flush_rq, hctx_idx,
1603                                    flush_start_tag + hctx_idx, node))
1604                 goto free_fq;
1605
1606         return 0;
1607
1608  free_fq:
1609         kfree(hctx->fq);
1610  exit_hctx:
1611         if (set->ops->exit_hctx)
1612                 set->ops->exit_hctx(hctx, hctx_idx);
1613  free_bitmap:
1614         blk_mq_free_bitmap(&hctx->ctx_map);
1615  free_ctxs:
1616         kfree(hctx->ctxs);
1617  unregister_cpu_notifier:
1618         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1619
1620         return -1;
1621 }
1622
1623 static int blk_mq_init_hw_queues(struct request_queue *q,
1624                 struct blk_mq_tag_set *set)
1625 {
1626         struct blk_mq_hw_ctx *hctx;
1627         unsigned int i;
1628
1629         /*
1630          * Initialize hardware queues
1631          */
1632         queue_for_each_hw_ctx(q, hctx, i) {
1633                 if (blk_mq_init_hctx(q, set, hctx, i))
1634                         break;
1635         }
1636
1637         if (i == q->nr_hw_queues)
1638                 return 0;
1639
1640         /*
1641          * Init failed
1642          */
1643         blk_mq_exit_hw_queues(q, set, i);
1644
1645         return 1;
1646 }
1647
1648 static void blk_mq_init_cpu_queues(struct request_queue *q,
1649                                    unsigned int nr_hw_queues)
1650 {
1651         unsigned int i;
1652
1653         for_each_possible_cpu(i) {
1654                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1655                 struct blk_mq_hw_ctx *hctx;
1656
1657                 memset(__ctx, 0, sizeof(*__ctx));
1658                 __ctx->cpu = i;
1659                 spin_lock_init(&__ctx->lock);
1660                 INIT_LIST_HEAD(&__ctx->rq_list);
1661                 __ctx->queue = q;
1662
1663                 /* If the cpu isn't online, the cpu is mapped to the first hctx */
1664                 if (!cpu_online(i))
1665                         continue;
1666
1667                 hctx = q->mq_ops->map_queue(q, i);
1668                 cpumask_set_cpu(i, hctx->cpumask);
1669                 hctx->nr_ctx++;
1670
1671                 /*
1672                  * Set local node, IFF we have more than one hw queue. If
1673                  * not, we remain on the home node of the device
1674                  */
1675                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1676                         hctx->numa_node = cpu_to_node(i);
1677         }
1678 }
1679
1680 static void blk_mq_map_swqueue(struct request_queue *q)
1681 {
1682         unsigned int i;
1683         struct blk_mq_hw_ctx *hctx;
1684         struct blk_mq_ctx *ctx;
1685
1686         queue_for_each_hw_ctx(q, hctx, i) {
1687                 cpumask_clear(hctx->cpumask);
1688                 hctx->nr_ctx = 0;
1689         }
1690
1691         /*
1692          * Map software to hardware queues
1693          */
1694         queue_for_each_ctx(q, ctx, i) {
1695                 /* If the cpu isn't online, the cpu is mapped to the first hctx */
1696                 if (!cpu_online(i))
1697                         continue;
1698
1699                 hctx = q->mq_ops->map_queue(q, i);
1700                 cpumask_set_cpu(i, hctx->cpumask);
1701                 ctx->index_hw = hctx->nr_ctx;
1702                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1703         }
1704
1705         queue_for_each_hw_ctx(q, hctx, i) {
1706                 /*
1707                  * If no software queues are mapped to this hardware queue,
1708                  * disable it and free the request entries.
1709                  */
1710                 if (!hctx->nr_ctx) {
1711                         struct blk_mq_tag_set *set = q->tag_set;
1712
1713                         if (set->tags[i]) {
1714                                 blk_mq_free_rq_map(set, set->tags[i], i);
1715                                 set->tags[i] = NULL;
1716                                 hctx->tags = NULL;
1717                         }
1718                         continue;
1719                 }
1720
1721                 /*
1722                  * Initialize batch roundrobin counts
1723                  */
1724                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1725                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1726         }
1727 }
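
/*
 * Illustration (editorial, hypothetical numbers): with eight online CPUs and
 * two hardware queues, a typical cpu map leaves ctx 0-3 (CPUs 0-3) on hctx 0
 * and ctx 4-7 on hctx 1, with each ctx getting an index_hw of 0..3 within
 * its hctx.
 */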
1728
1729 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1730 {
1731         struct blk_mq_hw_ctx *hctx;
1732         struct request_queue *q;
1733         bool shared;
1734         int i;
1735
1736         /*
1737          * Tags are shared iff more than one queue currently uses this set.
1738          */
1739         shared = set->tag_list.next != set->tag_list.prev;
1740
1741         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1742                 blk_mq_freeze_queue(q);
1743
1744                 queue_for_each_hw_ctx(q, hctx, i) {
1745                         if (shared)
1746                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1747                         else
1748                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1749                 }
1750                 blk_mq_unfreeze_queue(q);
1751         }
1752 }
1753
1754 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1755 {
1756         struct blk_mq_tag_set *set = q->tag_set;
1757
1758         mutex_lock(&set->tag_list_lock);
1759         list_del_init(&q->tag_set_list);
1760         blk_mq_update_tag_set_depth(set);
1761         mutex_unlock(&set->tag_list_lock);
1762 }
1763
1764 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1765                                      struct request_queue *q)
1766 {
1767         q->tag_set = set;
1768
1769         mutex_lock(&set->tag_list_lock);
1770         list_add_tail(&q->tag_set_list, &set->tag_list);
1771         blk_mq_update_tag_set_depth(set);
1772         mutex_unlock(&set->tag_list_lock);
1773 }
1774
1775 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1776 {
1777         struct blk_mq_hw_ctx **hctxs;
1778         struct blk_mq_ctx __percpu *ctx;
1779         struct request_queue *q;
1780         unsigned int *map;
1781         int i;
1782
1783         ctx = alloc_percpu(struct blk_mq_ctx);
1784         if (!ctx)
1785                 return ERR_PTR(-ENOMEM);
1786
1787         /*
1788          * If a crashdump is active, then we are potentially in a very
1789          * memory constrained environment. Limit us to 1 queue and
1790          * 64 tags to prevent using too much memory.
1791          */
1792         if (is_kdump_kernel()) {
1793                 set->nr_hw_queues = 1;
1794                 set->queue_depth = min(64U, set->queue_depth);
1795         }
1796
1797         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1798                         set->numa_node);
1799
1800         if (!hctxs)
1801                 goto err_percpu;
1802
1803         map = blk_mq_make_queue_map(set);
1804         if (!map)
1805                 goto err_map;
1806
1807         for (i = 0; i < set->nr_hw_queues; i++) {
1808                 int node = blk_mq_hw_queue_to_node(map, i);
1809
1810                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1811                                         GFP_KERNEL, node);
1812                 if (!hctxs[i])
1813                         goto err_hctxs;
1814
1815                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1816                                                 node))
1817                         goto err_hctxs;
1818
1819                 atomic_set(&hctxs[i]->nr_active, 0);
1820                 hctxs[i]->numa_node = node;
1821                 hctxs[i]->queue_num = i;
1822         }
1823
1824         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1825         if (!q)
1826                 goto err_hctxs;
1827
1828         /*
1829          * Init percpu_ref in atomic mode so that it's faster to shutdown.
1830          * See blk_register_queue() for details.
1831          */
1832         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1833                             PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1834                 goto err_map;
1835
1836         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1837         blk_queue_rq_timeout(q, 30000);
1838
1839         q->nr_queues = nr_cpu_ids;
1840         q->nr_hw_queues = set->nr_hw_queues;
1841         q->mq_map = map;
1842
1843         q->queue_ctx = ctx;
1844         q->queue_hw_ctx = hctxs;
1845
1846         q->mq_ops = set->ops;
1847         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1848
1849         if (!(set->flags & BLK_MQ_F_SG_MERGE))
1850                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1851
1852         q->sg_reserved_size = INT_MAX;
1853
1854         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1855         INIT_LIST_HEAD(&q->requeue_list);
1856         spin_lock_init(&q->requeue_lock);
1857
1858         if (q->nr_hw_queues > 1)
1859                 blk_queue_make_request(q, blk_mq_make_request);
1860         else
1861                 blk_queue_make_request(q, blk_sq_make_request);
1862
1863         if (set->timeout)
1864                 blk_queue_rq_timeout(q, set->timeout);
1865
1866         /*
1867          * Do this after blk_queue_make_request() overrides it...
1868          */
1869         q->nr_requests = set->queue_depth;
1870
1871         if (set->ops->complete)
1872                 blk_queue_softirq_done(q, set->ops->complete);
1873
1874         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1875
1876         if (blk_mq_init_hw_queues(q, set))
1877                 goto err_hw;
1878
1879         mutex_lock(&all_q_mutex);
1880         list_add_tail(&q->all_q_node, &all_q_list);
1881         mutex_unlock(&all_q_mutex);
1882
1883         blk_mq_add_queue_tag_set(set, q);
1884
1885         blk_mq_map_swqueue(q);
1886
1887         return q;
1888
1889 err_hw:
1890         blk_cleanup_queue(q);
1891 err_hctxs:
1892         kfree(map);
1893         for (i = 0; i < set->nr_hw_queues; i++) {
1894                 if (!hctxs[i])
1895                         break;
1896                 free_cpumask_var(hctxs[i]->cpumask);
1897                 kfree(hctxs[i]);
1898         }
1899 err_map:
1900         kfree(hctxs);
1901 err_percpu:
1902         free_percpu(ctx);
1903         return ERR_PTR(-ENOMEM);
1904 }
1905 EXPORT_SYMBOL(blk_mq_init_queue);
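
/*
 * Example (editorial sketch, not part of this file): a driver creates its
 * queue from an already allocated tag set and must check for an ERR_PTR()
 * return; "my_dev" is a hypothetical driver structure.
 *
 *	struct request_queue *q;
 *
 *	q = blk_mq_init_queue(&my_dev->tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&my_dev->tag_set);
 *		return PTR_ERR(q);
 *	}
 *	q->queuedata = my_dev;
 *
 * Teardown runs the other way: blk_cleanup_queue() on every queue created
 * from the set, then blk_mq_free_tag_set().
 */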
1906
1907 void blk_mq_free_queue(struct request_queue *q)
1908 {
1909         struct blk_mq_tag_set   *set = q->tag_set;
1910
1911         blk_mq_del_queue_tag_set(q);
1912
1913         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1914         blk_mq_free_hw_queues(q, set);
1915
1916         percpu_ref_exit(&q->mq_usage_counter);
1917
1918         free_percpu(q->queue_ctx);
1919         kfree(q->queue_hw_ctx);
1920         kfree(q->mq_map);
1921
1922         q->queue_ctx = NULL;
1923         q->queue_hw_ctx = NULL;
1924         q->mq_map = NULL;
1925
1926         mutex_lock(&all_q_mutex);
1927         list_del_init(&q->all_q_node);
1928         mutex_unlock(&all_q_mutex);
1929 }
1930
1931 /* Basically redo blk_mq_init_queue with queue frozen */
1932 static void blk_mq_queue_reinit(struct request_queue *q)
1933 {
1934         WARN_ON_ONCE(!q->mq_freeze_depth);
1935
1936         blk_mq_sysfs_unregister(q);
1937
1938         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1939
1940         /*
1941          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1942          * we should change hctx numa_node according to new topology (this
1943          * involves freeing and re-allocating memory; is it worth doing?)
1944          */
1945
1946         blk_mq_map_swqueue(q);
1947
1948         blk_mq_sysfs_register(q);
1949 }
1950
1951 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1952                                       unsigned long action, void *hcpu)
1953 {
1954         struct request_queue *q;
1955
1956         /*
1957          * Before new mappings are established, a hot-added CPU might already
1958          * start handling requests. This doesn't break anything as we map
1959          * offline CPUs to the first hardware queue. We will re-init the queue
1960          * below to get optimal settings.
1961          */
1962         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1963             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1964                 return NOTIFY_OK;
1965
1966         mutex_lock(&all_q_mutex);
1967
1968         /*
1969          * We need to freeze and reinit all existing queues.  Freezing
1970          * involves synchronous wait for an RCU grace period and doing it
1971          * one by one may take a long time.  Start freezing all queues in
1972          * one swoop and then wait for the completions so that freezing can
1973          * take place in parallel.
1974          */
1975         list_for_each_entry(q, &all_q_list, all_q_node)
1976                 blk_mq_freeze_queue_start(q);
1977         list_for_each_entry(q, &all_q_list, all_q_node)
1978                 blk_mq_freeze_queue_wait(q);
1979
1980         list_for_each_entry(q, &all_q_list, all_q_node)
1981                 blk_mq_queue_reinit(q);
1982
1983         list_for_each_entry(q, &all_q_list, all_q_node)
1984                 blk_mq_unfreeze_queue(q);
1985
1986         mutex_unlock(&all_q_mutex);
1987         return NOTIFY_OK;
1988 }
1989
1990 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1991 {
1992         int i;
1993
1994         for (i = 0; i < set->nr_hw_queues; i++) {
1995                 set->tags[i] = blk_mq_init_rq_map(set, i);
1996                 if (!set->tags[i])
1997                         goto out_unwind;
1998         }
1999
2000         return 0;
2001
2002 out_unwind:
2003         while (--i >= 0)
2004                 blk_mq_free_rq_map(set, set->tags[i], i);
2005
2006         return -ENOMEM;
2007 }
2008
2009 /*
2010  * Allocate the request maps associated with this tag_set. Note that this
2011  * may reduce the depth asked for, if memory is tight. set->queue_depth
2012  * will be updated to reflect the allocated depth.
2013  */
2014 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2015 {
2016         unsigned int depth;
2017         int err;
2018
2019         depth = set->queue_depth;
2020         do {
2021                 err = __blk_mq_alloc_rq_maps(set);
2022                 if (!err)
2023                         break;
2024
2025                 set->queue_depth >>= 1;
2026                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2027                         err = -ENOMEM;
2028                         break;
2029                 }
2030         } while (set->queue_depth);
2031
2032         if (!set->queue_depth || err) {
2033                 pr_err("blk-mq: failed to allocate request map\n");
2034                 return -ENOMEM;
2035         }
2036
2037         if (depth != set->queue_depth)
2038                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2039                                                 depth, set->queue_depth);
2040
2041         return 0;
2042 }
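
/*
 * Example (editorial): with a requested depth of 256 under memory pressure,
 * the retry loop above tries 128, 64, 32, ... and only gives up once the
 * depth would drop below set->reserved_tags + BLK_MQ_TAG_MIN.
 */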
2043
2044 /*
2045  * Alloc a tag set to be associated with one or more request queues.
2046  * May fail with EINVAL for various error conditions. May adjust the
2047  * requested depth down, if it is too large. In that case, the adjusted
2048  * value will be stored in set->queue_depth.
2049  */
2050 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2051 {
2052         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2053
2054         if (!set->nr_hw_queues)
2055                 return -EINVAL;
2056         if (!set->queue_depth)
2057                 return -EINVAL;
2058         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2059                 return -EINVAL;
2060
2061         if (!set->ops->queue_rq || !set->ops->map_queue)
2062                 return -EINVAL;
2063
2064         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2065                 pr_info("blk-mq: reduced tag depth to %u\n",
2066                         BLK_MQ_MAX_DEPTH);
2067                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2068         }
2069
2070         set->tags = kmalloc_node(set->nr_hw_queues *
2071                                  sizeof(struct blk_mq_tags *),
2072                                  GFP_KERNEL, set->numa_node);
2073         if (!set->tags)
2074                 return -ENOMEM;
2075
2076         if (blk_mq_alloc_rq_maps(set))
2077                 goto enomem;
2078
2079         mutex_init(&set->tag_list_lock);
2080         INIT_LIST_HEAD(&set->tag_list);
2081
2082         return 0;
2083 enomem:
2084         kfree(set->tags);
2085         set->tags = NULL;
2086         return -ENOMEM;
2087 }
2088 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
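
/*
 * Example (editorial sketch, not part of this file): a driver typically
 * zeroes the set and fills it in once at probe time before creating its
 * request queue(s).  "my_mq_ops", "my_queue_rq", "struct my_cmd" and
 * "my_dev" are hypothetical; the depth and flags are arbitrary.
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 *
 *	memset(&my_dev->tag_set, 0, sizeof(my_dev->tag_set));
 *	my_dev->tag_set.ops		= &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues	= 1;
 *	my_dev->tag_set.queue_depth	= 64;
 *	my_dev->tag_set.numa_node	= NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size	= sizeof(struct my_cmd);
 *	my_dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *	my_dev->tag_set.driver_data	= my_dev;
 *
 *	err = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (err)
 *		goto out_free;
 */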
2089
2090 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2091 {
2092         int i;
2093
2094         for (i = 0; i < set->nr_hw_queues; i++) {
2095                 if (set->tags[i])
2096                         blk_mq_free_rq_map(set, set->tags[i], i);
2097         }
2098
2099         kfree(set->tags);
2100         set->tags = NULL;
2101 }
2102 EXPORT_SYMBOL(blk_mq_free_tag_set);
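
/*
 * Note (editorial): every request queue created from a set borrows its tags
 * (hctx->tags points into set->tags[]), so callers are expected to have run
 * blk_cleanup_queue() on all such queues before freeing the set.
 */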
2103
2104 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2105 {
2106         struct blk_mq_tag_set *set = q->tag_set;
2107         struct blk_mq_hw_ctx *hctx;
2108         int i, ret;
2109
2110         if (!set || nr > set->queue_depth)
2111                 return -EINVAL;
2112
2113         ret = 0;
2114         queue_for_each_hw_ctx(q, hctx, i) {
2115                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2116                 if (ret)
2117                         break;
2118         }
2119
2120         if (!ret)
2121                 q->nr_requests = nr;
2122
2123         return ret;
2124 }
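
/*
 * Note (editorial): for blk-mq queues this is what a write to the
 * nr_requests sysfs attribute ends up calling, e.g.
 *
 *	echo 64 > /sys/block/<dev>/queue/nr_requests
 */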
2125
2126 void blk_mq_disable_hotplug(void)
2127 {
2128         mutex_lock(&all_q_mutex);
2129 }
2130
2131 void blk_mq_enable_hotplug(void)
2132 {
2133         mutex_unlock(&all_q_mutex);
2134 }
2135
2136 static int __init blk_mq_init(void)
2137 {
2138         blk_mq_cpu_init();
2139
2140         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2141
2142         return 0;
2143 }
2144 subsys_initcall(blk_mq_init);