block/blk-mq.c
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23
24 #include <trace/events/block.h>
25
26 #include <linux/blk-mq.h>
27 #include "blk.h"
28 #include "blk-mq.h"
29 #include "blk-mq-tag.h"
30
31 static DEFINE_MUTEX(all_q_mutex);
32 static LIST_HEAD(all_q_list);
33
34 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
35
36 /*
37  * Check if any of the ctx's have pending work in this hardware queue
38  */
39 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
40 {
41         unsigned int i;
42
43         for (i = 0; i < hctx->ctx_map.map_size; i++)
44                 if (hctx->ctx_map.map[i].word)
45                         return true;
46
47         return false;
48 }
49
50 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
51                                               struct blk_mq_ctx *ctx)
52 {
53         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
54 }
55
56 #define CTX_TO_BIT(hctx, ctx)   \
57         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
58
59 /*
60  * Mark this ctx as having pending work in this hardware queue
61  */
62 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
63                                      struct blk_mq_ctx *ctx)
64 {
65         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
66
67         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
68                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
69 }
70
71 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
72                                       struct blk_mq_ctx *ctx)
73 {
74         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
75
76         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
77 }
78
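/*
 * Grab a reference on the queue's mq_usage_counter so a request can be
 * allocated. If the queue is frozen, sleep until it is unfrozen or dying;
 * a dying queue returns -ENODEV.
 */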
79 static int blk_mq_queue_enter(struct request_queue *q)
80 {
81         while (true) {
82                 int ret;
83
84                 if (percpu_ref_tryget_live(&q->mq_usage_counter))
85                         return 0;
86
87                 ret = wait_event_interruptible(q->mq_freeze_wq,
88                                 !q->mq_freeze_depth || blk_queue_dying(q));
89                 if (blk_queue_dying(q))
90                         return -ENODEV;
91                 if (ret)
92                         return ret;
93         }
94 }
95
96 static void blk_mq_queue_exit(struct request_queue *q)
97 {
98         percpu_ref_put(&q->mq_usage_counter);
99 }
100
101 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
102 {
103         struct request_queue *q =
104                 container_of(ref, struct request_queue, mq_usage_counter);
105
106         wake_up_all(&q->mq_freeze_wq);
107 }
108
109 /*
110  * Guarantee no request is in use, so we can change any data structure of
111  * the queue afterward.
112  */
113 void blk_mq_freeze_queue(struct request_queue *q)
114 {
115         bool freeze;
116
117         spin_lock_irq(q->queue_lock);
118         freeze = !q->mq_freeze_depth++;
119         spin_unlock_irq(q->queue_lock);
120
121         if (freeze) {
122                 /*
123                  * XXX: Temporary kludge to work around SCSI blk-mq stall.
124                  * SCSI synchronously creates and destroys many queues
125                  * back-to-back during probe leading to lengthy stalls.
126                  * This will be fixed by keeping ->mq_usage_counter in
127                  * atomic mode until genhd registration, but, for now,
128                  * let's work around using expedited synchronization.
129                  */
130                 __percpu_ref_kill_expedited(&q->mq_usage_counter);
131
132                 blk_mq_run_queues(q, false);
133         }
134         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
135 }
136
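/*
 * Drop a freeze reference; once the last freezer is gone, switch the usage
 * counter back to percpu mode and wake up anyone waiting to enter the queue.
 */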
137 static void blk_mq_unfreeze_queue(struct request_queue *q)
138 {
139         bool wake;
140
141         spin_lock_irq(q->queue_lock);
142         wake = !--q->mq_freeze_depth;
143         WARN_ON_ONCE(q->mq_freeze_depth < 0);
144         spin_unlock_irq(q->queue_lock);
145         if (wake) {
146                 percpu_ref_reinit(&q->mq_usage_counter);
147                 wake_up_all(&q->mq_freeze_wq);
148         }
149 }
150
151 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
152 {
153         return blk_mq_has_free_tags(hctx->tags);
154 }
155 EXPORT_SYMBOL(blk_mq_can_queue);
156
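/*
 * Initialize a request that was just obtained from the tag map for a new
 * allocation on the given software queue.
 */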
157 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
158                                struct request *rq, unsigned int rw_flags)
159 {
160         if (blk_queue_io_stat(q))
161                 rw_flags |= REQ_IO_STAT;
162
163         INIT_LIST_HEAD(&rq->queuelist);
164         /* csd/requeue_work/fifo_time is initialized before use */
165         rq->q = q;
166         rq->mq_ctx = ctx;
167         rq->cmd_flags |= rw_flags;
168         /* do not touch atomic flags, it needs atomic ops against the timer */
169         rq->cpu = -1;
170         INIT_HLIST_NODE(&rq->hash);
171         RB_CLEAR_NODE(&rq->rb_node);
172         rq->rq_disk = NULL;
173         rq->part = NULL;
174         rq->start_time = jiffies;
175 #ifdef CONFIG_BLK_CGROUP
176         rq->rl = NULL;
177         set_start_time_ns(rq);
178         rq->io_start_time_ns = 0;
179 #endif
180         rq->nr_phys_segments = 0;
181 #if defined(CONFIG_BLK_DEV_INTEGRITY)
182         rq->nr_integrity_segments = 0;
183 #endif
184         rq->special = NULL;
185         /* tag was already set */
186         rq->errors = 0;
187
188         rq->cmd = rq->__cmd;
189
190         rq->extra_len = 0;
191         rq->sense_len = 0;
192         rq->resid_len = 0;
193         rq->sense = NULL;
194
195         INIT_LIST_HEAD(&rq->timeout_list);
196         rq->timeout = 0;
197
198         rq->end_io = NULL;
199         rq->end_io_data = NULL;
200         rq->next_rq = NULL;
201
202         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
203 }
204
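/*
 * Try to get a tag for a new request. On success the preallocated request
 * backing that tag is initialized and returned; on failure NULL is returned
 * and the caller decides whether to retry.
 */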
205 static struct request *
206 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
207 {
208         struct request *rq;
209         unsigned int tag;
210
211         tag = blk_mq_get_tag(data);
212         if (tag != BLK_MQ_TAG_FAIL) {
213                 rq = data->hctx->tags->rqs[tag];
214
215                 if (blk_mq_tag_busy(data->hctx)) {
216                         rq->cmd_flags = REQ_MQ_INFLIGHT;
217                         atomic_inc(&data->hctx->nr_active);
218                 }
219
220                 rq->tag = tag;
221                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
222                 return rq;
223         }
224
225         return NULL;
226 }
227
228 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
229                 bool reserved)
230 {
231         struct blk_mq_ctx *ctx;
232         struct blk_mq_hw_ctx *hctx;
233         struct request *rq;
234         struct blk_mq_alloc_data alloc_data;
235
236         if (blk_mq_queue_enter(q))
237                 return NULL;
238
239         ctx = blk_mq_get_ctx(q);
240         hctx = q->mq_ops->map_queue(q, ctx->cpu);
241         blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
242                         reserved, ctx, hctx);
243
244         rq = __blk_mq_alloc_request(&alloc_data, rw);
245         if (!rq && (gfp & __GFP_WAIT)) {
246                 __blk_mq_run_hw_queue(hctx);
247                 blk_mq_put_ctx(ctx);
248
249                 ctx = blk_mq_get_ctx(q);
250                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
251                 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
252                                 hctx);
253                 rq =  __blk_mq_alloc_request(&alloc_data, rw);
254                 ctx = alloc_data.ctx;
255         }
256         blk_mq_put_ctx(ctx);
257         return rq;
258 }
259 EXPORT_SYMBOL(blk_mq_alloc_request);
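/*
 * Example usage (a sketch only, not taken from an in-tree driver): a caller
 * that needs a request outside the normal bio submission path could do
 * something like
 *
 *	rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);
 *	if (!rq)
 *		return -ENOMEM;
 *	... fill in the driver payload via blk_mq_rq_to_pdu(rq) ...
 *	blk_mq_free_request(rq);
 */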
260
261 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
262                                   struct blk_mq_ctx *ctx, struct request *rq)
263 {
264         const int tag = rq->tag;
265         struct request_queue *q = rq->q;
266
267         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
268                 atomic_dec(&hctx->nr_active);
269         rq->cmd_flags = 0;
270
271         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
272         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
273         blk_mq_queue_exit(q);
274 }
275
276 void blk_mq_free_request(struct request *rq)
277 {
278         struct blk_mq_ctx *ctx = rq->mq_ctx;
279         struct blk_mq_hw_ctx *hctx;
280         struct request_queue *q = rq->q;
281
282         ctx->rq_completed[rq_is_sync(rq)]++;
283
284         hctx = q->mq_ops->map_queue(q, ctx->cpu);
285         __blk_mq_free_request(hctx, ctx, rq);
286 }
287
288 /*
289  * Clone all relevant state from a request that has been put on hold in
290  * the flush state machine into the preallocated flush request that hangs
291  * off the request queue.
292  *
293  * To the driver the flush request should be invisible; that's why we
294  * impersonate the original request here.
295  */
296 void blk_mq_clone_flush_request(struct request *flush_rq,
297                 struct request *orig_rq)
298 {
299         struct blk_mq_hw_ctx *hctx =
300                 orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
301
302         flush_rq->mq_ctx = orig_rq->mq_ctx;
303         flush_rq->tag = orig_rq->tag;
304         memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
305                 hctx->cmd_size);
306 }
307
308 inline void __blk_mq_end_io(struct request *rq, int error)
309 {
310         blk_account_io_done(rq);
311
312         if (rq->end_io) {
313                 rq->end_io(rq, error);
314         } else {
315                 if (unlikely(blk_bidi_rq(rq)))
316                         blk_mq_free_request(rq->next_rq);
317                 blk_mq_free_request(rq);
318         }
319 }
320 EXPORT_SYMBOL(__blk_mq_end_io);
321
322 void blk_mq_end_io(struct request *rq, int error)
323 {
324         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
325                 BUG();
326         __blk_mq_end_io(rq, error);
327 }
328 EXPORT_SYMBOL(blk_mq_end_io);
329
330 static void __blk_mq_complete_request_remote(void *data)
331 {
332         struct request *rq = data;
333
334         rq->q->softirq_done_fn(rq);
335 }
336
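/*
 * Complete the request on the CPU that submitted it. If completion affinity
 * is disabled, or the submitting CPU shares a cache with the current one
 * (and SAME_FORCE isn't set), complete it locally; otherwise punt the
 * completion over via an IPI.
 */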
337 static void blk_mq_ipi_complete_request(struct request *rq)
338 {
339         struct blk_mq_ctx *ctx = rq->mq_ctx;
340         bool shared = false;
341         int cpu;
342
343         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
344                 rq->q->softirq_done_fn(rq);
345                 return;
346         }
347
348         cpu = get_cpu();
349         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
350                 shared = cpus_share_cache(cpu, ctx->cpu);
351
352         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
353                 rq->csd.func = __blk_mq_complete_request_remote;
354                 rq->csd.info = rq;
355                 rq->csd.flags = 0;
356                 smp_call_function_single_async(ctx->cpu, &rq->csd);
357         } else {
358                 rq->q->softirq_done_fn(rq);
359         }
360         put_cpu();
361 }
362
363 void __blk_mq_complete_request(struct request *rq)
364 {
365         struct request_queue *q = rq->q;
366
367         if (!q->softirq_done_fn)
368                 blk_mq_end_io(rq, rq->errors);
369         else
370                 blk_mq_ipi_complete_request(rq);
371 }
372
373 /**
374  * blk_mq_complete_request - end I/O on a request
375  * @rq:         the request being processed
376  *
377  * Description:
378  *      Ends all I/O on a request. It does not handle partial completions.
379  *      The actual completion happens out-of-order, through an IPI handler.
380  **/
381 void blk_mq_complete_request(struct request *rq)
382 {
383         struct request_queue *q = rq->q;
384
385         if (unlikely(blk_should_fake_timeout(q)))
386                 return;
387         if (!blk_mark_rq_complete(rq))
388                 __blk_mq_complete_request(rq);
389 }
390 EXPORT_SYMBOL(blk_mq_complete_request);
391
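/*
 * Called when a request is about to be handed to the driver: arm the timeout
 * timer, mark the request STARTED and clear a stale COMPLETE flag, and flag
 * the last request of a batch with REQ_END.
 */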
392 static void blk_mq_start_request(struct request *rq, bool last)
393 {
394         struct request_queue *q = rq->q;
395
396         trace_block_rq_issue(q, rq);
397
398         rq->resid_len = blk_rq_bytes(rq);
399         if (unlikely(blk_bidi_rq(rq)))
400                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
401
402         blk_add_timer(rq);
403
404         /*
405          * Ensure that ->deadline is visible before we set the started
406          * flag and clear the completed flag.
407          */
408         smp_mb__before_atomic();
409
410         /*
411          * Mark us as started and clear complete. Complete might have been
412          * set if requeue raced with timeout, which then marked it as
413          * complete. So be sure to clear complete again when we start
414          * the request, otherwise we'll ignore the completion event.
415          */
416         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
417                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
418         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
419                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
420
421         if (q->dma_drain_size && blk_rq_bytes(rq)) {
422                 /*
423                  * Make sure space for the drain appears.  We know we can do
424                  * this because max_hw_segments has been adjusted to be one
425                  * fewer than the device can handle.
426                  */
427                 rq->nr_phys_segments++;
428         }
429
430         /*
431          * Flag the last request in the series so that drivers know when IO
432          * should be kicked off, if they don't do it on a per-request basis.
433          *
434          * Note: the flag isn't the only condition for kicking off IO. If the
435          * drive is busy, the last request might not have the bit set.
436          */
437         if (last)
438                 rq->cmd_flags |= REQ_END;
439 }
440
441 static void __blk_mq_requeue_request(struct request *rq)
442 {
443         struct request_queue *q = rq->q;
444
445         trace_block_rq_requeue(q, rq);
446         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
447
448         rq->cmd_flags &= ~REQ_END;
449
450         if (q->dma_drain_size && blk_rq_bytes(rq))
451                 rq->nr_phys_segments--;
452 }
453
454 void blk_mq_requeue_request(struct request *rq)
455 {
456         __blk_mq_requeue_request(rq);
457         blk_clear_rq_complete(rq);
458
459         BUG_ON(blk_queued_rq(rq));
460         blk_mq_add_to_requeue_list(rq, true);
461 }
462 EXPORT_SYMBOL(blk_mq_requeue_request);
463
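/*
 * Work handler that drains the requeue list: requests marked with
 * REQ_SOFTBARRIER are re-inserted at the head first, everything else at the
 * tail, and then the (possibly stopped) hardware queues are kicked.
 */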
464 static void blk_mq_requeue_work(struct work_struct *work)
465 {
466         struct request_queue *q =
467                 container_of(work, struct request_queue, requeue_work);
468         LIST_HEAD(rq_list);
469         struct request *rq, *next;
470         unsigned long flags;
471
472         spin_lock_irqsave(&q->requeue_lock, flags);
473         list_splice_init(&q->requeue_list, &rq_list);
474         spin_unlock_irqrestore(&q->requeue_lock, flags);
475
476         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
477                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
478                         continue;
479
480                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
481                 list_del_init(&rq->queuelist);
482                 blk_mq_insert_request(rq, true, false, false);
483         }
484
485         while (!list_empty(&rq_list)) {
486                 rq = list_entry(rq_list.next, struct request, queuelist);
487                 list_del_init(&rq->queuelist);
488                 blk_mq_insert_request(rq, false, false, false);
489         }
490
491         /*
492          * Use the start variant of queue running here, so that running
493          * the requeue work will kick stopped queues.
494          */
495         blk_mq_start_hw_queues(q);
496 }
497
498 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
499 {
500         struct request_queue *q = rq->q;
501         unsigned long flags;
502
503         /*
504          * We abuse this flag that is otherwise used by the I/O scheduler to
505          * request head insertion from the workqueue.
506          */
507         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
508
509         spin_lock_irqsave(&q->requeue_lock, flags);
510         if (at_head) {
511                 rq->cmd_flags |= REQ_SOFTBARRIER;
512                 list_add(&rq->queuelist, &q->requeue_list);
513         } else {
514                 list_add_tail(&rq->queuelist, &q->requeue_list);
515         }
516         spin_unlock_irqrestore(&q->requeue_lock, flags);
517 }
518 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
519
520 void blk_mq_kick_requeue_list(struct request_queue *q)
521 {
522         kblockd_schedule_work(&q->requeue_work);
523 }
524 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
525
526 static inline bool is_flush_request(struct request *rq, unsigned int tag)
527 {
528         return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
529                         rq->q->flush_rq->tag == tag);
530 }
531
532 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
533 {
534         struct request *rq = tags->rqs[tag];
535
536         if (!is_flush_request(rq, tag))
537                 return rq;
538
539         return rq->q->flush_rq;
540 }
541 EXPORT_SYMBOL(blk_mq_tag_to_rq);
542
543 struct blk_mq_timeout_data {
544         struct blk_mq_hw_ctx *hctx;
545         unsigned long *next;
546         unsigned int *next_set;
547 };
548
549 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
550 {
551         struct blk_mq_timeout_data *data = __data;
552         struct blk_mq_hw_ctx *hctx = data->hctx;
553         unsigned int tag;
554
555         /* It may not be in flight yet (this is where
556          * the REQ_ATOM_STARTED flag comes in). The requests are
557          * statically allocated, so we know it's always safe to access the
558          * memory associated with a bit offset into ->rqs[].
559          */
560         tag = 0;
561         do {
562                 struct request *rq;
563
564                 tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
565                 if (tag >= hctx->tags->nr_tags)
566                         break;
567
568                 rq = blk_mq_tag_to_rq(hctx->tags, tag++);
569                 if (rq->q != hctx->queue)
570                         continue;
571                 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
572                         continue;
573
574                 blk_rq_check_expired(rq, data->next, data->next_set);
575         } while (1);
576 }
577
578 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
579                                         unsigned long *next,
580                                         unsigned int *next_set)
581 {
582         struct blk_mq_timeout_data data = {
583                 .hctx           = hctx,
584                 .next           = next,
585                 .next_set       = next_set,
586         };
587
588         /*
589          * Ask the tagging code to iterate busy requests, so we can
590          * check them for timeout.
591          */
592         blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
593 }
594
595 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
596 {
597         struct request_queue *q = rq->q;
598
599         /*
600          * We know that complete is set at this point. If STARTED isn't set
601          * anymore, then the request isn't active and the "timeout" should
602          * just be ignored. This can happen due to the bitflag ordering.
603          * Timeout first checks if STARTED is set, and if it is, assumes
604          * the request is active. But if we race with completion, then
605          * both flags will get cleared. So check here again, and ignore
606          * a timeout event with a request that isn't active.
607          */
608         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
609                 return BLK_EH_NOT_HANDLED;
610
611         if (!q->mq_ops->timeout)
612                 return BLK_EH_RESET_TIMER;
613
614         return q->mq_ops->timeout(rq);
615 }
616
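/*
 * Queue-wide timeout timer. Scan every mapped hardware queue for requests
 * that have expired; if any deadline is still pending, re-arm the timer,
 * otherwise let idle hardware queues release their shared tags.
 */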
617 static void blk_mq_rq_timer(unsigned long data)
618 {
619         struct request_queue *q = (struct request_queue *) data;
620         struct blk_mq_hw_ctx *hctx;
621         unsigned long next = 0;
622         int i, next_set = 0;
623
624         queue_for_each_hw_ctx(q, hctx, i) {
625                 /*
626                  * If no software queues are currently mapped to this
627                  * hardware queue, there's nothing to check.
628                  */
629                 if (!hctx->nr_ctx || !hctx->tags)
630                         continue;
631
632                 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
633         }
634
635         if (next_set) {
636                 next = blk_rq_timeout(round_jiffies_up(next));
637                 mod_timer(&q->timeout, next);
638         } else {
639                 queue_for_each_hw_ctx(q, hctx, i)
640                         blk_mq_tag_idle(hctx);
641         }
642 }
643
644 /*
645  * Reverse check our software queue for entries that we could potentially
646  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
647  * too much time checking for merges.
648  */
649 static bool blk_mq_attempt_merge(struct request_queue *q,
650                                  struct blk_mq_ctx *ctx, struct bio *bio)
651 {
652         struct request *rq;
653         int checked = 8;
654
655         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
656                 int el_ret;
657
658                 if (!checked--)
659                         break;
660
661                 if (!blk_rq_merge_ok(rq, bio))
662                         continue;
663
664                 el_ret = blk_try_merge(rq, bio);
665                 if (el_ret == ELEVATOR_BACK_MERGE) {
666                         if (bio_attempt_back_merge(q, rq, bio)) {
667                                 ctx->rq_merged++;
668                                 return true;
669                         }
670                         break;
671                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
672                         if (bio_attempt_front_merge(q, rq, bio)) {
673                                 ctx->rq_merged++;
674                                 return true;
675                         }
676                         break;
677                 }
678         }
679
680         return false;
681 }
682
683 /*
684  * Process software queues that have been marked busy, splicing them
685  * to the for-dispatch list.
686  */
687 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
688 {
689         struct blk_mq_ctx *ctx;
690         int i;
691
692         for (i = 0; i < hctx->ctx_map.map_size; i++) {
693                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
694                 unsigned int off, bit;
695
696                 if (!bm->word)
697                         continue;
698
699                 bit = 0;
700                 off = i * hctx->ctx_map.bits_per_word;
701                 do {
702                         bit = find_next_bit(&bm->word, bm->depth, bit);
703                         if (bit >= bm->depth)
704                                 break;
705
706                         ctx = hctx->ctxs[bit + off];
707                         clear_bit(bit, &bm->word);
708                         spin_lock(&ctx->lock);
709                         list_splice_tail_init(&ctx->rq_list, list);
710                         spin_unlock(&ctx->lock);
711
712                         bit++;
713                 } while (1);
714         }
715 }
716
717 /*
718  * Run this hardware queue, pulling any software queues mapped to it in.
719  * Note that this function currently has various problems around ordering
720  * of IO. In particular, we'd like FIFO behaviour on handling existing
721  * items on the hctx->dispatch list. Ignore that for now.
722  */
723 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
724 {
725         struct request_queue *q = hctx->queue;
726         struct request *rq;
727         LIST_HEAD(rq_list);
728         int queued;
729
730         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
731
732         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
733                 return;
734
735         hctx->run++;
736
737         /*
738          * Touch any software queue that has pending entries.
739          */
740         flush_busy_ctxs(hctx, &rq_list);
741
742         /*
743          * If we have previous entries on our dispatch list, grab them
744          * and stuff them at the front for more fair dispatch.
745          */
746         if (!list_empty_careful(&hctx->dispatch)) {
747                 spin_lock(&hctx->lock);
748                 if (!list_empty(&hctx->dispatch))
749                         list_splice_init(&hctx->dispatch, &rq_list);
750                 spin_unlock(&hctx->lock);
751         }
752
753         /*
754          * Now process all the entries, sending them to the driver.
755          */
756         queued = 0;
757         while (!list_empty(&rq_list)) {
758                 int ret;
759
760                 rq = list_first_entry(&rq_list, struct request, queuelist);
761                 list_del_init(&rq->queuelist);
762
763                 blk_mq_start_request(rq, list_empty(&rq_list));
764
765                 ret = q->mq_ops->queue_rq(hctx, rq);
766                 switch (ret) {
767                 case BLK_MQ_RQ_QUEUE_OK:
768                         queued++;
769                         continue;
770                 case BLK_MQ_RQ_QUEUE_BUSY:
771                         list_add(&rq->queuelist, &rq_list);
772                         __blk_mq_requeue_request(rq);
773                         break;
774                 default:
775                         pr_err("blk-mq: bad return on queue: %d\n", ret);
776                 case BLK_MQ_RQ_QUEUE_ERROR:
777                         rq->errors = -EIO;
778                         blk_mq_end_io(rq, rq->errors);
779                         break;
780                 }
781
782                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
783                         break;
784         }
785
786         if (!queued)
787                 hctx->dispatched[0]++;
788         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
789                 hctx->dispatched[ilog2(queued) + 1]++;
790
791         /*
792          * Any items that need requeuing? Stuff them into hctx->dispatch,
793          * that is where we will continue on next queue run.
794          */
795         if (!list_empty(&rq_list)) {
796                 spin_lock(&hctx->lock);
797                 list_splice(&rq_list, &hctx->dispatch);
798                 spin_unlock(&hctx->lock);
799         }
800 }
801
802 /*
803  * It'd be great if the workqueue API had a way to pass
804  * in a mask and had some smarts for more clever placement.
805  * For now we just round-robin here, switching for every
806  * BLK_MQ_CPU_WORK_BATCH queued items.
807  */
808 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
809 {
810         int cpu = hctx->next_cpu;
811
812         if (--hctx->next_cpu_batch <= 0) {
813                 int next_cpu;
814
815                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
816                 if (next_cpu >= nr_cpu_ids)
817                         next_cpu = cpumask_first(hctx->cpumask);
818
819                 hctx->next_cpu = next_cpu;
820                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
821         }
822
823         return cpu;
824 }
825
826 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
827 {
828         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
829                 return;
830
831         if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
832                 __blk_mq_run_hw_queue(hctx);
833         else if (hctx->queue->nr_hw_queues == 1)
834                 kblockd_schedule_delayed_work(&hctx->run_work, 0);
835         else {
836                 unsigned int cpu;
837
838                 cpu = blk_mq_hctx_next_cpu(hctx);
839                 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
840         }
841 }
842
843 void blk_mq_run_queues(struct request_queue *q, bool async)
844 {
845         struct blk_mq_hw_ctx *hctx;
846         int i;
847
848         queue_for_each_hw_ctx(q, hctx, i) {
849                 if ((!blk_mq_hctx_has_pending(hctx) &&
850                     list_empty_careful(&hctx->dispatch)) ||
851                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
852                         continue;
853
854                 preempt_disable();
855                 blk_mq_run_hw_queue(hctx, async);
856                 preempt_enable();
857         }
858 }
859 EXPORT_SYMBOL(blk_mq_run_queues);
860
861 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
862 {
863         cancel_delayed_work(&hctx->run_work);
864         cancel_delayed_work(&hctx->delay_work);
865         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
866 }
867 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
868
869 void blk_mq_stop_hw_queues(struct request_queue *q)
870 {
871         struct blk_mq_hw_ctx *hctx;
872         int i;
873
874         queue_for_each_hw_ctx(q, hctx, i)
875                 blk_mq_stop_hw_queue(hctx);
876 }
877 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
878
879 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
880 {
881         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
882
883         preempt_disable();
884         blk_mq_run_hw_queue(hctx, false);
885         preempt_enable();
886 }
887 EXPORT_SYMBOL(blk_mq_start_hw_queue);
888
889 void blk_mq_start_hw_queues(struct request_queue *q)
890 {
891         struct blk_mq_hw_ctx *hctx;
892         int i;
893
894         queue_for_each_hw_ctx(q, hctx, i)
895                 blk_mq_start_hw_queue(hctx);
896 }
897 EXPORT_SYMBOL(blk_mq_start_hw_queues);
898
899
900 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
901 {
902         struct blk_mq_hw_ctx *hctx;
903         int i;
904
905         queue_for_each_hw_ctx(q, hctx, i) {
906                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
907                         continue;
908
909                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
910                 preempt_disable();
911                 blk_mq_run_hw_queue(hctx, async);
912                 preempt_enable();
913         }
914 }
915 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
916
917 static void blk_mq_run_work_fn(struct work_struct *work)
918 {
919         struct blk_mq_hw_ctx *hctx;
920
921         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
922
923         __blk_mq_run_hw_queue(hctx);
924 }
925
926 static void blk_mq_delay_work_fn(struct work_struct *work)
927 {
928         struct blk_mq_hw_ctx *hctx;
929
930         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
931
932         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
933                 __blk_mq_run_hw_queue(hctx);
934 }
935
936 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
937 {
938         unsigned long tmo = msecs_to_jiffies(msecs);
939
940         if (hctx->queue->nr_hw_queues == 1)
941                 kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
942         else {
943                 unsigned int cpu;
944
945                 cpu = blk_mq_hctx_next_cpu(hctx);
946                 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
947         }
948 }
949 EXPORT_SYMBOL(blk_mq_delay_queue);
950
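/*
 * Add a request to a software queue and mark that ctx as pending in the
 * hardware queue's bitmap. The caller must hold ctx->lock.
 */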
951 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
952                                     struct request *rq, bool at_head)
953 {
954         struct blk_mq_ctx *ctx = rq->mq_ctx;
955
956         trace_block_rq_insert(hctx->queue, rq);
957
958         if (at_head)
959                 list_add(&rq->queuelist, &ctx->rq_list);
960         else
961                 list_add_tail(&rq->queuelist, &ctx->rq_list);
962
963         blk_mq_hctx_mark_pending(hctx, ctx);
964 }
965
966 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
967                 bool async)
968 {
969         struct request_queue *q = rq->q;
970         struct blk_mq_hw_ctx *hctx;
971         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
972
973         current_ctx = blk_mq_get_ctx(q);
974         if (!cpu_online(ctx->cpu))
975                 rq->mq_ctx = ctx = current_ctx;
976
977         hctx = q->mq_ops->map_queue(q, ctx->cpu);
978
979         spin_lock(&ctx->lock);
980         __blk_mq_insert_request(hctx, rq, at_head);
981         spin_unlock(&ctx->lock);
982
983         if (run_queue)
984                 blk_mq_run_hw_queue(hctx, async);
985
986         blk_mq_put_ctx(current_ctx);
987 }
988
989 static void blk_mq_insert_requests(struct request_queue *q,
990                                      struct blk_mq_ctx *ctx,
991                                      struct list_head *list,
992                                      int depth,
993                                      bool from_schedule)
994
995 {
996         struct blk_mq_hw_ctx *hctx;
997         struct blk_mq_ctx *current_ctx;
998
999         trace_block_unplug(q, depth, !from_schedule);
1000
1001         current_ctx = blk_mq_get_ctx(q);
1002
1003         if (!cpu_online(ctx->cpu))
1004                 ctx = current_ctx;
1005         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1006
1007         /*
1008          * Preemption doesn't flush the plug list, so it's possible that
1009          * ctx->cpu is offline now.
1010          */
1011         spin_lock(&ctx->lock);
1012         while (!list_empty(list)) {
1013                 struct request *rq;
1014
1015                 rq = list_first_entry(list, struct request, queuelist);
1016                 list_del_init(&rq->queuelist);
1017                 rq->mq_ctx = ctx;
1018                 __blk_mq_insert_request(hctx, rq, false);
1019         }
1020         spin_unlock(&ctx->lock);
1021
1022         blk_mq_run_hw_queue(hctx, from_schedule);
1023         blk_mq_put_ctx(current_ctx);
1024 }
1025
1026 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1027 {
1028         struct request *rqa = container_of(a, struct request, queuelist);
1029         struct request *rqb = container_of(b, struct request, queuelist);
1030
1031         return !(rqa->mq_ctx < rqb->mq_ctx ||
1032                  (rqa->mq_ctx == rqb->mq_ctx &&
1033                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1034 }
1035
1036 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1037 {
1038         struct blk_mq_ctx *this_ctx;
1039         struct request_queue *this_q;
1040         struct request *rq;
1041         LIST_HEAD(list);
1042         LIST_HEAD(ctx_list);
1043         unsigned int depth;
1044
1045         list_splice_init(&plug->mq_list, &list);
1046
1047         list_sort(NULL, &list, plug_ctx_cmp);
1048
1049         this_q = NULL;
1050         this_ctx = NULL;
1051         depth = 0;
1052
1053         while (!list_empty(&list)) {
1054                 rq = list_entry_rq(list.next);
1055                 list_del_init(&rq->queuelist);
1056                 BUG_ON(!rq->q);
1057                 if (rq->mq_ctx != this_ctx) {
1058                         if (this_ctx) {
1059                                 blk_mq_insert_requests(this_q, this_ctx,
1060                                                         &ctx_list, depth,
1061                                                         from_schedule);
1062                         }
1063
1064                         this_ctx = rq->mq_ctx;
1065                         this_q = rq->q;
1066                         depth = 0;
1067                 }
1068
1069                 depth++;
1070                 list_add_tail(&rq->queuelist, &ctx_list);
1071         }
1072
1073         /*
1074          * If 'this_ctx' is set, we know we have entries to complete
1075          * on 'ctx_list'. Do those.
1076          */
1077         if (this_ctx) {
1078                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1079                                        from_schedule);
1080         }
1081 }
1082
1083 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1084 {
1085         init_request_from_bio(rq, bio);
1086
1087         if (blk_do_io_stat(rq))
1088                 blk_account_io_start(rq, 1);
1089 }
1090
1091 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1092 {
1093         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1094                 !blk_queue_nomerges(hctx->queue);
1095 }
1096
1097 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1098                                          struct blk_mq_ctx *ctx,
1099                                          struct request *rq, struct bio *bio)
1100 {
1101         if (!hctx_allow_merges(hctx)) {
1102                 blk_mq_bio_to_request(rq, bio);
1103                 spin_lock(&ctx->lock);
1104 insert_rq:
1105                 __blk_mq_insert_request(hctx, rq, false);
1106                 spin_unlock(&ctx->lock);
1107                 return false;
1108         } else {
1109                 struct request_queue *q = hctx->queue;
1110
1111                 spin_lock(&ctx->lock);
1112                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1113                         blk_mq_bio_to_request(rq, bio);
1114                         goto insert_rq;
1115                 }
1116
1117                 spin_unlock(&ctx->lock);
1118                 __blk_mq_free_request(hctx, ctx, rq);
1119                 return true;
1120         }
1121 }
1122
1123 struct blk_map_ctx {
1124         struct blk_mq_hw_ctx *hctx;
1125         struct blk_mq_ctx *ctx;
1126 };
1127
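/*
 * Map a bio to a hardware queue and allocate a request for it. The first
 * attempt is atomic; if it fails, the hardware queue is run to free up tags
 * and the allocation is retried with __GFP_WAIT.
 */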
1128 static struct request *blk_mq_map_request(struct request_queue *q,
1129                                           struct bio *bio,
1130                                           struct blk_map_ctx *data)
1131 {
1132         struct blk_mq_hw_ctx *hctx;
1133         struct blk_mq_ctx *ctx;
1134         struct request *rq;
1135         int rw = bio_data_dir(bio);
1136         struct blk_mq_alloc_data alloc_data;
1137
1138         if (unlikely(blk_mq_queue_enter(q))) {
1139                 bio_endio(bio, -EIO);
1140                 return NULL;
1141         }
1142
1143         ctx = blk_mq_get_ctx(q);
1144         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1145
1146         if (rw_is_sync(bio->bi_rw))
1147                 rw |= REQ_SYNC;
1148
1149         trace_block_getrq(q, bio, rw);
1150         blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1151                         hctx);
1152         rq = __blk_mq_alloc_request(&alloc_data, rw);
1153         if (unlikely(!rq)) {
1154                 __blk_mq_run_hw_queue(hctx);
1155                 blk_mq_put_ctx(ctx);
1156                 trace_block_sleeprq(q, bio, rw);
1157
1158                 ctx = blk_mq_get_ctx(q);
1159                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1160                 blk_mq_set_alloc_data(&alloc_data, q,
1161                                 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1162                 rq = __blk_mq_alloc_request(&alloc_data, rw);
1163                 ctx = alloc_data.ctx;
1164                 hctx = alloc_data.hctx;
1165         }
1166
1167         hctx->queued++;
1168         data->hctx = hctx;
1169         data->ctx = ctx;
1170         return rq;
1171 }
1172
1173 /*
1174  * Multiple hardware queue variant. This will not use per-process plugs,
1175  * but will attempt to bypass the hctx queueing if we can go straight to
1176  * hardware for SYNC IO.
1177  */
1178 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1179 {
1180         const int is_sync = rw_is_sync(bio->bi_rw);
1181         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1182         struct blk_map_ctx data;
1183         struct request *rq;
1184
1185         blk_queue_bounce(q, &bio);
1186
1187         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1188                 bio_endio(bio, -EIO);
1189                 return;
1190         }
1191
1192         rq = blk_mq_map_request(q, bio, &data);
1193         if (unlikely(!rq))
1194                 return;
1195
1196         if (unlikely(is_flush_fua)) {
1197                 blk_mq_bio_to_request(rq, bio);
1198                 blk_insert_flush(rq);
1199                 goto run_queue;
1200         }
1201
1202         if (is_sync) {
1203                 int ret;
1204
1205                 blk_mq_bio_to_request(rq, bio);
1206                 blk_mq_start_request(rq, true);
1207
1208                 /*
1209                  * If queueing succeeded, we are done. On error, kill the
1210                  * request. For any other result (busy), just add it back to
1211                  * our list as we previously would have done.
1212                  */
1213                 ret = q->mq_ops->queue_rq(data.hctx, rq);
1214                 if (ret == BLK_MQ_RQ_QUEUE_OK)
1215                         goto done;
1216                 else {
1217                         __blk_mq_requeue_request(rq);
1218
1219                         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1220                                 rq->errors = -EIO;
1221                                 blk_mq_end_io(rq, rq->errors);
1222                                 goto done;
1223                         }
1224                 }
1225         }
1226
1227         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1228                 /*
1229                  * For a SYNC request, send it to the hardware immediately. For
1230                  * an ASYNC request, just ensure that we run it later on. The
1231                  * latter allows for merging opportunities and more efficient
1232                  * dispatching.
1233                  */
1234 run_queue:
1235                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1236         }
1237 done:
1238         blk_mq_put_ctx(data.ctx);
1239 }
1240
1241 /*
1242  * Single hardware queue variant. This will attempt to use any per-process
1243  * plug for merging and IO deferral.
1244  */
1245 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1246 {
1247         const int is_sync = rw_is_sync(bio->bi_rw);
1248         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1249         unsigned int use_plug, request_count = 0;
1250         struct blk_map_ctx data;
1251         struct request *rq;
1252
1253         /*
1254          * Use a per-process plug only for async, non-flush IO; sync IO
1255          * goes to the hardware queue directly.
1256          */
1257         use_plug = !is_flush_fua && !is_sync;
1258
1259         blk_queue_bounce(q, &bio);
1260
1261         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1262                 bio_endio(bio, -EIO);
1263                 return;
1264         }
1265
1266         if (use_plug && !blk_queue_nomerges(q) &&
1267             blk_attempt_plug_merge(q, bio, &request_count))
1268                 return;
1269
1270         rq = blk_mq_map_request(q, bio, &data);
1271         if (unlikely(!rq))
1272                 return;
1273
1274         if (unlikely(is_flush_fua)) {
1275                 blk_mq_bio_to_request(rq, bio);
1276                 blk_insert_flush(rq);
1277                 goto run_queue;
1278         }
1279
1280         /*
1281          * If a task plug exists, use it: since it is completely lockless, we
1282          * can use it to temporarily store requests until the task is either
1283          * done or scheduled away.
1284          */
1285         if (use_plug) {
1286                 struct blk_plug *plug = current->plug;
1287
1288                 if (plug) {
1289                         blk_mq_bio_to_request(rq, bio);
1290                         if (list_empty(&plug->mq_list))
1291                                 trace_block_plug(q);
1292                         else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1293                                 blk_flush_plug_list(plug, false);
1294                                 trace_block_plug(q);
1295                         }
1296                         list_add_tail(&rq->queuelist, &plug->mq_list);
1297                         blk_mq_put_ctx(data.ctx);
1298                         return;
1299                 }
1300         }
1301
1302         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1303                 /*
1304                  * For a SYNC request, send it to the hardware immediately. For
1305                  * an ASYNC request, just ensure that we run it later on. The
1306                  * latter allows for merging opportunities and more efficient
1307                  * dispatching.
1308                  */
1309 run_queue:
1310                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1311         }
1312
1313         blk_mq_put_ctx(data.ctx);
1314 }
1315
1316 /*
1317  * Default mapping to a software queue, since we use one per CPU.
1318  */
1319 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1320 {
1321         return q->queue_hw_ctx[q->mq_map[cpu]];
1322 }
1323 EXPORT_SYMBOL(blk_mq_map_queue);
1324
1325 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1326                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1327 {
1328         struct page *page;
1329
1330         if (tags->rqs && set->ops->exit_request) {
1331                 int i;
1332
1333                 for (i = 0; i < tags->nr_tags; i++) {
1334                         if (!tags->rqs[i])
1335                                 continue;
1336                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1337                                                 hctx_idx, i);
1338                         tags->rqs[i] = NULL;
1339                 }
1340         }
1341
1342         while (!list_empty(&tags->page_list)) {
1343                 page = list_first_entry(&tags->page_list, struct page, lru);
1344                 list_del_init(&page->lru);
1345                 __free_pages(page, page->private);
1346         }
1347
1348         kfree(tags->rqs);
1349
1350         blk_mq_free_tags(tags);
1351 }
1352
1353 static size_t order_to_size(unsigned int order)
1354 {
1355         return (size_t)PAGE_SIZE << order;
1356 }
1357
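/*
 * Allocate the tag map for one hardware queue along with the request
 * structures backing it. Requests (plus driver payload) are carved out of
 * higher-order pages where possible, and the driver's init_request hook is
 * called for each one.
 */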
1358 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1359                 unsigned int hctx_idx)
1360 {
1361         struct blk_mq_tags *tags;
1362         unsigned int i, j, entries_per_page, max_order = 4;
1363         size_t rq_size, left;
1364
1365         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1366                                 set->numa_node);
1367         if (!tags)
1368                 return NULL;
1369
1370         INIT_LIST_HEAD(&tags->page_list);
1371
1372         tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1373                                  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1374                                  set->numa_node);
1375         if (!tags->rqs) {
1376                 blk_mq_free_tags(tags);
1377                 return NULL;
1378         }
1379
1380         /*
1381          * rq_size is the size of the request plus driver payload, rounded
1382          * to the cacheline size
1383          */
1384         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1385                                 cache_line_size());
1386         left = rq_size * set->queue_depth;
1387
1388         for (i = 0; i < set->queue_depth; ) {
1389                 int this_order = max_order;
1390                 struct page *page;
1391                 int to_do;
1392                 void *p;
1393
1394                 while (left < order_to_size(this_order - 1) && this_order)
1395                         this_order--;
1396
1397                 do {
1398                         page = alloc_pages_node(set->numa_node,
1399                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1400                                 this_order);
1401                         if (page)
1402                                 break;
1403                         if (!this_order--)
1404                                 break;
1405                         if (order_to_size(this_order) < rq_size)
1406                                 break;
1407                 } while (1);
1408
1409                 if (!page)
1410                         goto fail;
1411
1412                 page->private = this_order;
1413                 list_add_tail(&page->lru, &tags->page_list);
1414
1415                 p = page_address(page);
1416                 entries_per_page = order_to_size(this_order) / rq_size;
1417                 to_do = min(entries_per_page, set->queue_depth - i);
1418                 left -= to_do * rq_size;
1419                 for (j = 0; j < to_do; j++) {
1420                         tags->rqs[i] = p;
1421                         tags->rqs[i]->atomic_flags = 0;
1422                         tags->rqs[i]->cmd_flags = 0;
1423                         if (set->ops->init_request) {
1424                                 if (set->ops->init_request(set->driver_data,
1425                                                 tags->rqs[i], hctx_idx, i,
1426                                                 set->numa_node)) {
1427                                         tags->rqs[i] = NULL;
1428                                         goto fail;
1429                                 }
1430                         }
1431
1432                         p += rq_size;
1433                         i++;
1434                 }
1435         }
1436
1437         return tags;
1438
1439 fail:
1440         blk_mq_free_rq_map(set, tags, hctx_idx);
1441         return NULL;
1442 }
1443
1444 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1445 {
1446         kfree(bitmap->map);
1447 }
1448
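/*
 * Allocate the per-hctx "ctx pending" bitmap, split into words of
 * bits_per_word bits each; the last word may cover fewer CPUs than the
 * others.
 */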
1449 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1450 {
1451         unsigned int bpw = 8, total, num_maps, i;
1452
1453         bitmap->bits_per_word = bpw;
1454
1455         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1456         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1457                                         GFP_KERNEL, node);
1458         if (!bitmap->map)
1459                 return -ENOMEM;
1460
1461         bitmap->map_size = num_maps;
1462
1463         total = nr_cpu_ids;
1464         for (i = 0; i < num_maps; i++) {
1465                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1466                 total -= bitmap->map[i].depth;
1467         }
1468
1469         return 0;
1470 }
1471
1472 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1473 {
1474         struct request_queue *q = hctx->queue;
1475         struct blk_mq_ctx *ctx;
1476         LIST_HEAD(tmp);
1477
1478         /*
1479          * Move ctx entries to new CPU, if this one is going away.
1480          */
1481         ctx = __blk_mq_get_ctx(q, cpu);
1482
1483         spin_lock(&ctx->lock);
1484         if (!list_empty(&ctx->rq_list)) {
1485                 list_splice_init(&ctx->rq_list, &tmp);
1486                 blk_mq_hctx_clear_pending(hctx, ctx);
1487         }
1488         spin_unlock(&ctx->lock);
1489
1490         if (list_empty(&tmp))
1491                 return NOTIFY_OK;
1492
1493         ctx = blk_mq_get_ctx(q);
1494         spin_lock(&ctx->lock);
1495
1496         while (!list_empty(&tmp)) {
1497                 struct request *rq;
1498
1499                 rq = list_first_entry(&tmp, struct request, queuelist);
1500                 rq->mq_ctx = ctx;
1501                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1502         }
1503
1504         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1505         blk_mq_hctx_mark_pending(hctx, ctx);
1506
1507         spin_unlock(&ctx->lock);
1508
1509         blk_mq_run_hw_queue(hctx, true);
1510         blk_mq_put_ctx(ctx);
1511         return NOTIFY_OK;
1512 }
1513
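/*
 * A CPU mapped to this hardware queue came online. If the hardware queue has
 * no tag map yet (it was freed because no software queues were mapped),
 * allocate one now.
 */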
1514 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1515 {
1516         struct request_queue *q = hctx->queue;
1517         struct blk_mq_tag_set *set = q->tag_set;
1518
1519         if (set->tags[hctx->queue_num])
1520                 return NOTIFY_OK;
1521
1522         set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1523         if (!set->tags[hctx->queue_num])
1524                 return NOTIFY_STOP;
1525
1526         hctx->tags = set->tags[hctx->queue_num];
1527         return NOTIFY_OK;
1528 }
1529
1530 static int blk_mq_hctx_notify(void *data, unsigned long action,
1531                               unsigned int cpu)
1532 {
1533         struct blk_mq_hw_ctx *hctx = data;
1534
1535         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1536                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1537         else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1538                 return blk_mq_hctx_cpu_online(hctx, cpu);
1539
1540         return NOTIFY_OK;
1541 }
1542
1543 static void blk_mq_exit_hw_queues(struct request_queue *q,
1544                 struct blk_mq_tag_set *set, int nr_queue)
1545 {
1546         struct blk_mq_hw_ctx *hctx;
1547         unsigned int i;
1548
1549         queue_for_each_hw_ctx(q, hctx, i) {
1550                 if (i == nr_queue)
1551                         break;
1552
1553                 blk_mq_tag_idle(hctx);
1554
1555                 if (set->ops->exit_hctx)
1556                         set->ops->exit_hctx(hctx, i);
1557
1558                 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1559                 kfree(hctx->ctxs);
1560                 blk_mq_free_bitmap(&hctx->ctx_map);
1561         }
1562
1563 }
1564
1565 static void blk_mq_free_hw_queues(struct request_queue *q,
1566                 struct blk_mq_tag_set *set)
1567 {
1568         struct blk_mq_hw_ctx *hctx;
1569         unsigned int i;
1570
1571         queue_for_each_hw_ctx(q, hctx, i) {
1572                 free_cpumask_var(hctx->cpumask);
1573                 kfree(hctx);
1574         }
1575 }
1576
1577 static int blk_mq_init_hw_queues(struct request_queue *q,
1578                 struct blk_mq_tag_set *set)
1579 {
1580         struct blk_mq_hw_ctx *hctx;
1581         unsigned int i;
1582
1583         /*
1584          * Initialize hardware queues
1585          */
1586         queue_for_each_hw_ctx(q, hctx, i) {
1587                 int node;
1588
1589                 node = hctx->numa_node;
1590                 if (node == NUMA_NO_NODE)
1591                         node = hctx->numa_node = set->numa_node;
1592
1593                 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1594                 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1595                 spin_lock_init(&hctx->lock);
1596                 INIT_LIST_HEAD(&hctx->dispatch);
1597                 hctx->queue = q;
1598                 hctx->queue_num = i;
1599                 hctx->flags = set->flags;
1600                 hctx->cmd_size = set->cmd_size;
1601
1602                 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1603                                                 blk_mq_hctx_notify, hctx);
1604                 blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1605
1606                 hctx->tags = set->tags[i];
1607
1608                 /*
1609                  * Allocate space for all possible cpus to avoid allocation at
1610                  * runtime
1611                  */
1612                 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1613                                                 GFP_KERNEL, node);
1614                 if (!hctx->ctxs)
1615                         break;
1616
1617                 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1618                         break;
1619
1620                 hctx->nr_ctx = 0;
1621
1622                 if (set->ops->init_hctx &&
1623                     set->ops->init_hctx(hctx, set->driver_data, i))
1624                         break;
1625         }
1626
1627         if (i == q->nr_hw_queues)
1628                 return 0;
1629
1630         /*
1631          * Init failed
1632          */
1633         blk_mq_exit_hw_queues(q, set, i);
1634
1635         return 1;
1636 }
1637
1638 static void blk_mq_init_cpu_queues(struct request_queue *q,
1639                                    unsigned int nr_hw_queues)
1640 {
1641         unsigned int i;
1642
1643         for_each_possible_cpu(i) {
1644                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1645                 struct blk_mq_hw_ctx *hctx;
1646
1647                 memset(__ctx, 0, sizeof(*__ctx));
1648                 __ctx->cpu = i;
1649                 spin_lock_init(&__ctx->lock);
1650                 INIT_LIST_HEAD(&__ctx->rq_list);
1651                 __ctx->queue = q;
1652
1653                 /* If the cpu isn't online, it is mapped to the first hctx */
1654                 if (!cpu_online(i))
1655                         continue;
1656
1657                 hctx = q->mq_ops->map_queue(q, i);
1658                 cpumask_set_cpu(i, hctx->cpumask);
1659                 hctx->nr_ctx++;
1660
1661                 /*
1662                  * Set local node, IFF we have more than one hw queue. If
1663                  * not, we remain on the home node of the device
1664                  */
1665                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1666                         hctx->numa_node = cpu_to_node(i);
1667         }
1668 }
1669
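/*
 * Rebuild the software -> hardware queue mapping from scratch. Each online
 * CPU's ctx is appended to the hctx chosen by ->map_queue(), and
 * ctx->index_hw records its slot in that hctx's ctxs[] array. As a sketch,
 * with four online CPUs spread over two hardware queues one would typically
 * end up with something like
 *
 *	hctx 0: ctxs[0] = cpu0, ctxs[1] = cpu1	(index_hw 0, 1)
 *	hctx 1: ctxs[0] = cpu2, ctxs[1] = cpu3	(index_hw 0, 1)
 *
 * though the exact spread is up to the driver's ->map_queue() implementation.
 */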
1670 static void blk_mq_map_swqueue(struct request_queue *q)
1671 {
1672         unsigned int i;
1673         struct blk_mq_hw_ctx *hctx;
1674         struct blk_mq_ctx *ctx;
1675
1676         queue_for_each_hw_ctx(q, hctx, i) {
1677                 cpumask_clear(hctx->cpumask);
1678                 hctx->nr_ctx = 0;
1679         }
1680
1681         /*
1682          * Map software to hardware queues
1683          */
1684         queue_for_each_ctx(q, ctx, i) {
1685                 /* If the cpu isn't online, it is mapped to the first hctx */
1686                 if (!cpu_online(i))
1687                         continue;
1688
1689                 hctx = q->mq_ops->map_queue(q, i);
1690                 cpumask_set_cpu(i, hctx->cpumask);
1691                 ctx->index_hw = hctx->nr_ctx;
1692                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1693         }
1694
1695         queue_for_each_hw_ctx(q, hctx, i) {
1696                 /*
1697                  * If no software queues are mapped to this hardware queue,
1698                  * disable it and free the request entries.
1699                  */
1700                 if (!hctx->nr_ctx) {
1701                         struct blk_mq_tag_set *set = q->tag_set;
1702
1703                         if (set->tags[i]) {
1704                                 blk_mq_free_rq_map(set, set->tags[i], i);
1705                                 set->tags[i] = NULL;
1706                                 hctx->tags = NULL;
1707                         }
1708                         continue;
1709                 }
1710
1711                 /*
1712                  * Initialize batch roundrobin counts
1713                  */
1714                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1715                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1716         }
1717 }
1718
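/*
 * Propagate the "tags are shared" state to every queue in the set: tags are
 * considered shared as soon as more than one request queue hangs off the
 * same tag set. Each queue is frozen while its hctx flags are rewritten, so
 * no request is in flight while BLK_MQ_F_TAG_SHARED changes under it.
 */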
1719 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1720 {
1721         struct blk_mq_hw_ctx *hctx;
1722         struct request_queue *q;
1723         bool shared;
1724         int i;
1725
1726         if (set->tag_list.next == set->tag_list.prev)
1727                 shared = false;
1728         else
1729                 shared = true;
1730
1731         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1732                 blk_mq_freeze_queue(q);
1733
1734                 queue_for_each_hw_ctx(q, hctx, i) {
1735                         if (shared)
1736                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1737                         else
1738                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1739                 }
1740                 blk_mq_unfreeze_queue(q);
1741         }
1742 }
1743
1744 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1745 {
1746         struct blk_mq_tag_set *set = q->tag_set;
1747
1748         mutex_lock(&set->tag_list_lock);
1749         list_del_init(&q->tag_set_list);
1750         blk_mq_update_tag_set_depth(set);
1751         mutex_unlock(&set->tag_list_lock);
1752 }
1753
1754 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1755                                      struct request_queue *q)
1756 {
1757         q->tag_set = set;
1758
1759         mutex_lock(&set->tag_list_lock);
1760         list_add_tail(&q->tag_set_list, &set->tag_list);
1761         blk_mq_update_tag_set_depth(set);
1762         mutex_unlock(&set->tag_list_lock);
1763 }
1764
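/*
 * Build a request queue on top of an allocated tag set. Roughly in order:
 * per-cpu software contexts, the hardware context array, the cpu -> hw
 * queue map, the request_queue itself, flush machinery, and finally the
 * hardware queues; the queue is then added to the tag set and to the
 * global all_q_list so CPU hotplug can remap it later.
 */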
1765 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1766 {
1767         struct blk_mq_hw_ctx **hctxs;
1768         struct blk_mq_ctx __percpu *ctx;
1769         struct request_queue *q;
1770         unsigned int *map;
1771         int i;
1772
1773         ctx = alloc_percpu(struct blk_mq_ctx);
1774         if (!ctx)
1775                 return ERR_PTR(-ENOMEM);
1776
1777         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1778                         set->numa_node);
1779
1780         if (!hctxs)
1781                 goto err_percpu;
1782
1783         map = blk_mq_make_queue_map(set);
1784         if (!map)
1785                 goto err_map;
1786
1787         for (i = 0; i < set->nr_hw_queues; i++) {
1788                 int node = blk_mq_hw_queue_to_node(map, i);
1789
1790                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1791                                         GFP_KERNEL, node);
1792                 if (!hctxs[i])
1793                         goto err_hctxs;
1794
1795                 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1796                         goto err_hctxs;
1797
1798                 atomic_set(&hctxs[i]->nr_active, 0);
1799                 hctxs[i]->numa_node = node;
1800                 hctxs[i]->queue_num = i;
1801         }
1802
1803         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1804         if (!q)
1805                 goto err_hctxs;
1806
1807         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release))
1808                 goto err_map;
1809
1810         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1811         blk_queue_rq_timeout(q, 30000);
1812
1813         q->nr_queues = nr_cpu_ids;
1814         q->nr_hw_queues = set->nr_hw_queues;
1815         q->mq_map = map;
1816
1817         q->queue_ctx = ctx;
1818         q->queue_hw_ctx = hctxs;
1819
1820         q->mq_ops = set->ops;
1821         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1822
1823         if (!(set->flags & BLK_MQ_F_SG_MERGE))
1824                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1825
1826         q->sg_reserved_size = INT_MAX;
1827
1828         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1829         INIT_LIST_HEAD(&q->requeue_list);
1830         spin_lock_init(&q->requeue_lock);
1831
1832         if (q->nr_hw_queues > 1)
1833                 blk_queue_make_request(q, blk_mq_make_request);
1834         else
1835                 blk_queue_make_request(q, blk_sq_make_request);
1836
1837         blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1838         if (set->timeout)
1839                 blk_queue_rq_timeout(q, set->timeout);
1840
1841         /*
1842          * Do this after blk_queue_make_request() overrides it...
1843          */
1844         q->nr_requests = set->queue_depth;
1845
1846         if (set->ops->complete)
1847                 blk_queue_softirq_done(q, set->ops->complete);
1848
1849         blk_mq_init_flush(q);
1850         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1851
1852         q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1853                                 set->cmd_size, cache_line_size()),
1854                                 GFP_KERNEL);
1855         if (!q->flush_rq)
1856                 goto err_hw;
1857
1858         if (blk_mq_init_hw_queues(q, set))
1859                 goto err_flush_rq;
1860
1861         mutex_lock(&all_q_mutex);
1862         list_add_tail(&q->all_q_node, &all_q_list);
1863         mutex_unlock(&all_q_mutex);
1864
1865         blk_mq_add_queue_tag_set(set, q);
1866
1867         blk_mq_map_swqueue(q);
1868
1869         return q;
1870
1871 err_flush_rq:
1872         kfree(q->flush_rq);
1873 err_hw:
1874         blk_cleanup_queue(q);
1875 err_hctxs:
1876         kfree(map);
1877         for (i = 0; i < set->nr_hw_queues; i++) {
1878                 if (!hctxs[i])
1879                         break;
1880                 free_cpumask_var(hctxs[i]->cpumask);
1881                 kfree(hctxs[i]);
1882         }
1883 err_map:
1884         kfree(hctxs);
1885 err_percpu:
1886         free_percpu(ctx);
1887         return ERR_PTR(-ENOMEM);
1888 }
1889 EXPORT_SYMBOL(blk_mq_init_queue);
1890
1891 void blk_mq_free_queue(struct request_queue *q)
1892 {
1893         struct blk_mq_tag_set   *set = q->tag_set;
1894
1895         blk_mq_del_queue_tag_set(q);
1896
1897         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1898         blk_mq_free_hw_queues(q, set);
1899
1900         percpu_ref_exit(&q->mq_usage_counter);
1901
1902         free_percpu(q->queue_ctx);
1903         kfree(q->queue_hw_ctx);
1904         kfree(q->mq_map);
1905
1906         q->queue_ctx = NULL;
1907         q->queue_hw_ctx = NULL;
1908         q->mq_map = NULL;
1909
1910         mutex_lock(&all_q_mutex);
1911         list_del_init(&q->all_q_node);
1912         mutex_unlock(&all_q_mutex);
1913 }
1914
1915 /* Basically redo blk_mq_init_queue with the queue frozen */
1916 static void blk_mq_queue_reinit(struct request_queue *q)
1917 {
1918         blk_mq_freeze_queue(q);
1919
1920         blk_mq_sysfs_unregister(q);
1921
1922         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1923
1924         /*
1925          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1926          * we should change hctx numa_node according to new topology (this
1927          * involves freeing and re-allocating memory; is it worth doing?)
1928          */
1929
1930         blk_mq_map_swqueue(q);
1931
1932         blk_mq_sysfs_register(q);
1933
1934         blk_mq_unfreeze_queue(q);
1935 }
1936
1937 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1938                                       unsigned long action, void *hcpu)
1939 {
1940         struct request_queue *q;
1941
1942         /*
1943          * Before new mappings are established, a hot-added CPU might already
1944          * have started handling requests. This doesn't break anything, as we
1945          * map offline CPUs to the first hardware queue. We will re-init the
1946          * queue below to get optimal settings.
1947          */
1948         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1949             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1950                 return NOTIFY_OK;
1951
1952         mutex_lock(&all_q_mutex);
1953         list_for_each_entry(q, &all_q_list, all_q_node)
1954                 blk_mq_queue_reinit(q);
1955         mutex_unlock(&all_q_mutex);
1956         return NOTIFY_OK;
1957 }
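/*
 * Any CPU coming or going triggers a full remap of every registered queue;
 * other hotplug phases (prepare, cancel, etc.) are ignored above. The walk
 * is serialized by all_q_mutex, the same lock that protects additions to
 * and removals from all_q_list.
 */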
1958
1959 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1960 {
1961         int i;
1962
1963         for (i = 0; i < set->nr_hw_queues; i++) {
1964                 set->tags[i] = blk_mq_init_rq_map(set, i);
1965                 if (!set->tags[i])
1966                         goto out_unwind;
1967         }
1968
1969         return 0;
1970
1971 out_unwind:
1972         while (--i >= 0)
1973                 blk_mq_free_rq_map(set, set->tags[i], i);
1974
1975         return -ENOMEM;
1976 }
1977
1978 /*
1979  * Allocate the request maps associated with this tag_set. Note that this
1980  * may reduce the depth asked for, if memory is tight. set->queue_depth
1981  * will be updated to reflect the allocated depth.
1982  */
1983 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1984 {
1985         unsigned int depth;
1986         int err;
1987
1988         depth = set->queue_depth;
1989         do {
1990                 err = __blk_mq_alloc_rq_maps(set);
1991                 if (!err)
1992                         break;
1993
1994                 set->queue_depth >>= 1;
1995                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
1996                         err = -ENOMEM;
1997                         break;
1998                 }
1999         } while (set->queue_depth);
2000
2001         if (!set->queue_depth || err) {
2002                 pr_err("blk-mq: failed to allocate request map\n");
2003                 return -ENOMEM;
2004         }
2005
2006         if (depth != set->queue_depth)
2007                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2008                                                 depth, set->queue_depth);
2009
2010         return 0;
2011 }
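/*
 * As a hypothetical illustration of the fallback above: a driver asking for
 * a queue depth of 1024 on a memory-constrained box might see the maps
 * allocated at 512 or 256 instead, with set->queue_depth updated to match
 * and a "reduced tag depth" message logged. The halving stops once the
 * depth would drop below set->reserved_tags + BLK_MQ_TAG_MIN, at which
 * point the allocation fails outright with -ENOMEM.
 */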
2012
2013 /*
2014  * Alloc a tag set to be associated with one or more request queues.
2015  * May fail with EINVAL for various error conditions. May adjust the
2016  * requested depth down, if it is too large. In that case, the set
2017  * value will be stored in set->queue_depth.
2018  */
2019 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2020 {
2021         if (!set->nr_hw_queues)
2022                 return -EINVAL;
2023         if (!set->queue_depth)
2024                 return -EINVAL;
2025         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2026                 return -EINVAL;
2027
2028         if (!set->ops->queue_rq || !set->ops->map_queue)
2029                 return -EINVAL;
2030
2031         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2032                 pr_info("blk-mq: reduced tag depth to %u\n",
2033                         BLK_MQ_MAX_DEPTH);
2034                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2035         }
2036
2037         set->tags = kmalloc_node(set->nr_hw_queues *
2038                                  sizeof(struct blk_mq_tags *),
2039                                  GFP_KERNEL, set->numa_node);
2040         if (!set->tags)
2041                 return -ENOMEM;
2042
2043         if (blk_mq_alloc_rq_maps(set))
2044                 goto enomem;
2045
2046         mutex_init(&set->tag_list_lock);
2047         INIT_LIST_HEAD(&set->tag_list);
2048
2049         return 0;
2050 enomem:
2051         kfree(set->tags);
2052         set->tags = NULL;
2053         return -ENOMEM;
2054 }
2055 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
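/*
 * Hedged usage sketch (illustrative only; the ops, sizes and names are
 * made-up placeholders, not taken from any real driver): a block driver
 * typically fills in a tag set once, allocates it, and then builds one or
 * more request queues from it:
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 *
 *	set->ops		= &my_mq_ops;
 *	set->nr_hw_queues	= 1;
 *	set->queue_depth	= 64;
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->cmd_size		= sizeof(struct my_cmd);
 *	set->flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(set))
 *		goto out;
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q))
 *		goto out_free_tag_set;
 *
 * On teardown the queue is cleaned up (blk_cleanup_queue()) before the tag
 * set is released with blk_mq_free_tag_set().
 */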
2056
2057 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2058 {
2059         int i;
2060
2061         for (i = 0; i < set->nr_hw_queues; i++) {
2062                 if (set->tags[i])
2063                         blk_mq_free_rq_map(set, set->tags[i], i);
2064         }
2065
2066         kfree(set->tags);
2067         set->tags = NULL;
2068 }
2069 EXPORT_SYMBOL(blk_mq_free_tag_set);
2070
2071 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2072 {
2073         struct blk_mq_tag_set *set = q->tag_set;
2074         struct blk_mq_hw_ctx *hctx;
2075         int i, ret;
2076
2077         if (!set || nr > set->queue_depth)
2078                 return -EINVAL;
2079
2080         ret = 0;
2081         queue_for_each_hw_ctx(q, hctx, i) {
2082                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2083                 if (ret)
2084                         break;
2085         }
2086
2087         if (!ret)
2088                 q->nr_requests = nr;
2089
2090         return ret;
2091 }
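/*
 * This is the blk-mq side of adjusting a live queue's depth; the block
 * core's sysfs nr_requests handler is the usual caller, though any path
 * may use it as long as the new value does not exceed the depth the tag
 * set was allocated with.
 */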
2092
2093 void blk_mq_disable_hotplug(void)
2094 {
2095         mutex_lock(&all_q_mutex);
2096 }
2097
2098 void blk_mq_enable_hotplug(void)
2099 {
2100         mutex_unlock(&all_q_mutex);
2101 }
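/*
 * These two simply bracket all_q_mutex: while it is held, all_q_list cannot
 * change and the hotplug-driven remapping above cannot run, so callers can
 * do work that must not race with either.
 */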
2102
2103 static int __init blk_mq_init(void)
2104 {
2105         blk_mq_cpu_init();
2106
2107         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2108
2109         return 0;
2110 }
2111 subsys_initcall(blk_mq_init);