block/blk-mq.c (cascardo/linux.git, commit d38371160019ca17468478d30a5c8f1792f5e109)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/delay.h>
24 #include <linux/crash_dump.h>
25
26 #include <trace/events/block.h>
27
28 #include <linux/blk-mq.h>
29 #include "blk.h"
30 #include "blk-mq.h"
31 #include "blk-mq-tag.h"
32
33 static DEFINE_MUTEX(all_q_mutex);
34 static LIST_HEAD(all_q_list);
35
36 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
37
38 /*
39  * Check if any of the ctx's have pending work in this hardware queue
40  */
41 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
42 {
43         unsigned int i;
44
45         for (i = 0; i < hctx->ctx_map.size; i++)
46                 if (hctx->ctx_map.map[i].word)
47                         return true;
48
49         return false;
50 }
51
52 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
53                                               struct blk_mq_ctx *ctx)
54 {
55         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
56 }
57
58 #define CTX_TO_BIT(hctx, ctx)   \
59         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
60
61 /*
62  * Mark this ctx as having pending work in this hardware queue
63  */
64 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
65                                      struct blk_mq_ctx *ctx)
66 {
67         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
68
69         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
70                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
71 }
72
73 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
74                                       struct blk_mq_ctx *ctx)
75 {
76         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
77
78         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
79 }
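
/*
 * Worked example (illustrative): blk_mq_alloc_bitmap() below sets
 * bits_per_word to 8, so a ctx with index_hw == 19 is tracked in
 * ctx_map.map[19 / 8] == map[2], at bit 19 & (8 - 1) == 3 of that word.
 */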
80
81 static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
82 {
83         while (true) {
84                 int ret;
85
86                 if (percpu_ref_tryget_live(&q->mq_usage_counter))
87                         return 0;
88
89                 if (!(gfp & __GFP_WAIT))
90                         return -EBUSY;
91
92                 ret = wait_event_interruptible(q->mq_freeze_wq,
93                                 !atomic_read(&q->mq_freeze_depth) ||
94                                 blk_queue_dying(q));
95                 if (blk_queue_dying(q))
96                         return -ENODEV;
97                 if (ret)
98                         return ret;
99         }
100 }
101
102 static void blk_mq_queue_exit(struct request_queue *q)
103 {
104         percpu_ref_put(&q->mq_usage_counter);
105 }
106
107 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
108 {
109         struct request_queue *q =
110                 container_of(ref, struct request_queue, mq_usage_counter);
111
112         wake_up_all(&q->mq_freeze_wq);
113 }
114
115 void blk_mq_freeze_queue_start(struct request_queue *q)
116 {
117         int freeze_depth;
118
119         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
120         if (freeze_depth == 1) {
121                 percpu_ref_kill(&q->mq_usage_counter);
122                 blk_mq_run_hw_queues(q, false);
123         }
124 }
125 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
126
127 static void blk_mq_freeze_queue_wait(struct request_queue *q)
128 {
129         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
130 }
131
132 /*
133  * Guarantee no request is in use, so we can change any data structure of
134  * the queue afterward.
135  */
136 void blk_mq_freeze_queue(struct request_queue *q)
137 {
138         blk_mq_freeze_queue_start(q);
139         blk_mq_freeze_queue_wait(q);
140 }
141 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
142
143 void blk_mq_unfreeze_queue(struct request_queue *q)
144 {
145         int freeze_depth;
146
147         freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
148         WARN_ON_ONCE(freeze_depth < 0);
149         if (!freeze_depth) {
150                 percpu_ref_reinit(&q->mq_usage_counter);
151                 wake_up_all(&q->mq_freeze_wq);
152         }
153 }
154 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
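
/*
 * Illustrative sketch (hypothetical helper, not part of blk-mq itself): the
 * usual pattern for changing queue-wide state that must not race with
 * in-flight requests is to bracket the update with a freeze/unfreeze pair.
 */
static void example_quiesce_and_update(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* block new requests, drain in-flight */

	/* ... it is now safe to modify the queue's data structures ... */

	blk_mq_unfreeze_queue(q);	/* re-enable request allocation */
}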
155
156 void blk_mq_wake_waiters(struct request_queue *q)
157 {
158         struct blk_mq_hw_ctx *hctx;
159         unsigned int i;
160
161         queue_for_each_hw_ctx(q, hctx, i)
162                 if (blk_mq_hw_queue_mapped(hctx))
163                         blk_mq_tag_wakeup_all(hctx->tags, true);
164
165         /*
166          * If we are called because the queue has now been marked as
167          * dying, we need to ensure that processes currently waiting on
168          * the queue are notified as well.
169          */
170         wake_up_all(&q->mq_freeze_wq);
171 }
172
173 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
174 {
175         return blk_mq_has_free_tags(hctx->tags);
176 }
177 EXPORT_SYMBOL(blk_mq_can_queue);
178
179 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
180                                struct request *rq, unsigned int rw_flags)
181 {
182         if (blk_queue_io_stat(q))
183                 rw_flags |= REQ_IO_STAT;
184
185         INIT_LIST_HEAD(&rq->queuelist);
186         /* csd/requeue_work/fifo_time is initialized before use */
187         rq->q = q;
188         rq->mq_ctx = ctx;
189         rq->cmd_flags |= rw_flags;
190         /* do not touch the atomic flags; they need atomic ops against the timer */
191         rq->cpu = -1;
192         INIT_HLIST_NODE(&rq->hash);
193         RB_CLEAR_NODE(&rq->rb_node);
194         rq->rq_disk = NULL;
195         rq->part = NULL;
196         rq->start_time = jiffies;
197 #ifdef CONFIG_BLK_CGROUP
198         rq->rl = NULL;
199         set_start_time_ns(rq);
200         rq->io_start_time_ns = 0;
201 #endif
202         rq->nr_phys_segments = 0;
203 #if defined(CONFIG_BLK_DEV_INTEGRITY)
204         rq->nr_integrity_segments = 0;
205 #endif
206         rq->special = NULL;
207         /* tag was already set */
208         rq->errors = 0;
209
210         rq->cmd = rq->__cmd;
211
212         rq->extra_len = 0;
213         rq->sense_len = 0;
214         rq->resid_len = 0;
215         rq->sense = NULL;
216
217         INIT_LIST_HEAD(&rq->timeout_list);
218         rq->timeout = 0;
219
220         rq->end_io = NULL;
221         rq->end_io_data = NULL;
222         rq->next_rq = NULL;
223
224         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
225 }
226
227 static struct request *
228 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
229 {
230         struct request *rq;
231         unsigned int tag;
232
233         tag = blk_mq_get_tag(data);
234         if (tag != BLK_MQ_TAG_FAIL) {
235                 rq = data->hctx->tags->rqs[tag];
236
237                 if (blk_mq_tag_busy(data->hctx)) {
238                         rq->cmd_flags = REQ_MQ_INFLIGHT;
239                         atomic_inc(&data->hctx->nr_active);
240                 }
241
242                 rq->tag = tag;
243                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
244                 return rq;
245         }
246
247         return NULL;
248 }
249
250 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
251                 bool reserved)
252 {
253         struct blk_mq_ctx *ctx;
254         struct blk_mq_hw_ctx *hctx;
255         struct request *rq;
256         struct blk_mq_alloc_data alloc_data;
257         int ret;
258
259         ret = blk_mq_queue_enter(q, gfp);
260         if (ret)
261                 return ERR_PTR(ret);
262
263         ctx = blk_mq_get_ctx(q);
264         hctx = q->mq_ops->map_queue(q, ctx->cpu);
265         blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
266                         reserved, ctx, hctx);
267
268         rq = __blk_mq_alloc_request(&alloc_data, rw);
269         if (!rq && (gfp & __GFP_WAIT)) {
270                 __blk_mq_run_hw_queue(hctx);
271                 blk_mq_put_ctx(ctx);
272
273                 ctx = blk_mq_get_ctx(q);
274                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
275                 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
276                                 hctx);
277                 rq = __blk_mq_alloc_request(&alloc_data, rw);
278                 ctx = alloc_data.ctx;
279         }
280         blk_mq_put_ctx(ctx);
281         if (!rq) {
282                 blk_mq_queue_exit(q);
283                 return ERR_PTR(-EWOULDBLOCK);
284         }
285         return rq;
286 }
287 EXPORT_SYMBOL(blk_mq_alloc_request);
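
/*
 * Illustrative sketch: allocating a request for a driver-internal command.
 * blk_mq_alloc_request() returns an ERR_PTR() on failure, so the result is
 * checked with IS_ERR(); the actual issue path is left out.
 */
static int example_alloc_internal_rq(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in the request and hand it to the driver's issue path ... */

	blk_mq_free_request(rq);
	return 0;
}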
288
289 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
290                                   struct blk_mq_ctx *ctx, struct request *rq)
291 {
292         const int tag = rq->tag;
293         struct request_queue *q = rq->q;
294
295         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
296                 atomic_dec(&hctx->nr_active);
297         rq->cmd_flags = 0;
298
299         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
300         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
301         blk_mq_queue_exit(q);
302 }
303
304 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
305 {
306         struct blk_mq_ctx *ctx = rq->mq_ctx;
307
308         ctx->rq_completed[rq_is_sync(rq)]++;
309         __blk_mq_free_request(hctx, ctx, rq);
310
311 }
312 EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
313
314 void blk_mq_free_request(struct request *rq)
315 {
316         struct blk_mq_hw_ctx *hctx;
317         struct request_queue *q = rq->q;
318
319         hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
320         blk_mq_free_hctx_request(hctx, rq);
321 }
322 EXPORT_SYMBOL_GPL(blk_mq_free_request);
323
324 inline void __blk_mq_end_request(struct request *rq, int error)
325 {
326         blk_account_io_done(rq);
327
328         if (rq->end_io) {
329                 rq->end_io(rq, error);
330         } else {
331                 if (unlikely(blk_bidi_rq(rq)))
332                         blk_mq_free_request(rq->next_rq);
333                 blk_mq_free_request(rq);
334         }
335 }
336 EXPORT_SYMBOL(__blk_mq_end_request);
337
338 void blk_mq_end_request(struct request *rq, int error)
339 {
340         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
341                 BUG();
342         __blk_mq_end_request(rq, error);
343 }
344 EXPORT_SYMBOL(blk_mq_end_request);
345
346 static void __blk_mq_complete_request_remote(void *data)
347 {
348         struct request *rq = data;
349
350         rq->q->softirq_done_fn(rq);
351 }
352
353 static void blk_mq_ipi_complete_request(struct request *rq)
354 {
355         struct blk_mq_ctx *ctx = rq->mq_ctx;
356         bool shared = false;
357         int cpu;
358
359         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
360                 rq->q->softirq_done_fn(rq);
361                 return;
362         }
363
364         cpu = get_cpu();
365         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
366                 shared = cpus_share_cache(cpu, ctx->cpu);
367
368         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
369                 rq->csd.func = __blk_mq_complete_request_remote;
370                 rq->csd.info = rq;
371                 rq->csd.flags = 0;
372                 smp_call_function_single_async(ctx->cpu, &rq->csd);
373         } else {
374                 rq->q->softirq_done_fn(rq);
375         }
376         put_cpu();
377 }
378
379 void __blk_mq_complete_request(struct request *rq)
380 {
381         struct request_queue *q = rq->q;
382
383         if (!q->softirq_done_fn)
384                 blk_mq_end_request(rq, rq->errors);
385         else
386                 blk_mq_ipi_complete_request(rq);
387 }
388
389 /**
390  * blk_mq_complete_request - end I/O on a request
391  * @rq:         the request being processed
392  * @error:      error to record in the request, 0 for success
393  * Description:
394  *      Ends all I/O on a request. It does not handle partial completions.
395  *      The actual completion happens out-of-order, through an IPI handler.
396  **/
397 void blk_mq_complete_request(struct request *rq, int error)
398 {
399         struct request_queue *q = rq->q;
400
401         if (unlikely(blk_should_fake_timeout(q)))
402                 return;
403         if (!blk_mark_rq_complete(rq)) {
404                 rq->errors = error;
405                 __blk_mq_complete_request(rq);
406         }
407 }
408 EXPORT_SYMBOL(blk_mq_complete_request);
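
/*
 * Illustrative sketch: a driver completion path (for example an interrupt
 * handler) looks up the request by its tag and lets the core finish it.
 * The hardware-status flag is a hypothetical placeholder.
 */
static void example_complete_by_tag(struct blk_mq_hw_ctx *hctx,
				    unsigned int tag, bool hw_error)
{
	struct request *rq = blk_mq_tag_to_rq(hctx->tags, tag);

	blk_mq_complete_request(rq, hw_error ? -EIO : 0);
}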
409
410 int blk_mq_request_started(struct request *rq)
411 {
412         return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
413 }
414 EXPORT_SYMBOL_GPL(blk_mq_request_started);
415
416 void blk_mq_start_request(struct request *rq)
417 {
418         struct request_queue *q = rq->q;
419
420         trace_block_rq_issue(q, rq);
421
422         rq->resid_len = blk_rq_bytes(rq);
423         if (unlikely(blk_bidi_rq(rq)))
424                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
425
426         blk_add_timer(rq);
427
428         /*
429          * Ensure that ->deadline is visible before we set the started
430          * flag and clear the completed flag.
431          */
432         smp_mb__before_atomic();
433
434         /*
435          * Mark us as started and clear complete. Complete might have been
436          * set if requeue raced with timeout, which then marked it as
437          * complete. So be sure to clear complete again when we start
438          * the request, otherwise we'll ignore the completion event.
439          */
440         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
441                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
442         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
443                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
444
445         if (q->dma_drain_size && blk_rq_bytes(rq)) {
446                 /*
447                  * Make sure space for the drain appears.  We know we can do
448                  * this because max_hw_segments has been adjusted to be one
449                  * fewer than the device can handle.
450                  */
451                 rq->nr_phys_segments++;
452         }
453 }
454 EXPORT_SYMBOL(blk_mq_start_request);
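
/*
 * Illustrative sketch of the driver side of this contract: ->queue_rq()
 * calls blk_mq_start_request() before handing the request to hardware, so
 * that the timeout is armed and REQ_ATOM_STARTED is set. example_hw_submit()
 * is a hypothetical device-specific helper.
 */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	if (example_hw_submit(rq, bd->last) < 0)
		return BLK_MQ_RQ_QUEUE_ERROR;	/* core fails the request with -EIO */

	return BLK_MQ_RQ_QUEUE_OK;
}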
455
456 static void __blk_mq_requeue_request(struct request *rq)
457 {
458         struct request_queue *q = rq->q;
459
460         trace_block_rq_requeue(q, rq);
461
462         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
463                 if (q->dma_drain_size && blk_rq_bytes(rq))
464                         rq->nr_phys_segments--;
465         }
466 }
467
468 void blk_mq_requeue_request(struct request *rq)
469 {
470         __blk_mq_requeue_request(rq);
471
472         BUG_ON(blk_queued_rq(rq));
473         blk_mq_add_to_requeue_list(rq, true);
474 }
475 EXPORT_SYMBOL(blk_mq_requeue_request);
476
477 static void blk_mq_requeue_work(struct work_struct *work)
478 {
479         struct request_queue *q =
480                 container_of(work, struct request_queue, requeue_work);
481         LIST_HEAD(rq_list);
482         struct request *rq, *next;
483         unsigned long flags;
484
485         spin_lock_irqsave(&q->requeue_lock, flags);
486         list_splice_init(&q->requeue_list, &rq_list);
487         spin_unlock_irqrestore(&q->requeue_lock, flags);
488
489         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
490                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
491                         continue;
492
493                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
494                 list_del_init(&rq->queuelist);
495                 blk_mq_insert_request(rq, true, false, false);
496         }
497
498         while (!list_empty(&rq_list)) {
499                 rq = list_entry(rq_list.next, struct request, queuelist);
500                 list_del_init(&rq->queuelist);
501                 blk_mq_insert_request(rq, false, false, false);
502         }
503
504         /*
505          * Use the start variant of queue running here, so that running
506          * the requeue work will kick stopped queues.
507          */
508         blk_mq_start_hw_queues(q);
509 }
510
511 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
512 {
513         struct request_queue *q = rq->q;
514         unsigned long flags;
515
516         /*
517          * We abuse this flag that is otherwise used by the I/O scheduler to
518          * request head insertion from the workqueue.
519          */
520         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
521
522         spin_lock_irqsave(&q->requeue_lock, flags);
523         if (at_head) {
524                 rq->cmd_flags |= REQ_SOFTBARRIER;
525                 list_add(&rq->queuelist, &q->requeue_list);
526         } else {
527                 list_add_tail(&rq->queuelist, &q->requeue_list);
528         }
529         spin_unlock_irqrestore(&q->requeue_lock, flags);
530 }
531 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
532
533 void blk_mq_cancel_requeue_work(struct request_queue *q)
534 {
535         cancel_work_sync(&q->requeue_work);
536 }
537 EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
538
539 void blk_mq_kick_requeue_list(struct request_queue *q)
540 {
541         kblockd_schedule_work(&q->requeue_work);
542 }
543 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
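
/*
 * Illustrative sketch: a driver that decides a dispatched request must be
 * retried (for example after a transient path failure) parks it on the
 * requeue list and then kicks the requeue work.
 */
static void example_retry_request(struct request *rq)
{
	blk_mq_requeue_request(rq);		/* clears STARTED, adds to requeue_list */
	blk_mq_kick_requeue_list(rq->q);	/* schedules blk_mq_requeue_work() */
}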
544
545 void blk_mq_abort_requeue_list(struct request_queue *q)
546 {
547         unsigned long flags;
548         LIST_HEAD(rq_list);
549
550         spin_lock_irqsave(&q->requeue_lock, flags);
551         list_splice_init(&q->requeue_list, &rq_list);
552         spin_unlock_irqrestore(&q->requeue_lock, flags);
553
554         while (!list_empty(&rq_list)) {
555                 struct request *rq;
556
557                 rq = list_first_entry(&rq_list, struct request, queuelist);
558                 list_del_init(&rq->queuelist);
559                 rq->errors = -EIO;
560                 blk_mq_end_request(rq, rq->errors);
561         }
562 }
563 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
564
565 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
566 {
567         return tags->rqs[tag];
568 }
569 EXPORT_SYMBOL(blk_mq_tag_to_rq);
570
571 struct blk_mq_timeout_data {
572         unsigned long next;
573         unsigned int next_set;
574 };
575
576 void blk_mq_rq_timed_out(struct request *req, bool reserved)
577 {
578         struct blk_mq_ops *ops = req->q->mq_ops;
579         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
580
581         /*
582          * We know that complete is set at this point. If STARTED isn't set
583          * anymore, then the request isn't active and the "timeout" should
584          * just be ignored. This can happen due to the bitflag ordering.
585          * Timeout first checks if STARTED is set, and if it is, assumes
586          * the request is active. But if we race with completion, then
587          * both flags will get cleared. So check here again, and ignore
588          * a timeout event with a request that isn't active.
589          */
590         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
591                 return;
592
593         if (ops->timeout)
594                 ret = ops->timeout(req, reserved);
595
596         switch (ret) {
597         case BLK_EH_HANDLED:
598                 __blk_mq_complete_request(req);
599                 break;
600         case BLK_EH_RESET_TIMER:
601                 blk_add_timer(req);
602                 blk_clear_rq_complete(req);
603                 break;
604         case BLK_EH_NOT_HANDLED:
605                 break;
606         default:
607                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
608                 break;
609         }
610 }
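
/*
 * Illustrative sketch of a driver ->timeout callback feeding the switch
 * above: give the hardware more time if it is still working on the request,
 * otherwise report it as handled so the core completes it.
 * example_device_still_busy() is a hypothetical helper.
 */
static enum blk_eh_timer_return example_timeout(struct request *rq,
						bool reserved)
{
	if (example_device_still_busy(rq))
		return BLK_EH_RESET_TIMER;	/* re-arm the timer */

	rq->errors = -ETIMEDOUT;
	return BLK_EH_HANDLED;			/* core calls __blk_mq_complete_request() */
}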
611
612 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
613                 struct request *rq, void *priv, bool reserved)
614 {
615         struct blk_mq_timeout_data *data = priv;
616
617         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
618                 /*
619                  * If a request wasn't started before the queue was
620                  * marked dying, kill it here or it'll go unnoticed.
621                  */
622                 if (unlikely(blk_queue_dying(rq->q)))
623                         blk_mq_complete_request(rq, -EIO);
624                 return;
625         }
626         if (rq->cmd_flags & REQ_NO_TIMEOUT)
627                 return;
628
629         if (time_after_eq(jiffies, rq->deadline)) {
630                 if (!blk_mark_rq_complete(rq))
631                         blk_mq_rq_timed_out(rq, reserved);
632         } else if (!data->next_set || time_after(data->next, rq->deadline)) {
633                 data->next = rq->deadline;
634                 data->next_set = 1;
635         }
636 }
637
638 static void blk_mq_rq_timer(unsigned long priv)
639 {
640         struct request_queue *q = (struct request_queue *)priv;
641         struct blk_mq_timeout_data data = {
642                 .next           = 0,
643                 .next_set       = 0,
644         };
645         int i;
646
647         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
648
649         if (data.next_set) {
650                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
651                 mod_timer(&q->timeout, data.next);
652         } else {
653                 struct blk_mq_hw_ctx *hctx;
654
655                 queue_for_each_hw_ctx(q, hctx, i) {
656                         /* the hctx may be unmapped, so check it here */
657                         if (blk_mq_hw_queue_mapped(hctx))
658                                 blk_mq_tag_idle(hctx);
659                 }
660         }
661 }
662
663 /*
664  * Reverse check our software queue for entries that we could potentially
665  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
666  * too much time checking for merges.
667  */
668 static bool blk_mq_attempt_merge(struct request_queue *q,
669                                  struct blk_mq_ctx *ctx, struct bio *bio)
670 {
671         struct request *rq;
672         int checked = 8;
673
674         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
675                 int el_ret;
676
677                 if (!checked--)
678                         break;
679
680                 if (!blk_rq_merge_ok(rq, bio))
681                         continue;
682
683                 el_ret = blk_try_merge(rq, bio);
684                 if (el_ret == ELEVATOR_BACK_MERGE) {
685                         if (bio_attempt_back_merge(q, rq, bio)) {
686                                 ctx->rq_merged++;
687                                 return true;
688                         }
689                         break;
690                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
691                         if (bio_attempt_front_merge(q, rq, bio)) {
692                                 ctx->rq_merged++;
693                                 return true;
694                         }
695                         break;
696                 }
697         }
698
699         return false;
700 }
701
702 /*
703  * Process software queues that have been marked busy, splicing them
704  * to the for-dispatch list.
705  */
706 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
707 {
708         struct blk_mq_ctx *ctx;
709         int i;
710
711         for (i = 0; i < hctx->ctx_map.size; i++) {
712                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
713                 unsigned int off, bit;
714
715                 if (!bm->word)
716                         continue;
717
718                 bit = 0;
719                 off = i * hctx->ctx_map.bits_per_word;
720                 do {
721                         bit = find_next_bit(&bm->word, bm->depth, bit);
722                         if (bit >= bm->depth)
723                                 break;
724
725                         ctx = hctx->ctxs[bit + off];
726                         clear_bit(bit, &bm->word);
727                         spin_lock(&ctx->lock);
728                         list_splice_tail_init(&ctx->rq_list, list);
729                         spin_unlock(&ctx->lock);
730
731                         bit++;
732                 } while (1);
733         }
734 }
735
736 /*
737  * Run this hardware queue, pulling any software queues mapped to it in.
738  * Note that this function currently has various problems around ordering
739  * of IO. In particular, we'd like FIFO behaviour on handling existing
740  * items on the hctx->dispatch list. Ignore that for now.
741  */
742 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
743 {
744         struct request_queue *q = hctx->queue;
745         struct request *rq;
746         LIST_HEAD(rq_list);
747         LIST_HEAD(driver_list);
748         struct list_head *dptr;
749         int queued;
750
751         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
752
753         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
754                 return;
755
756         hctx->run++;
757
758         /*
759          * Touch any software queue that has pending entries.
760          */
761         flush_busy_ctxs(hctx, &rq_list);
762
763         /*
764          * If we have previous entries on our dispatch list, grab them
765          * and stuff them at the front for more fair dispatch.
766          */
767         if (!list_empty_careful(&hctx->dispatch)) {
768                 spin_lock(&hctx->lock);
769                 if (!list_empty(&hctx->dispatch))
770                         list_splice_init(&hctx->dispatch, &rq_list);
771                 spin_unlock(&hctx->lock);
772         }
773
774         /*
775          * Start off with dptr being NULL, so we start the first request
776          * immediately, even if we have more pending.
777          */
778         dptr = NULL;
779
780         /*
781          * Now process all the entries, sending them to the driver.
782          */
783         queued = 0;
784         while (!list_empty(&rq_list)) {
785                 struct blk_mq_queue_data bd;
786                 int ret;
787
788                 rq = list_first_entry(&rq_list, struct request, queuelist);
789                 list_del_init(&rq->queuelist);
790
791                 bd.rq = rq;
792                 bd.list = dptr;
793                 bd.last = list_empty(&rq_list);
794
795                 ret = q->mq_ops->queue_rq(hctx, &bd);
796                 switch (ret) {
797                 case BLK_MQ_RQ_QUEUE_OK:
798                         queued++;
799                         continue;
800                 case BLK_MQ_RQ_QUEUE_BUSY:
801                         list_add(&rq->queuelist, &rq_list);
802                         __blk_mq_requeue_request(rq);
803                         break;
804                 default:
805                         pr_err("blk-mq: bad return on queue: %d\n", ret);
806                 case BLK_MQ_RQ_QUEUE_ERROR:
807                         rq->errors = -EIO;
808                         blk_mq_end_request(rq, rq->errors);
809                         break;
810                 }
811
812                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
813                         break;
814
815                 /*
816                  * We've done the first request. If we have more than 1
817                  * left in the list, set dptr to defer issue.
818                  */
819                 if (!dptr && rq_list.next != rq_list.prev)
820                         dptr = &driver_list;
821         }
822
823         if (!queued)
824                 hctx->dispatched[0]++;
825         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
826                 hctx->dispatched[ilog2(queued) + 1]++;
827
828         /*
829          * Any items that need requeuing? Stuff them into hctx->dispatch,
830          * that is where we will continue on next queue run.
831          */
832         if (!list_empty(&rq_list)) {
833                 spin_lock(&hctx->lock);
834                 list_splice(&rq_list, &hctx->dispatch);
835                 spin_unlock(&hctx->lock);
836                 /*
837                  * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
838                  * it's possible the queue is stopped and restarted again
839                  * before this. Queue restart will dispatch requests. And since
840                  * requests in rq_list aren't added into hctx->dispatch yet,
841                  * the requests in rq_list might get lost.
842                  *
843                  * blk_mq_run_hw_queue() already checks the STOPPED bit
844                  **/
845                 blk_mq_run_hw_queue(hctx, true);
846         }
847 }
848
849 /*
850  * It'd be great if the workqueue API had a way to pass
851  * in a mask and had some smarts for more clever placement.
852  * For now we just round-robin here, switching for every
853  * BLK_MQ_CPU_WORK_BATCH queued items.
854  */
855 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
856 {
857         if (hctx->queue->nr_hw_queues == 1)
858                 return WORK_CPU_UNBOUND;
859
860         if (--hctx->next_cpu_batch <= 0) {
861                 int cpu = hctx->next_cpu, next_cpu;
862
863                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
864                 if (next_cpu >= nr_cpu_ids)
865                         next_cpu = cpumask_first(hctx->cpumask);
866
867                 hctx->next_cpu = next_cpu;
868                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
869
870                 return cpu;
871         }
872
873         return hctx->next_cpu;
874 }
875
876 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
877 {
878         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
879             !blk_mq_hw_queue_mapped(hctx)))
880                 return;
881
882         if (!async) {
883                 int cpu = get_cpu();
884                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
885                         __blk_mq_run_hw_queue(hctx);
886                         put_cpu();
887                         return;
888                 }
889
890                 put_cpu();
891         }
892
893         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
894                         &hctx->run_work, 0);
895 }
896
897 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
898 {
899         struct blk_mq_hw_ctx *hctx;
900         int i;
901
902         queue_for_each_hw_ctx(q, hctx, i) {
903                 if ((!blk_mq_hctx_has_pending(hctx) &&
904                     list_empty_careful(&hctx->dispatch)) ||
905                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
906                         continue;
907
908                 blk_mq_run_hw_queue(hctx, async);
909         }
910 }
911 EXPORT_SYMBOL(blk_mq_run_hw_queues);
912
913 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
914 {
915         cancel_delayed_work(&hctx->run_work);
916         cancel_delayed_work(&hctx->delay_work);
917         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
918 }
919 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
920
921 void blk_mq_stop_hw_queues(struct request_queue *q)
922 {
923         struct blk_mq_hw_ctx *hctx;
924         int i;
925
926         queue_for_each_hw_ctx(q, hctx, i)
927                 blk_mq_stop_hw_queue(hctx);
928 }
929 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
930
931 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
932 {
933         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
934
935         blk_mq_run_hw_queue(hctx, false);
936 }
937 EXPORT_SYMBOL(blk_mq_start_hw_queue);
938
939 void blk_mq_start_hw_queues(struct request_queue *q)
940 {
941         struct blk_mq_hw_ctx *hctx;
942         int i;
943
944         queue_for_each_hw_ctx(q, hctx, i)
945                 blk_mq_start_hw_queue(hctx);
946 }
947 EXPORT_SYMBOL(blk_mq_start_hw_queues);
948
949 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
950 {
951         struct blk_mq_hw_ctx *hctx;
952         int i;
953
954         queue_for_each_hw_ctx(q, hctx, i) {
955                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
956                         continue;
957
958                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
959                 blk_mq_run_hw_queue(hctx, async);
960         }
961 }
962 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
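
/*
 * Illustrative sketch of how the stop/start helpers are typically paired:
 * ->queue_rq() stops the hardware queue when the device runs out of
 * resources and reports busy; the completion path restarts the stopped
 * queues once resources are available again. example_hw_full() is a
 * hypothetical helper.
 */
static int example_queue_rq_may_stop(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	if (example_hw_full(hctx)) {
		blk_mq_stop_hw_queue(hctx);	/* no further dispatch work */
		return BLK_MQ_RQ_QUEUE_BUSY;	/* core re-queues bd->rq */
	}

	blk_mq_start_request(bd->rq);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void example_resources_freed(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true);	/* async re-run */
}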
963
964 static void blk_mq_run_work_fn(struct work_struct *work)
965 {
966         struct blk_mq_hw_ctx *hctx;
967
968         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
969
970         __blk_mq_run_hw_queue(hctx);
971 }
972
973 static void blk_mq_delay_work_fn(struct work_struct *work)
974 {
975         struct blk_mq_hw_ctx *hctx;
976
977         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
978
979         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
980                 __blk_mq_run_hw_queue(hctx);
981 }
982
983 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
984 {
985         if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
986                 return;
987
988         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
989                         &hctx->delay_work, msecs_to_jiffies(msecs));
990 }
991 EXPORT_SYMBOL(blk_mq_delay_queue);
992
993 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
994                                     struct request *rq, bool at_head)
995 {
996         struct blk_mq_ctx *ctx = rq->mq_ctx;
997
998         trace_block_rq_insert(hctx->queue, rq);
999
1000         if (at_head)
1001                 list_add(&rq->queuelist, &ctx->rq_list);
1002         else
1003                 list_add_tail(&rq->queuelist, &ctx->rq_list);
1004
1005         blk_mq_hctx_mark_pending(hctx, ctx);
1006 }
1007
1008 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1009                 bool async)
1010 {
1011         struct request_queue *q = rq->q;
1012         struct blk_mq_hw_ctx *hctx;
1013         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
1014
1015         current_ctx = blk_mq_get_ctx(q);
1016         if (!cpu_online(ctx->cpu))
1017                 rq->mq_ctx = ctx = current_ctx;
1018
1019         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1020
1021         spin_lock(&ctx->lock);
1022         __blk_mq_insert_request(hctx, rq, at_head);
1023         spin_unlock(&ctx->lock);
1024
1025         if (run_queue)
1026                 blk_mq_run_hw_queue(hctx, async);
1027
1028         blk_mq_put_ctx(current_ctx);
1029 }
1030
1031 static void blk_mq_insert_requests(struct request_queue *q,
1032                                      struct blk_mq_ctx *ctx,
1033                                      struct list_head *list,
1034                                      int depth,
1035                                      bool from_schedule)
1036
1037 {
1038         struct blk_mq_hw_ctx *hctx;
1039         struct blk_mq_ctx *current_ctx;
1040
1041         trace_block_unplug(q, depth, !from_schedule);
1042
1043         current_ctx = blk_mq_get_ctx(q);
1044
1045         if (!cpu_online(ctx->cpu))
1046                 ctx = current_ctx;
1047         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1048
1049         /*
1050          * Preemption doesn't flush the plug list, so it's possible that
1051          * ctx->cpu is offline now.
1052          */
1053         spin_lock(&ctx->lock);
1054         while (!list_empty(list)) {
1055                 struct request *rq;
1056
1057                 rq = list_first_entry(list, struct request, queuelist);
1058                 list_del_init(&rq->queuelist);
1059                 rq->mq_ctx = ctx;
1060                 __blk_mq_insert_request(hctx, rq, false);
1061         }
1062         spin_unlock(&ctx->lock);
1063
1064         blk_mq_run_hw_queue(hctx, from_schedule);
1065         blk_mq_put_ctx(current_ctx);
1066 }
1067
1068 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1069 {
1070         struct request *rqa = container_of(a, struct request, queuelist);
1071         struct request *rqb = container_of(b, struct request, queuelist);
1072
1073         return !(rqa->mq_ctx < rqb->mq_ctx ||
1074                  (rqa->mq_ctx == rqb->mq_ctx &&
1075                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1076 }
1077
1078 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1079 {
1080         struct blk_mq_ctx *this_ctx;
1081         struct request_queue *this_q;
1082         struct request *rq;
1083         LIST_HEAD(list);
1084         LIST_HEAD(ctx_list);
1085         unsigned int depth;
1086
1087         list_splice_init(&plug->mq_list, &list);
1088
1089         list_sort(NULL, &list, plug_ctx_cmp);
1090
1091         this_q = NULL;
1092         this_ctx = NULL;
1093         depth = 0;
1094
1095         while (!list_empty(&list)) {
1096                 rq = list_entry_rq(list.next);
1097                 list_del_init(&rq->queuelist);
1098                 BUG_ON(!rq->q);
1099                 if (rq->mq_ctx != this_ctx) {
1100                         if (this_ctx) {
1101                                 blk_mq_insert_requests(this_q, this_ctx,
1102                                                         &ctx_list, depth,
1103                                                         from_schedule);
1104                         }
1105
1106                         this_ctx = rq->mq_ctx;
1107                         this_q = rq->q;
1108                         depth = 0;
1109                 }
1110
1111                 depth++;
1112                 list_add_tail(&rq->queuelist, &ctx_list);
1113         }
1114
1115         /*
1116          * If 'this_ctx' is set, we know we have entries to complete
1117          * on 'ctx_list'. Do those.
1118          */
1119         if (this_ctx) {
1120                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1121                                        from_schedule);
1122         }
1123 }
1124
1125 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1126 {
1127         init_request_from_bio(rq, bio);
1128
1129         if (blk_do_io_stat(rq))
1130                 blk_account_io_start(rq, 1);
1131 }
1132
1133 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1134 {
1135         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1136                 !blk_queue_nomerges(hctx->queue);
1137 }
1138
1139 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1140                                          struct blk_mq_ctx *ctx,
1141                                          struct request *rq, struct bio *bio)
1142 {
1143         if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
1144                 blk_mq_bio_to_request(rq, bio);
1145                 spin_lock(&ctx->lock);
1146 insert_rq:
1147                 __blk_mq_insert_request(hctx, rq, false);
1148                 spin_unlock(&ctx->lock);
1149                 return false;
1150         } else {
1151                 struct request_queue *q = hctx->queue;
1152
1153                 spin_lock(&ctx->lock);
1154                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1155                         blk_mq_bio_to_request(rq, bio);
1156                         goto insert_rq;
1157                 }
1158
1159                 spin_unlock(&ctx->lock);
1160                 __blk_mq_free_request(hctx, ctx, rq);
1161                 return true;
1162         }
1163 }
1164
1165 struct blk_map_ctx {
1166         struct blk_mq_hw_ctx *hctx;
1167         struct blk_mq_ctx *ctx;
1168 };
1169
1170 static struct request *blk_mq_map_request(struct request_queue *q,
1171                                           struct bio *bio,
1172                                           struct blk_map_ctx *data)
1173 {
1174         struct blk_mq_hw_ctx *hctx;
1175         struct blk_mq_ctx *ctx;
1176         struct request *rq;
1177         int rw = bio_data_dir(bio);
1178         struct blk_mq_alloc_data alloc_data;
1179
1180         if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
1181                 bio_io_error(bio);
1182                 return NULL;
1183         }
1184
1185         ctx = blk_mq_get_ctx(q);
1186         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1187
1188         if (rw_is_sync(bio->bi_rw))
1189                 rw |= REQ_SYNC;
1190
1191         trace_block_getrq(q, bio, rw);
1192         blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1193                         hctx);
1194         rq = __blk_mq_alloc_request(&alloc_data, rw);
1195         if (unlikely(!rq)) {
1196                 __blk_mq_run_hw_queue(hctx);
1197                 blk_mq_put_ctx(ctx);
1198                 trace_block_sleeprq(q, bio, rw);
1199
1200                 ctx = blk_mq_get_ctx(q);
1201                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1202                 blk_mq_set_alloc_data(&alloc_data, q,
1203                                 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1204                 rq = __blk_mq_alloc_request(&alloc_data, rw);
1205                 ctx = alloc_data.ctx;
1206                 hctx = alloc_data.hctx;
1207         }
1208
1209         hctx->queued++;
1210         data->hctx = hctx;
1211         data->ctx = ctx;
1212         return rq;
1213 }
1214
1215 static int blk_mq_direct_issue_request(struct request *rq)
1216 {
1217         int ret;
1218         struct request_queue *q = rq->q;
1219         struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
1220                         rq->mq_ctx->cpu);
1221         struct blk_mq_queue_data bd = {
1222                 .rq = rq,
1223                 .list = NULL,
1224                 .last = 1
1225         };
1226
1227         /*
1228          * If the driver returns OK, we are done. For error, kill the
1229          * request. Any other return (busy) means we just add it to our
1230          * list as we previously would have done.
1231          */
1232         ret = q->mq_ops->queue_rq(hctx, &bd);
1233         if (ret == BLK_MQ_RQ_QUEUE_OK)
1234                 return 0;
1235         else {
1236                 __blk_mq_requeue_request(rq);
1237
1238                 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1239                         rq->errors = -EIO;
1240                         blk_mq_end_request(rq, rq->errors);
1241                         return 0;
1242                 }
1243                 return -1;
1244         }
1245 }
1246
1247 /*
1248  * Multiple hardware queue variant. This will not use per-process plugs,
1249  * but will attempt to bypass the hctx queueing if we can go straight to
1250  * hardware for SYNC IO.
1251  */
1252 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1253 {
1254         const int is_sync = rw_is_sync(bio->bi_rw);
1255         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1256         struct blk_map_ctx data;
1257         struct request *rq;
1258         unsigned int request_count = 0;
1259         struct blk_plug *plug;
1260         struct request *same_queue_rq = NULL;
1261
1262         blk_queue_bounce(q, &bio);
1263
1264         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1265                 bio_io_error(bio);
1266                 return;
1267         }
1268
1269         blk_queue_split(q, &bio, q->bio_split);
1270
1271         if (!is_flush_fua && !blk_queue_nomerges(q)) {
1272                 if (blk_attempt_plug_merge(q, bio, &request_count,
1273                                            &same_queue_rq))
1274                         return;
1275         } else
1276                 request_count = blk_plug_queued_count(q);
1277
1278         rq = blk_mq_map_request(q, bio, &data);
1279         if (unlikely(!rq))
1280                 return;
1281
1282         if (unlikely(is_flush_fua)) {
1283                 blk_mq_bio_to_request(rq, bio);
1284                 blk_insert_flush(rq);
1285                 goto run_queue;
1286         }
1287
1288         plug = current->plug;
1289         /*
1290          * If the driver supports deferred issue based on 'last', then
1291          * queue it up like normal since we can potentially save some
1292          * CPU this way.
1293          */
1294         if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1295             !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1296                 struct request *old_rq = NULL;
1297
1298                 blk_mq_bio_to_request(rq, bio);
1299
1300                 /*
1301                  * We do limited plugging. If the bio can be merged, do the
1302                  * merge. Otherwise the existing request in the plug list will
1303                  * be issued. So the plug list will have at most one request.
1304                  */
1305                 if (plug) {
1306                         /*
1307                          * The plug list might get flushed before this. If that
1308                          * happens, same_queue_rq is invalid and the plug list is empty.
1309                          **/
1310                         if (same_queue_rq && !list_empty(&plug->mq_list)) {
1311                                 old_rq = same_queue_rq;
1312                                 list_del_init(&old_rq->queuelist);
1313                         }
1314                         list_add_tail(&rq->queuelist, &plug->mq_list);
1315                 } else /* is_sync */
1316                         old_rq = rq;
1317                 blk_mq_put_ctx(data.ctx);
1318                 if (!old_rq)
1319                         return;
1320                 if (!blk_mq_direct_issue_request(old_rq))
1321                         return;
1322                 blk_mq_insert_request(old_rq, false, true, true);
1323                 return;
1324         }
1325
1326         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1327                 /*
1328                  * For a SYNC request, send it to the hardware immediately. For
1329                  * an ASYNC request, just ensure that we run it later on. The
1330                  * latter allows for merging opportunities and more efficient
1331                  * dispatching.
1332                  */
1333 run_queue:
1334                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1335         }
1336         blk_mq_put_ctx(data.ctx);
1337 }
1338
1339 /*
1340  * Single hardware queue variant. This will attempt to use any per-process
1341  * plug for merging and IO deferral.
1342  */
1343 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1344 {
1345         const int is_sync = rw_is_sync(bio->bi_rw);
1346         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1347         struct blk_plug *plug;
1348         unsigned int request_count = 0;
1349         struct blk_map_ctx data;
1350         struct request *rq;
1351
1352         blk_queue_bounce(q, &bio);
1353
1354         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1355                 bio_io_error(bio);
1356                 return;
1357         }
1358
1359         blk_queue_split(q, &bio, q->bio_split);
1360
1361         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1362             blk_attempt_plug_merge(q, bio, &request_count, NULL))
1363                 return;
1364
1365         rq = blk_mq_map_request(q, bio, &data);
1366         if (unlikely(!rq))
1367                 return;
1368
1369         if (unlikely(is_flush_fua)) {
1370                 blk_mq_bio_to_request(rq, bio);
1371                 blk_insert_flush(rq);
1372                 goto run_queue;
1373         }
1374
1375         /*
1376          * If a task plug exists, utilize it (this is completely lockless)
1377          * to temporarily store requests until the task is either done or
1378          * scheduled away.
1379          */
1380         plug = current->plug;
1381         if (plug) {
1382                 blk_mq_bio_to_request(rq, bio);
1383                 if (list_empty(&plug->mq_list))
1384                         trace_block_plug(q);
1385                 else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1386                         blk_flush_plug_list(plug, false);
1387                         trace_block_plug(q);
1388                 }
1389                 list_add_tail(&rq->queuelist, &plug->mq_list);
1390                 blk_mq_put_ctx(data.ctx);
1391                 return;
1392         }
1393
1394         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1395                 /*
1396                  * For a SYNC request, send it to the hardware immediately. For
1397                  * an ASYNC request, just ensure that we run it later on. The
1398                  * latter allows for merging opportunities and more efficient
1399                  * dispatching.
1400                  */
1401 run_queue:
1402                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1403         }
1404
1405         blk_mq_put_ctx(data.ctx);
1406 }
1407
1408 /*
1409  * Default mapping to a software queue, since we use one per CPU.
1410  */
1411 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1412 {
1413         return q->queue_hw_ctx[q->mq_map[cpu]];
1414 }
1415 EXPORT_SYMBOL(blk_mq_map_queue);
1416
1417 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1418                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1419 {
1420         struct page *page;
1421
1422         if (tags->rqs && set->ops->exit_request) {
1423                 int i;
1424
1425                 for (i = 0; i < tags->nr_tags; i++) {
1426                         if (!tags->rqs[i])
1427                                 continue;
1428                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1429                                                 hctx_idx, i);
1430                         tags->rqs[i] = NULL;
1431                 }
1432         }
1433
1434         while (!list_empty(&tags->page_list)) {
1435                 page = list_first_entry(&tags->page_list, struct page, lru);
1436                 list_del_init(&page->lru);
1437                 /*
1438                  * Remove kmemleak object previously allocated in
1439                  * blk_mq_init_rq_map().
1440                  */
1441                 kmemleak_free(page_address(page));
1442                 __free_pages(page, page->private);
1443         }
1444
1445         kfree(tags->rqs);
1446
1447         blk_mq_free_tags(tags);
1448 }
1449
1450 static size_t order_to_size(unsigned int order)
1451 {
1452         return (size_t)PAGE_SIZE << order;
1453 }
1454
1455 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1456                 unsigned int hctx_idx)
1457 {
1458         struct blk_mq_tags *tags;
1459         unsigned int i, j, entries_per_page, max_order = 4;
1460         size_t rq_size, left;
1461
1462         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1463                                 set->numa_node,
1464                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1465         if (!tags)
1466                 return NULL;
1467
1468         INIT_LIST_HEAD(&tags->page_list);
1469
1470         tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1471                                  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1472                                  set->numa_node);
1473         if (!tags->rqs) {
1474                 blk_mq_free_tags(tags);
1475                 return NULL;
1476         }
1477
1478         /*
1479          * rq_size is the size of the request plus driver payload, rounded
1480          * to the cacheline size
1481          */
1482         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1483                                 cache_line_size());
1484         left = rq_size * set->queue_depth;
1485
1486         for (i = 0; i < set->queue_depth; ) {
1487                 int this_order = max_order;
1488                 struct page *page;
1489                 int to_do;
1490                 void *p;
1491
1492                 while (left < order_to_size(this_order - 1) && this_order)
1493                         this_order--;
1494
1495                 do {
1496                         page = alloc_pages_node(set->numa_node,
1497                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1498                                 this_order);
1499                         if (page)
1500                                 break;
1501                         if (!this_order--)
1502                                 break;
1503                         if (order_to_size(this_order) < rq_size)
1504                                 break;
1505                 } while (1);
1506
1507                 if (!page)
1508                         goto fail;
1509
1510                 page->private = this_order;
1511                 list_add_tail(&page->lru, &tags->page_list);
1512
1513                 p = page_address(page);
1514                 /*
1515                  * Allow kmemleak to scan these pages as they contain pointers
1516                  * to additional allocations like via ops->init_request().
1517                  */
1518                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
1519                 entries_per_page = order_to_size(this_order) / rq_size;
1520                 to_do = min(entries_per_page, set->queue_depth - i);
1521                 left -= to_do * rq_size;
1522                 for (j = 0; j < to_do; j++) {
1523                         tags->rqs[i] = p;
1524                         if (set->ops->init_request) {
1525                                 if (set->ops->init_request(set->driver_data,
1526                                                 tags->rqs[i], hctx_idx, i,
1527                                                 set->numa_node)) {
1528                                         tags->rqs[i] = NULL;
1529                                         goto fail;
1530                                 }
1531                         }
1532
1533                         p += rq_size;
1534                         i++;
1535                 }
1536         }
1537         return tags;
1538
1539 fail:
1540         blk_mq_free_rq_map(set, tags, hctx_idx);
1541         return NULL;
1542 }
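
/*
 * Worked example (illustrative, assuming 4KB pages and a hypothetical
 * rq_size of 512 bytes after cacheline rounding): order_to_size(4) is 64KB,
 * so a max-order chunk holds 64KB / 512 == 128 requests, and a queue depth
 * of 256 needs two such chunks (or smaller orders if allocation fails).
 */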
1543
1544 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1545 {
1546         kfree(bitmap->map);
1547 }
1548
1549 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1550 {
1551         unsigned int bpw = 8, total, num_maps, i;
1552
1553         bitmap->bits_per_word = bpw;
1554
1555         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1556         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1557                                         GFP_KERNEL, node);
1558         if (!bitmap->map)
1559                 return -ENOMEM;
1560
1561         total = nr_cpu_ids;
1562         for (i = 0; i < num_maps; i++) {
1563                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1564                 total -= bitmap->map[i].depth;
1565         }
1566
1567         return 0;
1568 }
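
/*
 * Worked example (illustrative): with nr_cpu_ids == 20 and bpw == 8,
 * num_maps == ALIGN(20, 8) / 8 == 3 and the per-word depths come out as
 * 8, 8 and 4, so exactly nr_cpu_ids bits are usable across the map.
 */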
1569
1570 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1571 {
1572         struct request_queue *q = hctx->queue;
1573         struct blk_mq_ctx *ctx;
1574         LIST_HEAD(tmp);
1575
1576         /*
1577          * Move ctx entries to new CPU, if this one is going away.
1578          */
1579         ctx = __blk_mq_get_ctx(q, cpu);
1580
1581         spin_lock(&ctx->lock);
1582         if (!list_empty(&ctx->rq_list)) {
1583                 list_splice_init(&ctx->rq_list, &tmp);
1584                 blk_mq_hctx_clear_pending(hctx, ctx);
1585         }
1586         spin_unlock(&ctx->lock);
1587
1588         if (list_empty(&tmp))
1589                 return NOTIFY_OK;
1590
1591         ctx = blk_mq_get_ctx(q);
1592         spin_lock(&ctx->lock);
1593
1594         while (!list_empty(&tmp)) {
1595                 struct request *rq;
1596
1597                 rq = list_first_entry(&tmp, struct request, queuelist);
1598                 rq->mq_ctx = ctx;
1599                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1600         }
1601
1602         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1603         blk_mq_hctx_mark_pending(hctx, ctx);
1604
1605         spin_unlock(&ctx->lock);
1606
1607         blk_mq_run_hw_queue(hctx, true);
1608         blk_mq_put_ctx(ctx);
1609         return NOTIFY_OK;
1610 }
1611
1612 static int blk_mq_hctx_notify(void *data, unsigned long action,
1613                               unsigned int cpu)
1614 {
1615         struct blk_mq_hw_ctx *hctx = data;
1616
1617         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1618                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1619
1620         /*
1621          * In the case of a CPU coming online, tags may be reallocated
1622          * in blk_mq_map_swqueue() after the mapping is updated.
1623          */
1624
1625         return NOTIFY_OK;
1626 }
1627
1628 /* hctx->ctxs will be freed in queue's release handler */
1629 static void blk_mq_exit_hctx(struct request_queue *q,
1630                 struct blk_mq_tag_set *set,
1631                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1632 {
1633         unsigned flush_start_tag = set->queue_depth;
1634
1635         blk_mq_tag_idle(hctx);
1636
1637         if (set->ops->exit_request)
1638                 set->ops->exit_request(set->driver_data,
1639                                        hctx->fq->flush_rq, hctx_idx,
1640                                        flush_start_tag + hctx_idx);
1641
1642         if (set->ops->exit_hctx)
1643                 set->ops->exit_hctx(hctx, hctx_idx);
1644
1645         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1646         blk_free_flush_queue(hctx->fq);
1647         blk_mq_free_bitmap(&hctx->ctx_map);
1648 }
1649
1650 static void blk_mq_exit_hw_queues(struct request_queue *q,
1651                 struct blk_mq_tag_set *set, int nr_queue)
1652 {
1653         struct blk_mq_hw_ctx *hctx;
1654         unsigned int i;
1655
1656         queue_for_each_hw_ctx(q, hctx, i) {
1657                 if (i == nr_queue)
1658                         break;
1659                 blk_mq_exit_hctx(q, set, hctx, i);
1660         }
1661 }
1662
1663 static void blk_mq_free_hw_queues(struct request_queue *q,
1664                 struct blk_mq_tag_set *set)
1665 {
1666         struct blk_mq_hw_ctx *hctx;
1667         unsigned int i;
1668
1669         queue_for_each_hw_ctx(q, hctx, i)
1670                 free_cpumask_var(hctx->cpumask);
1671 }
1672
1673 static int blk_mq_init_hctx(struct request_queue *q,
1674                 struct blk_mq_tag_set *set,
1675                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1676 {
1677         int node;
1678         unsigned flush_start_tag = set->queue_depth;
1679
1680         node = hctx->numa_node;
1681         if (node == NUMA_NO_NODE)
1682                 node = hctx->numa_node = set->numa_node;
1683
1684         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1685         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1686         spin_lock_init(&hctx->lock);
1687         INIT_LIST_HEAD(&hctx->dispatch);
1688         hctx->queue = q;
1689         hctx->queue_num = hctx_idx;
1690         hctx->flags = set->flags;
1691
1692         blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1693                                         blk_mq_hctx_notify, hctx);
1694         blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1695
1696         hctx->tags = set->tags[hctx_idx];
1697
1698         /*
1699          * Allocate space for all possible CPUs to avoid allocating at
1700          * runtime.
1701          */
1702         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1703                                         GFP_KERNEL, node);
1704         if (!hctx->ctxs)
1705                 goto unregister_cpu_notifier;
1706
1707         if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1708                 goto free_ctxs;
1709
1710         hctx->nr_ctx = 0;
1711
1712         if (set->ops->init_hctx &&
1713             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1714                 goto free_bitmap;
1715
1716         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1717         if (!hctx->fq)
1718                 goto exit_hctx;
1719
1720         if (set->ops->init_request &&
1721             set->ops->init_request(set->driver_data,
1722                                    hctx->fq->flush_rq, hctx_idx,
1723                                    flush_start_tag + hctx_idx, node))
1724                 goto free_fq;
1725
1726         return 0;
1727
1728  free_fq:
1729         blk_free_flush_queue(hctx->fq);
1730  exit_hctx:
1731         if (set->ops->exit_hctx)
1732                 set->ops->exit_hctx(hctx, hctx_idx);
1733  free_bitmap:
1734         blk_mq_free_bitmap(&hctx->ctx_map);
1735  free_ctxs:
1736         kfree(hctx->ctxs);
1737  unregister_cpu_notifier:
1738         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1739
1740         return -1;
1741 }
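
/*
 * Editorial sketch, not part of the original file: blk_mq_init_hctx()
 * above uses the usual kernel "goto unwind" pattern - each failure label
 * releases only what was set up before it, in reverse order.  A stripped
 * down, hypothetical two-step version of the same shape:
 */
static int example_two_step_init(void **a, void **b)
{
        *a = kmalloc(32, GFP_KERNEL);
        if (!*a)
                goto fail;

        *b = kmalloc(32, GFP_KERNEL);
        if (!*b)
                goto free_a;

        return 0;

 free_a:
        kfree(*a);
 fail:
        return -ENOMEM;
}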
1742
1743 static int blk_mq_init_hw_queues(struct request_queue *q,
1744                 struct blk_mq_tag_set *set)
1745 {
1746         struct blk_mq_hw_ctx *hctx;
1747         unsigned int i;
1748
1749         /*
1750          * Initialize hardware queues
1751          */
1752         queue_for_each_hw_ctx(q, hctx, i) {
1753                 if (blk_mq_init_hctx(q, set, hctx, i))
1754                         break;
1755         }
1756
1757         if (i == q->nr_hw_queues)
1758                 return 0;
1759
1760         /*
1761          * Init failed
1762          */
1763         blk_mq_exit_hw_queues(q, set, i);
1764
1765         return 1;
1766 }
1767
1768 static void blk_mq_init_cpu_queues(struct request_queue *q,
1769                                    unsigned int nr_hw_queues)
1770 {
1771         unsigned int i;
1772
1773         for_each_possible_cpu(i) {
1774                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1775                 struct blk_mq_hw_ctx *hctx;
1776
1777                 memset(__ctx, 0, sizeof(*__ctx));
1778                 __ctx->cpu = i;
1779                 spin_lock_init(&__ctx->lock);
1780                 INIT_LIST_HEAD(&__ctx->rq_list);
1781                 __ctx->queue = q;
1782
1783                 /* If the CPU isn't online, it is mapped to the first hctx */
1784                 if (!cpu_online(i))
1785                         continue;
1786
1787                 hctx = q->mq_ops->map_queue(q, i);
1788
1789                 /*
1790                  * Set local node, IFF we have more than one hw queue. If
1791                  * not, we remain on the home node of the device
1792                  */
1793                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1794                         hctx->numa_node = cpu_to_node(i);
1795         }
1796 }
1797
1798 static void blk_mq_map_swqueue(struct request_queue *q,
1799                                const struct cpumask *online_mask)
1800 {
1801         unsigned int i;
1802         struct blk_mq_hw_ctx *hctx;
1803         struct blk_mq_ctx *ctx;
1804         struct blk_mq_tag_set *set = q->tag_set;
1805
1806         /*
1807          * Avoid others reading an incomplete hctx->cpumask through sysfs
1808          */
1809         mutex_lock(&q->sysfs_lock);
1810
1811         queue_for_each_hw_ctx(q, hctx, i) {
1812                 cpumask_clear(hctx->cpumask);
1813                 hctx->nr_ctx = 0;
1814         }
1815
1816         /*
1817          * Map software to hardware queues
1818          */
1819         queue_for_each_ctx(q, ctx, i) {
1820                 /* If the CPU isn't online, it is mapped to the first hctx */
1821                 if (!cpumask_test_cpu(i, online_mask))
1822                         continue;
1823
1824                 hctx = q->mq_ops->map_queue(q, i);
1825                 cpumask_set_cpu(i, hctx->cpumask);
1826                 ctx->index_hw = hctx->nr_ctx;
1827                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1828         }
1829
1830         mutex_unlock(&q->sysfs_lock);
1831
1832         queue_for_each_hw_ctx(q, hctx, i) {
1833                 struct blk_mq_ctxmap *map = &hctx->ctx_map;
1834
1835                 /*
1836                  * If no software queues are mapped to this hardware queue,
1837                  * disable it and free the request entries.
1838                  */
1839                 if (!hctx->nr_ctx) {
1840                         if (set->tags[i]) {
1841                                 blk_mq_free_rq_map(set, set->tags[i], i);
1842                                 set->tags[i] = NULL;
1843                         }
1844                         hctx->tags = NULL;
1845                         continue;
1846                 }
1847
1848                 /* an unmapped hw queue can be remapped after the CPU topology changes */
1849                 if (!set->tags[i])
1850                         set->tags[i] = blk_mq_init_rq_map(set, i);
1851                 hctx->tags = set->tags[i];
1852                 WARN_ON(!hctx->tags);
1853
1854                 /*
1855                  * Set the map size to the number of mapped software queues.
1856                  * This is more accurate and more efficient than looping
1857                  * over all possibly mapped software queues.
1858                  */
1859                 map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
1860
1861                 /*
1862                  * Initialize batch roundrobin counts
1863                  */
1864                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1865                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1866         }
1867
1868         queue_for_each_ctx(q, ctx, i) {
1869                 if (!cpumask_test_cpu(i, online_mask))
1870                         continue;
1871
1872                 hctx = q->mq_ops->map_queue(q, i);
1873                 cpumask_set_cpu(i, hctx->tags->cpumask);
1874         }
1875 }
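
/*
 * Editorial sketch, not part of the original file: the mapping loop above
 * relies on the driver's map_queue callback to pick a hardware queue for
 * each online CPU.  A minimal, hypothetical policy (much simpler than the
 * topology-aware default map) would just spread CPUs round-robin across
 * the hardware queues:
 */
static unsigned int example_cpu_to_hwq(unsigned int cpu,
                                       unsigned int nr_hw_queues)
{
        /* Round-robin assignment of CPU index to hardware queue index. */
        return cpu % nr_hw_queues;
}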
1876
1877 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1878 {
1879         struct blk_mq_hw_ctx *hctx;
1880         struct request_queue *q;
1881         bool shared;
1882         int i;
1883
1884         if (set->tag_list.next == set->tag_list.prev)
1885                 shared = false;
1886         else
1887                 shared = true;
1888
1889         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1890                 blk_mq_freeze_queue(q);
1891
1892                 queue_for_each_hw_ctx(q, hctx, i) {
1893                         if (shared)
1894                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1895                         else
1896                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1897                 }
1898                 blk_mq_unfreeze_queue(q);
1899         }
1900 }
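
/*
 * Editorial sketch, not part of the original file: the next == prev test
 * above treats an empty or single-entry tag_list as "not shared".
 * Assuming only list_empty() and list_is_singular() from <linux/list.h>,
 * the same decision could be spelled out as:
 */
static bool example_tags_are_shared(struct list_head *tag_list)
{
        /* Tags are shared once more than one queue uses this tag set. */
        return !list_empty(tag_list) && !list_is_singular(tag_list);
}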
1901
1902 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1903 {
1904         struct blk_mq_tag_set *set = q->tag_set;
1905
1906         mutex_lock(&set->tag_list_lock);
1907         list_del_init(&q->tag_set_list);
1908         blk_mq_update_tag_set_depth(set);
1909         mutex_unlock(&set->tag_list_lock);
1910 }
1911
1912 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1913                                      struct request_queue *q)
1914 {
1915         q->tag_set = set;
1916
1917         mutex_lock(&set->tag_list_lock);
1918         list_add_tail(&q->tag_set_list, &set->tag_list);
1919         blk_mq_update_tag_set_depth(set);
1920         mutex_unlock(&set->tag_list_lock);
1921 }
1922
1923 /*
1924  * This is the actual release handler for mq, but we run it from the
1925  * request queue's release handler to avoid use-after-free headaches:
1926  * q->mq_kobj shouldn't have been introduced, but we can't group the
1927  * ctx/hctx kobjects without it.
1928  */
1929 void blk_mq_release(struct request_queue *q)
1930 {
1931         struct blk_mq_hw_ctx *hctx;
1932         unsigned int i;
1933
1934         /* hctx kobj stays in hctx */
1935         queue_for_each_hw_ctx(q, hctx, i) {
1936                 if (!hctx)
1937                         continue;
1938                 kfree(hctx->ctxs);
1939                 kfree(hctx);
1940         }
1941
1942         kfree(q->mq_map);
1943         q->mq_map = NULL;
1944
1945         kfree(q->queue_hw_ctx);
1946
1947         /* ctx kobj stays in queue_ctx */
1948         free_percpu(q->queue_ctx);
1949 }
1950
1951 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1952 {
1953         struct request_queue *uninit_q, *q;
1954
1955         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1956         if (!uninit_q)
1957                 return ERR_PTR(-ENOMEM);
1958
1959         q = blk_mq_init_allocated_queue(set, uninit_q);
1960         if (IS_ERR(q))
1961                 blk_cleanup_queue(uninit_q);
1962
1963         return q;
1964 }
1965 EXPORT_SYMBOL(blk_mq_init_queue);
1966
1967 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
1968                                                   struct request_queue *q)
1969 {
1970         struct blk_mq_hw_ctx **hctxs;
1971         struct blk_mq_ctx __percpu *ctx;
1972         unsigned int *map;
1973         int i;
1974
1975         ctx = alloc_percpu(struct blk_mq_ctx);
1976         if (!ctx)
1977                 return ERR_PTR(-ENOMEM);
1978
1979         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1980                         set->numa_node);
1981
1982         if (!hctxs)
1983                 goto err_percpu;
1984
1985         map = blk_mq_make_queue_map(set);
1986         if (!map)
1987                 goto err_map;
1988
1989         for (i = 0; i < set->nr_hw_queues; i++) {
1990                 int node = blk_mq_hw_queue_to_node(map, i);
1991
1992                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1993                                         GFP_KERNEL, node);
1994                 if (!hctxs[i])
1995                         goto err_hctxs;
1996
1997                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1998                                                 node))
1999                         goto err_hctxs;
2000
2001                 atomic_set(&hctxs[i]->nr_active, 0);
2002                 hctxs[i]->numa_node = node;
2003                 hctxs[i]->queue_num = i;
2004         }
2005
2006         /*
2007          * Init percpu_ref in atomic mode so that it's faster to shutdown.
2008          * See blk_register_queue() for details.
2009          */
2010         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
2011                             PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
2012                 goto err_hctxs;
2013
2014         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
2015         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2016
2017         q->nr_queues = nr_cpu_ids;
2018         q->nr_hw_queues = set->nr_hw_queues;
2019         q->mq_map = map;
2020
2021         q->queue_ctx = ctx;
2022         q->queue_hw_ctx = hctxs;
2023
2024         q->mq_ops = set->ops;
2025         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2026
2027         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2028                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2029
2030         q->sg_reserved_size = INT_MAX;
2031
2032         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
2033         INIT_LIST_HEAD(&q->requeue_list);
2034         spin_lock_init(&q->requeue_lock);
2035
2036         if (q->nr_hw_queues > 1)
2037                 blk_queue_make_request(q, blk_mq_make_request);
2038         else
2039                 blk_queue_make_request(q, blk_sq_make_request);
2040
2041         /*
2042          * Do this after blk_queue_make_request() overrides it...
2043          */
2044         q->nr_requests = set->queue_depth;
2045
2046         if (set->ops->complete)
2047                 blk_queue_softirq_done(q, set->ops->complete);
2048
2049         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2050
2051         if (blk_mq_init_hw_queues(q, set))
2052                 goto err_hctxs;
2053
2054         get_online_cpus();
2055         mutex_lock(&all_q_mutex);
2056
2057         list_add_tail(&q->all_q_node, &all_q_list);
2058         blk_mq_add_queue_tag_set(set, q);
2059         blk_mq_map_swqueue(q, cpu_online_mask);
2060
2061         mutex_unlock(&all_q_mutex);
2062         put_online_cpus();
2063
2064         return q;
2065
2066 err_hctxs:
2067         kfree(map);
2068         for (i = 0; i < set->nr_hw_queues; i++) {
2069                 if (!hctxs[i])
2070                         break;
2071                 free_cpumask_var(hctxs[i]->cpumask);
2072                 kfree(hctxs[i]);
2073         }
2074 err_map:
2075         kfree(hctxs);
2076 err_percpu:
2077         free_percpu(ctx);
2078         return ERR_PTR(-ENOMEM);
2079 }
2080 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2081
2082 void blk_mq_free_queue(struct request_queue *q)
2083 {
2084         struct blk_mq_tag_set   *set = q->tag_set;
2085
2086         mutex_lock(&all_q_mutex);
2087         list_del_init(&q->all_q_node);
2088         mutex_unlock(&all_q_mutex);
2089
2090         blk_mq_del_queue_tag_set(q);
2091
2092         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2093         blk_mq_free_hw_queues(q, set);
2094
2095         percpu_ref_exit(&q->mq_usage_counter);
2096 }
2097
2098 /* Basically redo blk_mq_init_queue with queue frozen */
2099 static void blk_mq_queue_reinit(struct request_queue *q,
2100                                 const struct cpumask *online_mask)
2101 {
2102         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2103
2104         blk_mq_sysfs_unregister(q);
2105
2106         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
2107
2108         /*
2109          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2110          * we should change hctx->numa_node according to the new topology (this
2111          * involves freeing and re-allocating memory; is it worth doing?)
2112          */
2113
2114         blk_mq_map_swqueue(q, online_mask);
2115
2116         blk_mq_sysfs_register(q);
2117 }
2118
2119 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
2120                                       unsigned long action, void *hcpu)
2121 {
2122         struct request_queue *q;
2123         int cpu = (unsigned long)hcpu;
2124         /*
2125          * New online cpumask which is going to be set in this hotplug event.
2126          * Declare this cpumask as static since cpu-hotplug operations are invoked
2127          * one at a time, and dynamically allocating it here could fail.
2128          */
2129         static struct cpumask online_new;
2130
2131         /*
2132          * Before a hot-added CPU starts handling requests, new mappings
2133          * must be established.  Otherwise, requests inserted on that CPU's
2134          * software queue might never be dispatched.
2135          *
2136          * For example, there is a single hw queue (hctx) and two CPU queues
2137          * (ctx0 for CPU0, and ctx1 for CPU1).
2138          *
2139          * Now CPU1 has just been onlined and a request is inserted into
2140          * ctx1->rq_list, setting bit0 in the pending bitmap because
2141          * ctx1->index_hw is still zero.
2142          *
2143          * Then, while running the hw queue, flush_busy_ctxs() finds bit0 set
2144          * in the pending bitmap and tries to retrieve requests from
2145          * hctx->ctxs[0]->rq_list.  But hctx->ctxs[0] points to ctx0, so the
2146          * request in ctx1->rq_list is ignored.
2147          */
2148         switch (action & ~CPU_TASKS_FROZEN) {
2149         case CPU_DEAD:
2150         case CPU_UP_CANCELED:
2151                 cpumask_copy(&online_new, cpu_online_mask);
2152                 break;
2153         case CPU_UP_PREPARE:
2154                 cpumask_copy(&online_new, cpu_online_mask);
2155                 cpumask_set_cpu(cpu, &online_new);
2156                 break;
2157         default:
2158                 return NOTIFY_OK;
2159         }
2160
2161         mutex_lock(&all_q_mutex);
2162
2163         /*
2164          * We need to freeze and reinit all existing queues.  Freezing
2165          * involves a synchronous wait for an RCU grace period, and doing it
2166          * one by one may take a long time.  Start freezing all queues in
2167          * one swoop and then wait for the completions so that freezing can
2168          * take place in parallel.
2169          */
2170         list_for_each_entry(q, &all_q_list, all_q_node)
2171                 blk_mq_freeze_queue_start(q);
2172         list_for_each_entry(q, &all_q_list, all_q_node) {
2173                 blk_mq_freeze_queue_wait(q);
2174
2175                 /*
2176                  * timeout handler can't touch hw queue during the
2177                  * reinitialization
2178                  */
2179                 del_timer_sync(&q->timeout);
2180         }
2181
2182         list_for_each_entry(q, &all_q_list, all_q_node)
2183                 blk_mq_queue_reinit(q, &online_new);
2184
2185         list_for_each_entry(q, &all_q_list, all_q_node)
2186                 blk_mq_unfreeze_queue(q);
2187
2188         mutex_unlock(&all_q_mutex);
2189         return NOTIFY_OK;
2190 }
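
/*
 * Editorial sketch, not part of the original file: the notifier above
 * deliberately splits freezing into two passes - start every freeze
 * first, then wait - so the RCU grace periods of all queues overlap
 * instead of being paid one queue at a time.  The batching idea in
 * isolation, using the same start/wait helpers:
 */
static void example_batch_freeze(struct list_head *queues)
{
        struct request_queue *q;

        /* Pass 1: start every freeze without waiting for it to finish. */
        list_for_each_entry(q, queues, all_q_node)
                blk_mq_freeze_queue_start(q);

        /* Pass 2: only now wait until each queue has actually drained. */
        list_for_each_entry(q, queues, all_q_node)
                blk_mq_freeze_queue_wait(q);
}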
2191
2192 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2193 {
2194         int i;
2195
2196         for (i = 0; i < set->nr_hw_queues; i++) {
2197                 set->tags[i] = blk_mq_init_rq_map(set, i);
2198                 if (!set->tags[i])
2199                         goto out_unwind;
2200         }
2201
2202         return 0;
2203
2204 out_unwind:
2205         while (--i >= 0)
2206                 blk_mq_free_rq_map(set, set->tags[i], i);
2207
2208         return -ENOMEM;
2209 }
2210
2211 /*
2212  * Allocate the request maps associated with this tag_set. Note that this
2213  * may reduce the depth asked for, if memory is tight. set->queue_depth
2214  * will be updated to reflect the allocated depth.
2215  */
2216 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2217 {
2218         unsigned int depth;
2219         int err;
2220
2221         depth = set->queue_depth;
2222         do {
2223                 err = __blk_mq_alloc_rq_maps(set);
2224                 if (!err)
2225                         break;
2226
2227                 set->queue_depth >>= 1;
2228                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2229                         err = -ENOMEM;
2230                         break;
2231                 }
2232         } while (set->queue_depth);
2233
2234         if (!set->queue_depth || err) {
2235                 pr_err("blk-mq: failed to allocate request map\n");
2236                 return -ENOMEM;
2237         }
2238
2239         if (depth != set->queue_depth)
2240                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2241                                                 depth, set->queue_depth);
2242
2243         return 0;
2244 }
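
/*
 * Editorial sketch, not part of the original file: the retry loop above
 * halves queue_depth after every failed allocation and gives up once the
 * depth would drop below reserved_tags + BLK_MQ_TAG_MIN.  Assuming a
 * requested depth of 256 and a hypothetical floor of 5, the attempted
 * depths are 256, 128, 64, 32, 16 and 8 - six attempts in total.  The
 * helper below just counts those attempts.
 */
static unsigned int example_nr_depth_attempts(unsigned int depth,
                                              unsigned int min_depth)
{
        unsigned int attempts = 0;

        /* Mirror the back-off above: halve until the floor is crossed. */
        while (depth >= min_depth) {
                attempts++;
                depth >>= 1;
        }

        return attempts;
}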
2245
2246 struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
2247 {
2248         return tags->cpumask;
2249 }
2250 EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
2251
2252 /*
2253  * Alloc a tag set to be associated with one or more request queues.
2254  * May fail with EINVAL for various error conditions. May adjust the
2255  * requested depth down, if it is too large. In that case, the adjusted
2256  * value will be stored in set->queue_depth.
2257  */
2258 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2259 {
2260         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2261
2262         if (!set->nr_hw_queues)
2263                 return -EINVAL;
2264         if (!set->queue_depth)
2265                 return -EINVAL;
2266         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2267                 return -EINVAL;
2268
2269         if (!set->ops->queue_rq || !set->ops->map_queue)
2270                 return -EINVAL;
2271
2272         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2273                 pr_info("blk-mq: reduced tag depth to %u\n",
2274                         BLK_MQ_MAX_DEPTH);
2275                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2276         }
2277
2278         /*
2279          * If a crashdump is active, then we are potentially in a very
2280  * memory-constrained environment. Limit us to 1 queue and
2281          * 64 tags to prevent using too much memory.
2282          */
2283         if (is_kdump_kernel()) {
2284                 set->nr_hw_queues = 1;
2285                 set->queue_depth = min(64U, set->queue_depth);
2286         }
2287
2288         set->tags = kmalloc_node(set->nr_hw_queues *
2289                                  sizeof(struct blk_mq_tags *),
2290                                  GFP_KERNEL, set->numa_node);
2291         if (!set->tags)
2292                 return -ENOMEM;
2293
2294         if (blk_mq_alloc_rq_maps(set))
2295                 goto enomem;
2296
2297         mutex_init(&set->tag_list_lock);
2298         INIT_LIST_HEAD(&set->tag_list);
2299
2300         return 0;
2301 enomem:
2302         kfree(set->tags);
2303         set->tags = NULL;
2304         return -ENOMEM;
2305 }
2306 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
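
/*
 * Editorial sketch, not part of the original file: a hypothetical driver
 * fills in a blk_mq_tag_set, allocates the set, and then builds a queue
 * on top of it.  The ops pointer must provide at least queue_rq and
 * map_queue; the depth, flags and other numbers below are illustrative
 * assumptions, not values taken from any real driver.  Teardown is the
 * reverse order: blk_cleanup_queue() followed by blk_mq_free_tag_set().
 */
static struct blk_mq_tag_set example_tag_set;

static struct request_queue *example_setup_queue(struct blk_mq_ops *ops,
                                                 void *driver_data)
{
        struct request_queue *q;
        int ret;

        memset(&example_tag_set, 0, sizeof(example_tag_set));
        example_tag_set.ops = ops;
        example_tag_set.nr_hw_queues = 1;       /* one hardware queue */
        example_tag_set.queue_depth = 64;       /* tags per hardware queue */
        example_tag_set.numa_node = NUMA_NO_NODE;
        example_tag_set.cmd_size = 0;           /* no per-request driver payload */
        example_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        example_tag_set.driver_data = driver_data;

        ret = blk_mq_alloc_tag_set(&example_tag_set);
        if (ret)
                return ERR_PTR(ret);

        q = blk_mq_init_queue(&example_tag_set);
        if (IS_ERR(q))
                blk_mq_free_tag_set(&example_tag_set);

        return q;
}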
2307
2308 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2309 {
2310         int i;
2311
2312         for (i = 0; i < set->nr_hw_queues; i++) {
2313                 if (set->tags[i]) {
2314                         blk_mq_free_rq_map(set, set->tags[i], i);
2315                         free_cpumask_var(set->tags[i]->cpumask);
2316                 }
2317         }
2318
2319         kfree(set->tags);
2320         set->tags = NULL;
2321 }
2322 EXPORT_SYMBOL(blk_mq_free_tag_set);
2323
2324 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2325 {
2326         struct blk_mq_tag_set *set = q->tag_set;
2327         struct blk_mq_hw_ctx *hctx;
2328         int i, ret;
2329
2330         if (!set || nr > set->queue_depth)
2331                 return -EINVAL;
2332
2333         ret = 0;
2334         queue_for_each_hw_ctx(q, hctx, i) {
2335                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2336                 if (ret)
2337                         break;
2338         }
2339
2340         if (!ret)
2341                 q->nr_requests = nr;
2342
2343         return ret;
2344 }
2345
2346 void blk_mq_disable_hotplug(void)
2347 {
2348         mutex_lock(&all_q_mutex);
2349 }
2350
2351 void blk_mq_enable_hotplug(void)
2352 {
2353         mutex_unlock(&all_q_mutex);
2354 }
2355
2356 static int __init blk_mq_init(void)
2357 {
2358         blk_mq_cpu_init();
2359
2360         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2361
2362         return 0;
2363 }
2364 subsys_initcall(blk_mq_init);