/* drivers/block/null_blk.c */
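/*
 * null_blk: a block device driver with no backing storage; writes are
 * discarded and reads complete immediately. It is used for exercising and
 * benchmarking the block layer in one of three queue modes: bio-based,
 * legacy request, or blk-mq.
 *
 * Example usage (a sketch; assumes the driver is built as the null_blk
 * module, and uses only the parameters defined below):
 *
 *   modprobe null_blk nr_devices=1 queue_mode=2 submit_queues=4 \
 *            hw_queue_depth=64 gb=10 bs=4096 irqmode=1
 *
 * This creates /dev/nullb0: a 10 GB multiqueue device with four submission
 * queues, completing requests via softirq.
 */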
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
        struct list_head list;
        struct llist_node ll_list;
        struct call_single_data csd;
        struct request *rq;
        struct bio *bio;
        unsigned int tag;
        struct nullb_queue *nq;
        struct hrtimer timer;
};

struct nullb_queue {
        unsigned long *tag_map;
        wait_queue_head_t wait;
        unsigned int queue_depth;

        struct nullb_cmd *cmds;
};

struct nullb {
        struct list_head list;
        unsigned int index;
        struct request_queue *q;
        struct gendisk *disk;
        struct nvm_dev *ndev;
        struct blk_mq_tag_set tag_set;
        struct hrtimer timer;
        unsigned int queue_depth;
        spinlock_t lock;

        struct nullb_queue *queues;
        unsigned int nr_queues;
        char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

enum {
        NULL_IRQ_NONE           = 0,
        NULL_IRQ_SOFTIRQ        = 1,
        NULL_IRQ_TIMER          = 2,
};

enum {
        NULL_Q_BIO              = 0,
        NULL_Q_RQ               = 1,
        NULL_Q_MQ               = 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
        int ret, new_val;

        ret = kstrtoint(str, 10, &new_val);
        if (ret)
                return -EINVAL;

        if (new_val < min || new_val > max)
                return -EINVAL;

        *val = new_val;
        return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
        return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
        .set    = null_set_queue_mode,
        .get    = param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
        return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
                                        NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
        .set    = null_set_irqmode,
        .get    = param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

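/*
 * Tag management for the bio and legacy-request paths (blk-mq carries its
 * command in the request pdu instead): each nullb_queue has a bitmap of free
 * tags sized to the queue depth. get_tag()/put_tag() use atomic bitops, so
 * no spinlock is needed on the hot path; when no tag is free, waiters sleep
 * on nq->wait until put_tag() wakes them.
 */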
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
        clear_bit_unlock(tag, nq->tag_map);

        if (waitqueue_active(&nq->wait))
                wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
        unsigned int tag;

        do {
                tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
                if (tag >= nq->queue_depth)
                        return -1U;
        } while (test_and_set_bit_lock(tag, nq->tag_map));

        return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
        put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
        struct nullb_cmd *cmd;
        unsigned int tag;

        tag = get_tag(nq);
        if (tag != -1U) {
                cmd = &nq->cmds[tag];
                cmd->tag = tag;
                cmd->nq = nq;
                if (irqmode == NULL_IRQ_TIMER) {
                        hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
                                     HRTIMER_MODE_REL);
                        cmd->timer.function = null_cmd_timer_expired;
                }
                return cmd;
        }

        return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
        struct nullb_cmd *cmd;
        DEFINE_WAIT(wait);

        cmd = __alloc_cmd(nq);
        if (cmd || !can_wait)
                return cmd;

        do {
                prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
                cmd = __alloc_cmd(nq);
                if (cmd)
                        break;

                io_schedule();
        } while (1);

        finish_wait(&nq->wait, &wait);
        return cmd;
}

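/*
 * Complete a command according to the queue mode: blk-mq requests end with
 * blk_mq_end_request(), legacy requests with blk_end_request_all(), and
 * bio-based submissions with bio_endio(). On the legacy path the queue may
 * have been stopped by null_rq_prep_fn() when tags ran out, so restart it
 * now that a tag is being freed.
 */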
static void end_cmd(struct nullb_cmd *cmd)
{
        struct request_queue *q = NULL;

        if (cmd->rq)
                q = cmd->rq->q;

        switch (queue_mode) {
        case NULL_Q_MQ:
                blk_mq_end_request(cmd->rq, 0);
                return;
        case NULL_Q_RQ:
                INIT_LIST_HEAD(&cmd->rq->queuelist);
                blk_end_request_all(cmd->rq, 0);
                break;
        case NULL_Q_BIO:
                bio_endio(cmd->bio);
                break;
        }

        free_cmd(cmd);

        /* Restart queue if needed, as we are freeing a tag */
        if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
                unsigned long flags;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue_async(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
        end_cmd(container_of(timer, struct nullb_cmd, timer));

        return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
        ktime_t kt = ktime_set(0, completion_nsec);

        hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
        if (queue_mode == NULL_Q_MQ)
                end_cmd(blk_mq_rq_to_pdu(rq));
        else
                end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
        /* Complete IO inline, via softirq, or via timer */
        switch (irqmode) {
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode) {
                case NULL_Q_MQ:
                        blk_mq_complete_request(cmd->rq, cmd->rq->errors);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
                        break;
                case NULL_Q_BIO:
                        /*
                         * XXX: no proper submitting cpu information available.
                         */
                        end_cmd(cmd);
                        break;
                }
                break;
        case NULL_IRQ_NONE:
                end_cmd(cmd);
                break;
        case NULL_IRQ_TIMER:
                null_cmd_end_timer(cmd);
                break;
        }
}

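/*
 * Map the submitting CPU to one of the device's queues by dividing the CPU
 * id by ceil(nr_cpu_ids / nr_queues), spreading CPUs evenly across the
 * available queues. E.g. with 8 possible CPUs and 2 queues, CPUs 0-3 map to
 * queue 0 and CPUs 4-7 map to queue 1.
 */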
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
        int index = 0;

        if (nullb->nr_queues != 1)
                index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

        return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
        struct nullb *nullb = q->queuedata;
        struct nullb_queue *nq = nullb_to_queue(nullb);
        struct nullb_cmd *cmd;

        cmd = alloc_cmd(nq, 1);
        cmd->bio = bio;

        null_handle_cmd(cmd);
        return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
        struct nullb *nullb = q->queuedata;
        struct nullb_queue *nq = nullb_to_queue(nullb);
        struct nullb_cmd *cmd;

        cmd = alloc_cmd(nq, 0);
        if (cmd) {
                cmd->rq = req;
                req->special = cmd;
                return BLKPREP_OK;
        }
        blk_stop_queue(q);

        return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                struct nullb_cmd *cmd = rq->special;

                spin_unlock_irq(q->queue_lock);
                null_handle_cmd(cmd);
                spin_lock_irq(q->queue_lock);
        }
}

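/*
 * blk-mq submission path: the command lives in the request's per-driver
 * payload (blk_mq_rq_to_pdu()), so no separate tag allocation is needed
 * here; the blk-mq tag set already bounds the number of in-flight requests.
 */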
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
{
        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

        if (irqmode == NULL_IRQ_TIMER) {
                hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                cmd->timer.function = null_cmd_timer_expired;
        }
        cmd->rq = bd->rq;
        cmd->nq = hctx->driver_data;

        blk_mq_start_request(bd->rq);

        null_handle_cmd(cmd);
        return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
        BUG_ON(!nullb);
        BUG_ON(!nq);

        init_waitqueue_head(&nq->wait);
        nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int index)
{
        struct nullb *nullb = data;
        struct nullb_queue *nq = &nullb->queues[index];

        hctx->driver_data = nq;
        null_init_queue(nullb, nq);
        nullb->nr_queues++;

        return 0;
}

static struct blk_mq_ops null_mq_ops = {
        .queue_rq       = null_queue_rq,
        .init_hctx      = null_init_hctx,
        .complete       = null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
        kfree(nq->tag_map);
        kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
        int i;

        for (i = 0; i < nullb->nr_queues; i++)
                cleanup_queue(&nullb->queues[i]);

        kfree(nullb->queues);
}

#ifdef CONFIG_NVM

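/*
 * LightNVM (open-channel SSD) emulation: when use_lightnvm is set, the
 * device registers with the LightNVM subsystem instead of exposing a
 * gendisk, and I/O arrives as nvm_rq requests that are mapped onto plain
 * block requests below.
 */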
static void null_lnvm_end_io(struct request *rq, int error)
{
        struct nvm_rq *rqd = rq->end_io_data;

        nvm_end_io(rqd, error);

        blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct bio *bio = rqd->bio;

        rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
        if (IS_ERR(rq))
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->__sector = bio->bi_iter.bi_sector;
        rq->ioprio = bio_prio(bio);

        if (bio_has_data(bio))
                rq->nr_phys_segments = bio_phys_segments(q, bio);

        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

        return 0;
}

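/*
 * Report a synthetic device geometry: the configured capacity is carved into
 * pages of bs bytes, 256 pages per block, and up to 65536 blocks per LUN.
 * For example, with gb=250 and bs=4096 that is 65,536,000 pages and 256,000
 * blocks, giving num_lun = 4 and num_blk = 64,000 blocks per LUN.
 */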
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
        sector_t size = gb * 1024 * 1024 * 1024ULL;
        sector_t blksize;
        struct nvm_id_group *grp;

        id->ver_id = 0x1;
        id->vmnt = 0;
        id->cgrps = 1;
        id->cap = 0x2;
        id->dom = 0x1;

        id->ppaf.blk_offset = 0;
        id->ppaf.blk_len = 16;
        id->ppaf.pg_offset = 16;
        id->ppaf.pg_len = 16;
        id->ppaf.sect_offset = 32;
        id->ppaf.sect_len = 8;
        id->ppaf.pln_offset = 40;
        id->ppaf.pln_len = 8;
        id->ppaf.lun_offset = 48;
        id->ppaf.lun_len = 8;
        id->ppaf.ch_offset = 56;
        id->ppaf.ch_len = 8;

        sector_div(size, bs); /* convert size to pages */
        size >>= 8; /* convert size to blocks (256 pages per block) */
        grp = &id->groups[0];
        grp->mtype = 0;
        grp->fmtype = 0;
        grp->num_ch = 1;
        grp->num_pg = 256;
        blksize = size;
        size >>= 16;
        grp->num_lun = size + 1;
        sector_div(blksize, grp->num_lun);
        grp->num_blk = blksize;
        grp->num_pln = 1;

        grp->fpg_sz = bs;
        grp->csecs = bs;
        grp->trdt = 25000;
        grp->trdm = 25000;
        grp->tprt = 500000;
        grp->tprm = 500000;
        grp->tbet = 1500000;
        grp->tbem = 1500000;
        grp->mpos = 0x010101; /* single plane rwe */
        grp->cpar = hw_queue_depth;

        return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
        mempool_t *virtmem_pool;

        virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
        if (!virtmem_pool) {
                pr_err("null_blk: Unable to create virtual memory pool\n");
                return NULL;
        }

        return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
        mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
                                                        dma_addr_t dma_handler)
{
        mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
        .identity               = null_lnvm_id,
        .submit_io              = null_lnvm_submit_io,

        .create_dma_pool        = null_lnvm_create_dma_pool,
        .destroy_dma_pool       = null_lnvm_destroy_dma_pool,
        .dev_dma_alloc          = null_lnvm_dev_dma_alloc,
        .dev_dma_free           = null_lnvm_dev_dma_free,

        /* Simulate nvme protocol restriction */
        .max_phys_sect          = 64,
};

static int null_nvm_register(struct nullb *nullb)
{
        struct nvm_dev *dev;
        int rv;

        dev = nvm_alloc_dev(0);
        if (!dev)
                return -ENOMEM;

        dev->q = nullb->q;
        memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
        dev->ops = &null_lnvm_dev_ops;

        rv = nvm_register(dev);
        if (rv) {
                kfree(dev);
                return rv;
        }
        nullb->ndev = dev;
        return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
        nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
        return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

static void null_del_dev(struct nullb *nullb)
{
        list_del_init(&nullb->list);

        if (use_lightnvm)
                null_nvm_unregister(nullb);
        else
                del_gendisk(nullb->disk);
        blk_cleanup_queue(nullb->q);
        if (queue_mode == NULL_Q_MQ)
                blk_mq_free_tag_set(&nullb->tag_set);
        if (!use_lightnvm)
                put_disk(nullb->disk);
        cleanup_queues(nullb);
        kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
        return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
        .owner =        THIS_MODULE,
        .open =         null_open,
        .release =      null_release,
};

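/*
 * Preallocate the per-queue command array and its tag bitmap. The bitmap is
 * sized up to a multiple of BITS_PER_LONG so find_first_zero_bit() in
 * get_tag() can scan whole words.
 */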
static int setup_commands(struct nullb_queue *nq)
{
        struct nullb_cmd *cmd;
        int i, tag_size;

        nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
        if (!nq->cmds)
                return -ENOMEM;

        tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
        nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
        if (!nq->tag_map) {
                kfree(nq->cmds);
                return -ENOMEM;
        }

        for (i = 0; i < nq->queue_depth; i++) {
                cmd = &nq->cmds[i];
                INIT_LIST_HEAD(&cmd->list);
                cmd->ll_list.next = NULL;
                cmd->tag = -1U;
        }

        return 0;
}

static int setup_queues(struct nullb *nullb)
{
        nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
                                                                GFP_KERNEL);
        if (!nullb->queues)
                return -ENOMEM;

        nullb->nr_queues = 0;
        nullb->queue_depth = hw_queue_depth;

        return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
        struct nullb_queue *nq;
        int i, ret = 0;

        for (i = 0; i < submit_queues; i++) {
                nq = &nullb->queues[i];

                null_init_queue(nullb, nq);

                ret = setup_commands(nq);
                if (ret)
                        return ret;
                nullb->nr_queues++;
        }
        return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
        struct gendisk *disk;
        sector_t size;

        disk = nullb->disk = alloc_disk_node(1, home_node);
        if (!disk)
                return -ENOMEM;
        size = gb * 1024 * 1024 * 1024ULL;
        set_capacity(disk, size >> 9);

        disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
        disk->major             = null_major;
        disk->first_minor       = nullb->index;
        disk->fops              = &null_fops;
        disk->private_data      = nullb;
        disk->queue             = nullb->q;
        strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

        add_disk(disk);
        return 0;
}

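/*
 * Create one null block device: allocate the nullb, set up its queues for
 * the chosen queue_mode (blk-mq tag set, bio-based make_request, or a legacy
 * request_fn queue), then register it either as a LightNVM device or as a
 * regular gendisk named nullb<index>.
 */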
static int null_add_dev(void)
{
        struct nullb *nullb;
        int rv;

        nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
        if (!nullb) {
                rv = -ENOMEM;
                goto out;
        }

        spin_lock_init(&nullb->lock);

        if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
                submit_queues = nr_online_nodes;

        rv = setup_queues(nullb);
        if (rv)
                goto out_free_nullb;

        if (queue_mode == NULL_Q_MQ) {
                nullb->tag_set.ops = &null_mq_ops;
                nullb->tag_set.nr_hw_queues = submit_queues;
                nullb->tag_set.queue_depth = hw_queue_depth;
                nullb->tag_set.numa_node = home_node;
                nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
                nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
                nullb->tag_set.driver_data = nullb;

                rv = blk_mq_alloc_tag_set(&nullb->tag_set);
                if (rv)
                        goto out_cleanup_queues;

                nullb->q = blk_mq_init_queue(&nullb->tag_set);
                if (IS_ERR(nullb->q)) {
                        rv = -ENOMEM;
                        goto out_cleanup_tags;
                }
        } else if (queue_mode == NULL_Q_BIO) {
                nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
                }
                blk_queue_make_request(nullb->q, null_queue_bio);
                rv = init_driver_queues(nullb);
                if (rv)
                        goto out_cleanup_blk_queue;
        } else {
                nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
                }
                blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
                rv = init_driver_queues(nullb);
                if (rv)
                        goto out_cleanup_blk_queue;
        }

        nullb->q->queuedata = nullb;
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

        mutex_lock(&lock);
        nullb->index = nullb_indexes++;
        mutex_unlock(&lock);

        blk_queue_logical_block_size(nullb->q, bs);
        blk_queue_physical_block_size(nullb->q, bs);

        sprintf(nullb->disk_name, "nullb%d", nullb->index);

        if (use_lightnvm)
                rv = null_nvm_register(nullb);
        else
                rv = null_gendisk_register(nullb);

        if (rv)
                goto out_cleanup_blk_queue;

        mutex_lock(&lock);
        list_add_tail(&nullb->list, &nullb_list);
        mutex_unlock(&lock);

        return 0;
out_cleanup_blk_queue:
        blk_cleanup_queue(nullb->q);
out_cleanup_tags:
        if (queue_mode == NULL_Q_MQ)
                blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
        cleanup_queues(nullb);
out_free_nullb:
        kfree(nullb);
out:
        return rv;
}

static int __init null_init(void)
{
        int ret = 0;
        unsigned int i;
        struct nullb *nullb;

        if (bs > PAGE_SIZE) {
                pr_warn("null_blk: invalid block size\n");
                pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
                bs = PAGE_SIZE;
        }

        if (use_lightnvm && bs != 4096) {
                pr_warn("null_blk: LightNVM only supports 4k block size\n");
                pr_warn("null_blk: defaults block size to 4k\n");
                bs = 4096;
        }

        if (use_lightnvm && queue_mode != NULL_Q_MQ) {
                pr_warn("null_blk: LightNVM only supported for blk-mq\n");
                pr_warn("null_blk: defaults queue mode to blk-mq\n");
                queue_mode = NULL_Q_MQ;
        }

        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes) {
                        pr_warn("null_blk: submit_queues param is set to %u.\n",
                                                        nr_online_nodes);
                        submit_queues = nr_online_nodes;
                }
        } else if (submit_queues > nr_cpu_ids)
                submit_queues = nr_cpu_ids;
        else if (!submit_queues)
                submit_queues = 1;

        mutex_init(&lock);

        null_major = register_blkdev(0, "nullb");
        if (null_major < 0)
                return null_major;

        if (use_lightnvm) {
                ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
                                                                0, 0, NULL);
                if (!ppa_cache) {
                        pr_err("null_blk: unable to create ppa cache\n");
                        ret = -ENOMEM;
                        goto err_ppa;
                }
        }

        for (i = 0; i < nr_devices; i++) {
                ret = null_add_dev();
                if (ret)
                        goto err_dev;
        }

        pr_info("null: module loaded\n");
        return 0;

err_dev:
        while (!list_empty(&nullb_list)) {
                nullb = list_entry(nullb_list.next, struct nullb, list);
                null_del_dev(nullb);
        }
        kmem_cache_destroy(ppa_cache);
err_ppa:
        unregister_blkdev(null_major, "nullb");
        return ret;
}

static void __exit null_exit(void)
{
        struct nullb *nullb;

        unregister_blkdev(null_major, "nullb");

        mutex_lock(&lock);
        while (!list_empty(&nullb_list)) {
                nullb = list_entry(nullb_list.next, struct nullb, list);
                null_del_dev(nullb);
        }
        mutex_unlock(&lock);

        kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");