#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

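/*
 * Per-command state. In blk-mq mode this lives in the request pdu; in the
 * bio and rq modes it is taken from the per-queue pre-allocated cmds array.
 */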
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

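/*
 * Claim a free tag from the queue's bitmap without holding a lock; returns
 * -1U when all queue_depth tags are in use.
 */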
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

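/*
 * Allocate a command for this queue. When can_wait is set, sleep
 * uninterruptibly until put_tag() frees a tag and wakes us up.
 */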
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

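/*
 * Complete a command according to the active queue_mode, release its tag,
 * and restart a stopped rq-mode queue now that a tag has been freed.
 */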
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = ktime_set(0, completion_nsec);

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

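/*
 * Map the submitting CPU to one of the submission queues by dividing the
 * CPU id space into nr_queues equally sized chunks.
 */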
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

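/*
 * blk-mq ->queue_rq handler: the command is carried in the request pdu, so
 * no tag allocation is needed; start the request and complete per irqmode.
 */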
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	if (irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

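/*
 * Tear down one nullb instance: unregister the LightNVM device or gendisk,
 * then release the request queue, tag set, and per-queue resources.
 */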
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	dev->mt->end_io(rqd, error);

	blk_put_request(rq);
}

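/*
 * Wrap an incoming nvm_rq in a driver-private request and execute it
 * asynchronously; null_lnvm_end_io() completes it back to the media manager.
 */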
static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

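/*
 * Report a synthetic LightNVM identity: a single channel/group where the
 * gb-sized device is carved into 256-page blocks, spread over LUNs of at
 * most 65536 blocks each.
 */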
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cgrps = 1;
	id->cap = 0x3;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	do_div(size, bs); /* convert size to pages */
	do_div(size, 256); /* convert size to blocks of 256 pages each */
	grp = &id->groups[0];
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	do_div(size, (1 << 16));
	grp->num_lun = size + 1;
	do_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
							dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

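/*
 * Allocate the per-queue command array and the tag bitmap sized to hold
 * queue_depth bits, and mark every command as unused.
 */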
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

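/*
 * Create one nullb instance: set up the queues for the selected queue_mode,
 * then register it as either a LightNVM device or a regular gendisk.
 */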
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm) {
		rv = nvm_register(nullb->q, nullb->disk_name,
							&null_lnvm_dev_ops);
		if (rv)
			goto out_cleanup_blk_queue;
		goto done;
	}

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_lightnvm;
	}
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
done:
	return 0;

out_cleanup_lightnvm:
	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

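/*
 * Validate and fix up module parameters, register the block major, and
 * create nr_devices instances.
 */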
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaulting block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaulting queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_add_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");