NVMe: Move error handling to failed reset handler
[cascardo/linux.git] drivers/nvme/host/core.c
1 /*
2  * NVM Express device driver
3  * Copyright (c) 2011-2014, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14
15 #include <linux/blkdev.h>
16 #include <linux/blk-mq.h>
17 #include <linux/delay.h>
18 #include <linux/errno.h>
19 #include <linux/hdreg.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/list_sort.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/pr.h>
26 #include <linux/ptrace.h>
27 #include <linux/nvme_ioctl.h>
28 #include <linux/t10-pi.h>
29 #include <scsi/sg.h>
30 #include <asm/unaligned.h>
31
32 #include "nvme.h"
33
34 #define NVME_MINORS             (1U << MINORBITS)
35
36 static int nvme_major;
37 module_param(nvme_major, int, 0);
38
39 static int nvme_char_major;
40 module_param(nvme_char_major, int, 0);
41
42 static LIST_HEAD(nvme_ctrl_list);
43 DEFINE_SPINLOCK(dev_list_lock);
44
45 static struct class *nvme_class;
46
47 static void nvme_free_ns(struct kref *kref)
48 {
49         struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
50
51         if (ns->type == NVME_NS_LIGHTNVM)
52                 nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
53
54         spin_lock(&dev_list_lock);
55         ns->disk->private_data = NULL;
56         spin_unlock(&dev_list_lock);
57
58         put_disk(ns->disk);
59         ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
60         nvme_put_ctrl(ns->ctrl);
61         kfree(ns);
62 }
63
64 static void nvme_put_ns(struct nvme_ns *ns)
65 {
66         kref_put(&ns->kref, nvme_free_ns);
67 }
68
69 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
70 {
71         struct nvme_ns *ns;
72
73         spin_lock(&dev_list_lock);
74         ns = disk->private_data;
75         if (ns && !kref_get_unless_zero(&ns->kref))
76                 ns = NULL;
77         spin_unlock(&dev_list_lock);
78
79         return ns;
80 }
81
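/*
 * Put a request back on the requeue list and kick the list, unless the queue
 * has been stopped (e.g. while the controller is being reset).
 */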
82 void nvme_requeue_req(struct request *req)
83 {
84         unsigned long flags;
85
86         blk_mq_requeue_request(req);
87         spin_lock_irqsave(req->q->queue_lock, flags);
88         if (!blk_queue_stopped(req->q))
89                 blk_mq_kick_requeue_list(req->q);
90         spin_unlock_irqrestore(req->q->queue_lock, flags);
91 }
92
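/*
 * Allocate a request for passing an NVMe command through the block layer.
 * The command is attached via req->cmd; the caller is responsible for
 * setting a timeout and for freeing the request with blk_mq_free_request().
 */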
93 struct request *nvme_alloc_request(struct request_queue *q,
94                 struct nvme_command *cmd, unsigned int flags)
95 {
96         bool write = cmd->common.opcode & 1;
97         struct request *req;
98
99         req = blk_mq_alloc_request(q, write, flags);
100         if (IS_ERR(req))
101                 return req;
102
103         req->cmd_type = REQ_TYPE_DRV_PRIV;
104         req->cmd_flags |= REQ_FAILFAST_DRIVER;
105         req->__data_len = 0;
106         req->__sector = (sector_t) -1;
107         req->bio = req->biotail = NULL;
108
109         req->cmd = (unsigned char *)cmd;
110         req->cmd_len = sizeof(struct nvme_command);
111         req->special = (void *)0;
112
113         return req;
114 }
115
116 /*
117  * Returns 0 on success.  If the result is negative, it's a Linux error code;
118  * if the result is positive, it's an NVM Express status code
119  */
120 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
121                 void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
122 {
123         struct request *req;
124         int ret;
125
126         req = nvme_alloc_request(q, cmd, 0);
127         if (IS_ERR(req))
128                 return PTR_ERR(req);
129
130         req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
131
132         if (buffer && bufflen) {
133                 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
134                 if (ret)
135                         goto out;
136         }
137
138         blk_execute_rq(req->q, NULL, req, 0);
139         if (result)
140                 *result = (u32)(uintptr_t)req->special;
141         ret = req->errors;
142  out:
143         blk_mq_free_request(req);
144         return ret;
145 }
146
147 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
148                 void *buffer, unsigned bufflen)
149 {
150         return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
151 }
152
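/*
 * Map a user buffer (and, for namespace queues, an optional metadata buffer)
 * into a request and execute it synchronously, copying metadata back to user
 * space on successful reads.
 */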
153 int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
154                 void __user *ubuffer, unsigned bufflen,
155                 void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
156                 u32 *result, unsigned timeout)
157 {
158         bool write = cmd->common.opcode & 1;
159         struct nvme_ns *ns = q->queuedata;
160         struct gendisk *disk = ns ? ns->disk : NULL;
161         struct request *req;
162         struct bio *bio = NULL;
163         void *meta = NULL;
164         int ret;
165
166         req = nvme_alloc_request(q, cmd, 0);
167         if (IS_ERR(req))
168                 return PTR_ERR(req);
169
170         req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
171
172         if (ubuffer && bufflen) {
173                 ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
174                                 GFP_KERNEL);
175                 if (ret)
176                         goto out;
177                 bio = req->bio;
178
179                 if (!disk)
180                         goto submit;
181                 bio->bi_bdev = bdget_disk(disk, 0);
182                 if (!bio->bi_bdev) {
183                         ret = -ENODEV;
184                         goto out_unmap;
185                 }
186
187                 if (meta_buffer) {
188                         struct bio_integrity_payload *bip;
189
190                         meta = kmalloc(meta_len, GFP_KERNEL);
191                         if (!meta) {
192                                 ret = -ENOMEM;
193                                 goto out_unmap;
194                         }
195
196                         if (write) {
197                                 if (copy_from_user(meta, meta_buffer,
198                                                 meta_len)) {
199                                         ret = -EFAULT;
200                                         goto out_free_meta;
201                                 }
202                         }
203
204                         bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
205                         if (IS_ERR(bip)) {
206                                 ret = PTR_ERR(bip);
207                                 goto out_free_meta;
208                         }
209
210                         bip->bip_iter.bi_size = meta_len;
211                         bip->bip_iter.bi_sector = meta_seed;
212
213                         ret = bio_integrity_add_page(bio, virt_to_page(meta),
214                                         meta_len, offset_in_page(meta));
215                         if (ret != meta_len) {
216                                 ret = -ENOMEM;
217                                 goto out_free_meta;
218                         }
219                 }
220         }
221  submit:
222         blk_execute_rq(req->q, disk, req, 0);
223         ret = req->errors;
224         if (result)
225                 *result = (u32)(uintptr_t)req->special;
226         if (meta && !ret && !write) {
227                 if (copy_to_user(meta_buffer, meta, meta_len))
228                         ret = -EFAULT;
229         }
230  out_free_meta:
231         kfree(meta);
232  out_unmap:
233         if (bio) {
234                 if (disk && bio->bi_bdev)
235                         bdput(bio->bi_bdev);
236                 blk_rq_unmap_user(bio);
237         }
238  out:
239         blk_mq_free_request(req);
240         return ret;
241 }
242
243 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
244                 void __user *ubuffer, unsigned bufflen, u32 *result,
245                 unsigned timeout)
246 {
247         return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
248                         result, timeout);
249 }
250
251 int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
252 {
253         struct nvme_command c = { };
254         int error;
255
256         /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
257         c.identify.opcode = nvme_admin_identify;
258         c.identify.cns = cpu_to_le32(1);
259
260         *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
261         if (!*id)
262                 return -ENOMEM;
263
264         error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
265                         sizeof(struct nvme_id_ctrl));
266         if (error)
267                 kfree(*id);
268         return error;
269 }
270
271 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
272 {
273         struct nvme_command c = { };
274
275         c.identify.opcode = nvme_admin_identify;
276         c.identify.cns = cpu_to_le32(2);
277         c.identify.nsid = cpu_to_le32(nsid);
278         return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
279 }
280
281 int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
282                 struct nvme_id_ns **id)
283 {
284         struct nvme_command c = { };
285         int error;
286
287         /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
288         c.identify.opcode = nvme_admin_identify;
289         c.identify.nsid = cpu_to_le32(nsid);
290
291         *id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
292         if (!*id)
293                 return -ENOMEM;
294
295         error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
296                         sizeof(struct nvme_id_ns));
297         if (error)
298                 kfree(*id);
299         return error;
300 }
301
302 int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
303                                         dma_addr_t dma_addr, u32 *result)
304 {
305         struct nvme_command c;
306
307         memset(&c, 0, sizeof(c));
308         c.features.opcode = nvme_admin_get_features;
309         c.features.nsid = cpu_to_le32(nsid);
310         c.features.prp1 = cpu_to_le64(dma_addr);
311         c.features.fid = cpu_to_le32(fid);
312
313         return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
314 }
315
316 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
317                                         dma_addr_t dma_addr, u32 *result)
318 {
319         struct nvme_command c;
320
321         memset(&c, 0, sizeof(c));
322         c.features.opcode = nvme_admin_set_features;
323         c.features.prp1 = cpu_to_le64(dma_addr);
324         c.features.fid = cpu_to_le32(fid);
325         c.features.dword11 = cpu_to_le32(dword11);
326
327         return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
328 }
329
330 int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
331 {
332         struct nvme_command c = { };
333         int error;
334
335         c.common.opcode = nvme_admin_get_log_page;
336         c.common.nsid = cpu_to_le32(0xFFFFFFFF);
337         c.common.cdw10[0] = cpu_to_le32(
338                         (((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
339                          NVME_LOG_SMART);
340
341         *log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
342         if (!*log)
343                 return -ENOMEM;
344
345         error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
346                         sizeof(struct nvme_smart_log));
347         if (error)
348                 kfree(*log);
349         return error;
350 }
351
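/*
 * Request *count I/O queues via the Number of Queues feature.  The controller
 * returns the number of submission and completion queues actually allocated
 * (zero's based) in the two 16-bit halves of the result; *count is clamped to
 * the smaller of the two.
 */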
352 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
353 {
354         u32 q_count = (*count - 1) | ((*count - 1) << 16);
355         u32 result;
356         int status, nr_io_queues;
357
358         status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
359                         &result);
360         if (status)
361                 return status;
362
363         nr_io_queues = min(result & 0xffff, result >> 16) + 1;
364         *count = min(*count, nr_io_queues);
365         return 0;
366 }
367
368 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
369 {
370         struct nvme_user_io io;
371         struct nvme_command c;
372         unsigned length, meta_len;
373         void __user *metadata;
374
375         if (copy_from_user(&io, uio, sizeof(io)))
376                 return -EFAULT;
377
378         switch (io.opcode) {
379         case nvme_cmd_write:
380         case nvme_cmd_read:
381         case nvme_cmd_compare:
382                 break;
383         default:
384                 return -EINVAL;
385         }
386
387         length = (io.nblocks + 1) << ns->lba_shift;
388         meta_len = (io.nblocks + 1) * ns->ms;
389         metadata = (void __user *)(uintptr_t)io.metadata;
390
391         if (ns->ext) {
392                 length += meta_len;
393                 meta_len = 0;
394         } else if (meta_len) {
395                 if ((io.metadata & 3) || !io.metadata)
396                         return -EINVAL;
397         }
398
399         memset(&c, 0, sizeof(c));
400         c.rw.opcode = io.opcode;
401         c.rw.flags = io.flags;
402         c.rw.nsid = cpu_to_le32(ns->ns_id);
403         c.rw.slba = cpu_to_le64(io.slba);
404         c.rw.length = cpu_to_le16(io.nblocks);
405         c.rw.control = cpu_to_le16(io.control);
406         c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
407         c.rw.reftag = cpu_to_le32(io.reftag);
408         c.rw.apptag = cpu_to_le16(io.apptag);
409         c.rw.appmask = cpu_to_le16(io.appmask);
410
411         return __nvme_submit_user_cmd(ns->queue, &c,
412                         (void __user *)(uintptr_t)io.addr, length,
413                         metadata, meta_len, io.slba, NULL, 0);
414 }
415
416 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
417                         struct nvme_passthru_cmd __user *ucmd)
418 {
419         struct nvme_passthru_cmd cmd;
420         struct nvme_command c;
421         unsigned timeout = 0;
422         int status;
423
424         if (!capable(CAP_SYS_ADMIN))
425                 return -EACCES;
426         if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
427                 return -EFAULT;
428
429         memset(&c, 0, sizeof(c));
430         c.common.opcode = cmd.opcode;
431         c.common.flags = cmd.flags;
432         c.common.nsid = cpu_to_le32(cmd.nsid);
433         c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
434         c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
435         c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
436         c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
437         c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
438         c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
439         c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
440         c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
441
442         if (cmd.timeout_ms)
443                 timeout = msecs_to_jiffies(cmd.timeout_ms);
444
445         status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
446                         (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
447                         &cmd.result, timeout);
448         if (status >= 0) {
449                 if (put_user(cmd.result, &ucmd->result))
450                         return -EFAULT;
451         }
452
453         return status;
454 }
455
456 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
457                 unsigned int cmd, unsigned long arg)
458 {
459         struct nvme_ns *ns = bdev->bd_disk->private_data;
460
461         switch (cmd) {
462         case NVME_IOCTL_ID:
463                 force_successful_syscall_return();
464                 return ns->ns_id;
465         case NVME_IOCTL_ADMIN_CMD:
466                 return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
467         case NVME_IOCTL_IO_CMD:
468                 return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
469         case NVME_IOCTL_SUBMIT_IO:
470                 return nvme_submit_io(ns, (void __user *)arg);
471 #ifdef CONFIG_BLK_DEV_NVME_SCSI
472         case SG_GET_VERSION_NUM:
473                 return nvme_sg_get_version_num((void __user *)arg);
474         case SG_IO:
475                 return nvme_sg_io(ns, (void __user *)arg);
476 #endif
477         default:
478                 return -ENOTTY;
479         }
480 }
481
482 #ifdef CONFIG_COMPAT
483 static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
484                         unsigned int cmd, unsigned long arg)
485 {
486         switch (cmd) {
487         case SG_IO:
488                 return -ENOIOCTLCMD;
489         }
490         return nvme_ioctl(bdev, mode, cmd, arg);
491 }
492 #else
493 #define nvme_compat_ioctl       NULL
494 #endif
495
496 static int nvme_open(struct block_device *bdev, fmode_t mode)
497 {
498         return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
499 }
500
501 static void nvme_release(struct gendisk *disk, fmode_t mode)
502 {
503         nvme_put_ns(disk->private_data);
504 }
505
506 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
507 {
508         /* some standard values */
509         geo->heads = 1 << 6;
510         geo->sectors = 1 << 5;
511         geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
512         return 0;
513 }
514
515 #ifdef CONFIG_BLK_DEV_INTEGRITY
516 static void nvme_init_integrity(struct nvme_ns *ns)
517 {
518         struct blk_integrity integrity;
519
520         switch (ns->pi_type) {
521         case NVME_NS_DPS_PI_TYPE3:
522                 integrity.profile = &t10_pi_type3_crc;
523                 break;
524         case NVME_NS_DPS_PI_TYPE1:
525         case NVME_NS_DPS_PI_TYPE2:
526                 integrity.profile = &t10_pi_type1_crc;
527                 break;
528         default:
529                 integrity.profile = NULL;
530                 break;
531         }
532         integrity.tuple_size = ns->ms;
533         blk_integrity_register(ns->disk, &integrity);
534         blk_queue_max_integrity_segments(ns->queue, 1);
535 }
536 #else
537 static void nvme_init_integrity(struct nvme_ns *ns)
538 {
539 }
540 #endif /* CONFIG_BLK_DEV_INTEGRITY */
541
542 static void nvme_config_discard(struct nvme_ns *ns)
543 {
544         u32 logical_block_size = queue_logical_block_size(ns->queue);
545         ns->queue->limits.discard_zeroes_data = 0;
546         ns->queue->limits.discard_alignment = logical_block_size;
547         ns->queue->limits.discard_granularity = logical_block_size;
548         blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
549         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
550 }
551
552 static int nvme_revalidate_disk(struct gendisk *disk)
553 {
554         struct nvme_ns *ns = disk->private_data;
555         struct nvme_id_ns *id;
556         u8 lbaf, pi_type;
557         u16 old_ms;
558         unsigned short bs;
559
560         if (test_bit(NVME_NS_DEAD, &ns->flags)) {
561                 set_capacity(disk, 0);
562                 return -ENODEV;
563         }
564         if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
565                 dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
566                                 __func__, ns->ctrl->instance, ns->ns_id);
567                 return -ENODEV;
568         }
569         if (id->ncap == 0) {
570                 kfree(id);
571                 return -ENODEV;
572         }
573
574         if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
575                 if (nvme_nvm_register(ns->queue, disk->disk_name)) {
576                         dev_warn(ns->ctrl->dev,
577                                 "%s: LightNVM init failure\n", __func__);
578                         kfree(id);
579                         return -ENODEV;
580                 }
581                 ns->type = NVME_NS_LIGHTNVM;
582         }
583
584         if (ns->ctrl->vs >= NVME_VS(1, 1))
585                 memcpy(ns->eui, id->eui64, sizeof(ns->eui));
586         if (ns->ctrl->vs >= NVME_VS(1, 2))
587                 memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));
588
589         old_ms = ns->ms;
590         lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
591         ns->lba_shift = id->lbaf[lbaf].ds;
592         ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
593         ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
594
595         /*
596          * If identify namespace failed, use a default 512 byte block size so
597          * the block layer can use it before failing read/write for 0 capacity.
598          */
599         if (ns->lba_shift == 0)
600                 ns->lba_shift = 9;
601         bs = 1 << ns->lba_shift;
602         /* XXX: PI requires the metadata size to equal the t10 pi tuple size */
603         pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
604                                         id->dps & NVME_NS_DPS_PI_MASK : 0;
605
606         blk_mq_freeze_queue(disk->queue);
607         if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
608                                 ns->ms != old_ms ||
609                                 bs != queue_logical_block_size(disk->queue) ||
610                                 (ns->ms && ns->ext)))
611                 blk_integrity_unregister(disk);
612
613         ns->pi_type = pi_type;
614         blk_queue_logical_block_size(ns->queue, bs);
615
616         if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
617                 nvme_init_integrity(ns);
618         if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
619                 set_capacity(disk, 0);
620         else
621                 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
622
623         if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
624                 nvme_config_discard(ns);
625         blk_mq_unfreeze_queue(disk->queue);
626
627         kfree(id);
628         return 0;
629 }
630
631 static char nvme_pr_type(enum pr_type type)
632 {
633         switch (type) {
634         case PR_WRITE_EXCLUSIVE:
635                 return 1;
636         case PR_EXCLUSIVE_ACCESS:
637                 return 2;
638         case PR_WRITE_EXCLUSIVE_REG_ONLY:
639                 return 3;
640         case PR_EXCLUSIVE_ACCESS_REG_ONLY:
641                 return 4;
642         case PR_WRITE_EXCLUSIVE_ALL_REGS:
643                 return 5;
644         case PR_EXCLUSIVE_ACCESS_ALL_REGS:
645                 return 6;
646         default:
647                 return 0;
648         }
649 };
650
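/*
 * Issue a persistent reservation command: the current and service action
 * registration keys are passed in a 16-byte data payload, and the action and
 * type bits go into cdw10.
 */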
651 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
652                                 u64 key, u64 sa_key, u8 op)
653 {
654         struct nvme_ns *ns = bdev->bd_disk->private_data;
655         struct nvme_command c;
656         u8 data[16] = { 0, };
657
658         put_unaligned_le64(key, &data[0]);
659         put_unaligned_le64(sa_key, &data[8]);
660
661         memset(&c, 0, sizeof(c));
662         c.common.opcode = op;
663         c.common.nsid = cpu_to_le32(ns->ns_id);
664         c.common.cdw10[0] = cpu_to_le32(cdw10);
665
666         return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
667 }
668
669 static int nvme_pr_register(struct block_device *bdev, u64 old,
670                 u64 new, unsigned flags)
671 {
672         u32 cdw10;
673
674         if (flags & ~PR_FL_IGNORE_KEY)
675                 return -EOPNOTSUPP;
676
677         cdw10 = old ? 2 : 0;
678         cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
679         cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
680         return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
681 }
682
683 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
684                 enum pr_type type, unsigned flags)
685 {
686         u32 cdw10;
687
688         if (flags & ~PR_FL_IGNORE_KEY)
689                 return -EOPNOTSUPP;
690
691         cdw10 = nvme_pr_type(type) << 8;
692         cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
693         return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
694 }
695
696 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
697                 enum pr_type type, bool abort)
698 {
699         u32 cdw10 = (nvme_pr_type(type) << 8) | (abort ? 2 : 1);
700         return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
701 }
702
703 static int nvme_pr_clear(struct block_device *bdev, u64 key)
704 {
705         u32 cdw10 = 1 | (key ? 1 << 3 : 0);
706         return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
707 }
708
709 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
710 {
711         u32 cdw10 = (nvme_pr_type(type) << 8) | (key ? 1 << 3 : 0);
712         return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
713 }
714
715 static const struct pr_ops nvme_pr_ops = {
716         .pr_register    = nvme_pr_register,
717         .pr_reserve     = nvme_pr_reserve,
718         .pr_release     = nvme_pr_release,
719         .pr_preempt     = nvme_pr_preempt,
720         .pr_clear       = nvme_pr_clear,
721 };
722
723 static const struct block_device_operations nvme_fops = {
724         .owner          = THIS_MODULE,
725         .ioctl          = nvme_ioctl,
726         .compat_ioctl   = nvme_compat_ioctl,
727         .open           = nvme_open,
728         .release        = nvme_release,
729         .getgeo         = nvme_getgeo,
730         .revalidate_disk= nvme_revalidate_disk,
731         .pr_ops         = &nvme_pr_ops,
732 };
733
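/*
 * Poll CSTS.RDY until it matches the expected state, giving up after the
 * timeout advertised in CAP.TO (in units of 500ms).
 */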
734 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
735 {
736         unsigned long timeout =
737                 ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
738         u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
739         int ret;
740
741         while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
742                 if ((csts & NVME_CSTS_RDY) == bit)
743                         break;
744
745                 msleep(100);
746                 if (fatal_signal_pending(current))
747                         return -EINTR;
748                 if (time_after(jiffies, timeout)) {
749                         dev_err(ctrl->dev,
750                                 "Device not ready; aborting %s\n", enabled ?
751                                                 "initialisation" : "reset");
752                         return -ENODEV;
753                 }
754         }
755
756         return ret;
757 }
758
759 /*
760  * If the device has been passed off to us in an enabled state, just clear
761  * the enabled bit.  The spec says we should set the 'shutdown notification
762  * bits', but doing so may cause the device to complete commands to the
763  * admin queue ... and we don't know what memory that might be pointing at!
764  */
765 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
766 {
767         int ret;
768
769         ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
770         ctrl->ctrl_config &= ~NVME_CC_ENABLE;
771
772         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
773         if (ret)
774                 return ret;
775         return nvme_wait_ready(ctrl, cap, false);
776 }
777
778 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
779 {
780         /*
781          * Default to a 4K page size, with the intention to update this
782          * path in the future to accommodate architectures with differing
783          * kernel and IO page sizes.
784          */
785         unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
786         int ret;
787
788         if (page_shift < dev_page_min) {
789                 dev_err(ctrl->dev,
790                         "Minimum device page size %u too large for host (%u)\n",
791                         1 << dev_page_min, 1 << page_shift);
792                 return -ENODEV;
793         }
794
795         ctrl->page_size = 1 << page_shift;
796
797         ctrl->ctrl_config = NVME_CC_CSS_NVM;
798         ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
799         ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
800         ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
801         ctrl->ctrl_config |= NVME_CC_ENABLE;
802
803         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
804         if (ret)
805                 return ret;
806         return nvme_wait_ready(ctrl, cap, true);
807 }
808
809 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
810 {
811         unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
812         u32 csts;
813         int ret;
814
815         ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
816         ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
817
818         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
819         if (ret)
820                 return ret;
821
822         while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
823                 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
824                         break;
825
826                 msleep(100);
827                 if (fatal_signal_pending(current))
828                         return -EINTR;
829                 if (time_after(jiffies, timeout)) {
830                         dev_err(ctrl->dev,
831                                 "Device shutdown incomplete; abort shutdown\n");
832                         return -ENODEV;
833                 }
834         }
835
836         return ret;
837 }
838
839 /*
840  * Initialize the cached copies of the Identify data and various controller
841  * registers in our nvme_ctrl structure.  This should be called as soon as
842  * the admin queue is fully up and running.
843  */
844 int nvme_init_identify(struct nvme_ctrl *ctrl)
845 {
846         struct nvme_id_ctrl *id;
847         u64 cap;
848         int ret, page_shift;
849
850         ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
851         if (ret) {
852                 dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
853                 return ret;
854         }
855
856         ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
857         if (ret) {
858                 dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
859                 return ret;
860         }
861         page_shift = NVME_CAP_MPSMIN(cap) + 12;
862
863         if (ctrl->vs >= NVME_VS(1, 1))
864                 ctrl->subsystem = NVME_CAP_NSSRC(cap);
865
866         ret = nvme_identify_ctrl(ctrl, &id);
867         if (ret) {
868                 dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
869                 return -EIO;
870         }
871
872         ctrl->oncs = le16_to_cpup(&id->oncs);
873         atomic_set(&ctrl->abort_limit, id->acl + 1);
874         ctrl->vwc = id->vwc;
875         memcpy(ctrl->serial, id->sn, sizeof(id->sn));
876         memcpy(ctrl->model, id->mn, sizeof(id->mn));
877         memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
878         if (id->mdts)
879                 ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
880         else
881                 ctrl->max_hw_sectors = UINT_MAX;
882
883         if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
884                 unsigned int max_hw_sectors;
885
886                 ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
887                 max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
888                 if (ctrl->max_hw_sectors) {
889                         ctrl->max_hw_sectors = min(max_hw_sectors,
890                                                         ctrl->max_hw_sectors);
891                 } else {
892                         ctrl->max_hw_sectors = max_hw_sectors;
893                 }
894         }
895
896         kfree(id);
897         return 0;
898 }
899
900 static int nvme_dev_open(struct inode *inode, struct file *file)
901 {
902         struct nvme_ctrl *ctrl;
903         int instance = iminor(inode);
904         int ret = -ENODEV;
905
906         spin_lock(&dev_list_lock);
907         list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
908                 if (ctrl->instance != instance)
909                         continue;
910
911                 if (!ctrl->admin_q) {
912                         ret = -EWOULDBLOCK;
913                         break;
914                 }
915                 if (!kref_get_unless_zero(&ctrl->kref))
916                         break;
917                 file->private_data = ctrl;
918                 ret = 0;
919                 break;
920         }
921         spin_unlock(&dev_list_lock);
922
923         return ret;
924 }
925
926 static int nvme_dev_release(struct inode *inode, struct file *file)
927 {
928         nvme_put_ctrl(file->private_data);
929         return 0;
930 }
931
932 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
933 {
934         struct nvme_ns *ns;
935         int ret;
936
937         mutex_lock(&ctrl->namespaces_mutex);
938         if (list_empty(&ctrl->namespaces)) {
939                 ret = -ENOTTY;
940                 goto out_unlock;
941         }
942
943         ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
944         if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
945                 dev_warn(ctrl->dev,
946                         "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
947                 ret = -EINVAL;
948                 goto out_unlock;
949         }
950
951         dev_warn(ctrl->dev,
952                 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
953         kref_get(&ns->kref);
954         mutex_unlock(&ctrl->namespaces_mutex);
955
956         ret = nvme_user_cmd(ctrl, ns, argp);
957         nvme_put_ns(ns);
958         return ret;
959
960 out_unlock:
961         mutex_unlock(&ctrl->namespaces_mutex);
962         return ret;
963 }
964
965 static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
966                 unsigned long arg)
967 {
968         struct nvme_ctrl *ctrl = file->private_data;
969         void __user *argp = (void __user *)arg;
970
971         switch (cmd) {
972         case NVME_IOCTL_ADMIN_CMD:
973                 return nvme_user_cmd(ctrl, NULL, argp);
974         case NVME_IOCTL_IO_CMD:
975                 return nvme_dev_user_cmd(ctrl, argp);
976         case NVME_IOCTL_RESET:
977                 dev_warn(ctrl->dev, "resetting controller\n");
978                 return ctrl->ops->reset_ctrl(ctrl);
979         case NVME_IOCTL_SUBSYS_RESET:
980                 return nvme_reset_subsystem(ctrl);
981         default:
982                 return -ENOTTY;
983         }
984 }
985
986 static const struct file_operations nvme_dev_fops = {
987         .owner          = THIS_MODULE,
988         .open           = nvme_dev_open,
989         .release        = nvme_dev_release,
990         .unlocked_ioctl = nvme_dev_ioctl,
991         .compat_ioctl   = nvme_dev_ioctl,
992 };
993
994 static ssize_t nvme_sysfs_reset(struct device *dev,
995                                 struct device_attribute *attr, const char *buf,
996                                 size_t count)
997 {
998         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
999         int ret;
1000
1001         ret = ctrl->ops->reset_ctrl(ctrl);
1002         if (ret < 0)
1003                 return ret;
1004         return count;
1005 }
1006 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
1007
1008 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
1009                                                                 char *buf)
1010 {
1011         struct nvme_ns *ns = dev_to_disk(dev)->private_data;
1012         return sprintf(buf, "%pU\n", ns->uuid);
1013 }
1014 static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
1015
1016 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
1017                                                                 char *buf)
1018 {
1019         struct nvme_ns *ns = dev_to_disk(dev)->private_data;
1020         return sprintf(buf, "%8phd\n", ns->eui);
1021 }
1022 static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
1023
1024 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
1025                                                                 char *buf)
1026 {
1027         struct nvme_ns *ns = dev_to_disk(dev)->private_data;
1028         return sprintf(buf, "%d\n", ns->ns_id);
1029 }
1030 static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
1031
1032 static struct attribute *nvme_ns_attrs[] = {
1033         &dev_attr_uuid.attr,
1034         &dev_attr_eui.attr,
1035         &dev_attr_nsid.attr,
1036         NULL,
1037 };
1038
1039 static umode_t nvme_attrs_are_visible(struct kobject *kobj,
1040                 struct attribute *a, int n)
1041 {
1042         struct device *dev = container_of(kobj, struct device, kobj);
1043         struct nvme_ns *ns = dev_to_disk(dev)->private_data;
1044
1045         if (a == &dev_attr_uuid.attr) {
1046                 if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
1047                         return 0;
1048         }
1049         if (a == &dev_attr_eui.attr) {
1050                 if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
1051                         return 0;
1052         }
1053         return a->mode;
1054 }
1055
1056 static const struct attribute_group nvme_ns_attr_group = {
1057         .attrs          = nvme_ns_attrs,
1058         .is_visible     = nvme_attrs_are_visible,
1059 };
1060
1061 #define nvme_show_function(field)                                               \
1062 static ssize_t  field##_show(struct device *dev,                                \
1063                             struct device_attribute *attr, char *buf)           \
1064 {                                                                               \
1065         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);                          \
1066         return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field);   \
1067 }                                                                               \
1068 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
1069
1070 nvme_show_function(model);
1071 nvme_show_function(serial);
1072 nvme_show_function(firmware_rev);
1073
1074 static struct attribute *nvme_dev_attrs[] = {
1075         &dev_attr_reset_controller.attr,
1076         &dev_attr_model.attr,
1077         &dev_attr_serial.attr,
1078         &dev_attr_firmware_rev.attr,
1079         NULL
1080 };
1081
1082 static struct attribute_group nvme_dev_attrs_group = {
1083         .attrs = nvme_dev_attrs,
1084 };
1085
1086 static const struct attribute_group *nvme_dev_attr_groups[] = {
1087         &nvme_dev_attrs_group,
1088         NULL,
1089 };
1090
1091 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
1092 {
1093         struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
1094         struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
1095
1096         return nsa->ns_id - nsb->ns_id;
1097 }
1098
1099 static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1100 {
1101         struct nvme_ns *ns;
1102
1103         lockdep_assert_held(&ctrl->namespaces_mutex);
1104
1105         list_for_each_entry(ns, &ctrl->namespaces, list) {
1106                 if (ns->ns_id == nsid)
1107                         return ns;
1108                 if (ns->ns_id > nsid)
1109                         break;
1110         }
1111         return NULL;
1112 }
1113
1114 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1115 {
1116         struct nvme_ns *ns;
1117         struct gendisk *disk;
1118         int node = dev_to_node(ctrl->dev);
1119
1120         lockdep_assert_held(&ctrl->namespaces_mutex);
1121
1122         ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
1123         if (!ns)
1124                 return;
1125
1126         ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
1127         if (ns->instance < 0)
1128                 goto out_free_ns;
1129
1130         ns->queue = blk_mq_init_queue(ctrl->tagset);
1131         if (IS_ERR(ns->queue))
1132                 goto out_release_instance;
1133         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1134         ns->queue->queuedata = ns;
1135         ns->ctrl = ctrl;
1136
1137         disk = alloc_disk_node(0, node);
1138         if (!disk)
1139                 goto out_free_queue;
1140
1141         kref_init(&ns->kref);
1142         ns->ns_id = nsid;
1143         ns->disk = disk;
1144         ns->lba_shift = 9; /* default to 512 byte blocks until the disk is validated */
1145
1146         blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1147         if (ctrl->max_hw_sectors) {
1148                 blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
1149                 blk_queue_max_segments(ns->queue,
1150                         (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
1151         }
1152         if (ctrl->stripe_size)
1153                 blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
1154         if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1155                 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
1156         blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
1157
1158         disk->major = nvme_major;
1159         disk->first_minor = 0;
1160         disk->fops = &nvme_fops;
1161         disk->private_data = ns;
1162         disk->queue = ns->queue;
1163         disk->driverfs_dev = ctrl->device;
1164         disk->flags = GENHD_FL_EXT_DEVT;
1165         sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
1166
1167         if (nvme_revalidate_disk(ns->disk))
1168                 goto out_free_disk;
1169
1170         list_add_tail(&ns->list, &ctrl->namespaces);
1171         kref_get(&ctrl->kref);
1172         if (ns->type == NVME_NS_LIGHTNVM)
1173                 return;
1174
1175         add_disk(ns->disk);
1176         if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
1177                                         &nvme_ns_attr_group))
1178                 pr_warn("%s: failed to create sysfs group for identification\n",
1179                         ns->disk->disk_name);
1180         return;
1181  out_free_disk:
1182         kfree(disk);
1183  out_free_queue:
1184         blk_cleanup_queue(ns->queue);
1185  out_release_instance:
1186         ida_simple_remove(&ctrl->ns_ida, ns->instance);
1187  out_free_ns:
1188         kfree(ns);
1189 }
1190
1191 static void nvme_ns_remove(struct nvme_ns *ns)
1192 {
1193         if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
1194                 return;
1195
1196         if (ns->disk->flags & GENHD_FL_UP) {
1197                 if (blk_get_integrity(ns->disk))
1198                         blk_integrity_unregister(ns->disk);
1199                 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
1200                                         &nvme_ns_attr_group);
1201                 del_gendisk(ns->disk);
1202                 blk_mq_abort_requeue_list(ns->queue);
1203                 blk_cleanup_queue(ns->queue);
1204         }
1205         mutex_lock(&ns->ctrl->namespaces_mutex);
1206         list_del_init(&ns->list);
1207         mutex_unlock(&ns->ctrl->namespaces_mutex);
1208         nvme_put_ns(ns);
1209 }
1210
1211 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1212 {
1213         struct nvme_ns *ns;
1214
1215         ns = nvme_find_ns(ctrl, nsid);
1216         if (ns) {
1217                 if (revalidate_disk(ns->disk))
1218                         nvme_ns_remove(ns);
1219         } else
1220                 nvme_alloc_ns(ctrl, nsid);
1221 }
1222
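/*
 * Walk the active namespace IDs using Identify with CNS 0x02, validating each
 * reported namespace and removing any namespace that falls in the gaps
 * between reported IDs, 1024 entries (one 4k page) at a time.
 */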
1223 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
1224 {
1225         struct nvme_ns *ns;
1226         __le32 *ns_list;
1227         unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
1228         int ret = 0;
1229
1230         ns_list = kzalloc(0x1000, GFP_KERNEL);
1231         if (!ns_list)
1232                 return -ENOMEM;
1233
1234         for (i = 0; i < num_lists; i++) {
1235                 ret = nvme_identify_ns_list(ctrl, prev, ns_list);
1236                 if (ret)
1237                         goto out;
1238
1239                 for (j = 0; j < min(nn, 1024U); j++) {
1240                         nsid = le32_to_cpu(ns_list[j]);
1241                         if (!nsid)
1242                                 goto out;
1243
1244                         nvme_validate_ns(ctrl, nsid);
1245
1246                         while (++prev < nsid) {
1247                                 ns = nvme_find_ns(ctrl, prev);
1248                                 if (ns)
1249                                         nvme_ns_remove(ns);
1250                         }
1251                 }
1252                 nn -= j;
1253         }
1254  out:
1255         kfree(ns_list);
1256         return ret;
1257 }
1258
1259 static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
1260 {
1261         struct nvme_ns *ns, *next;
1262         unsigned i;
1263
1264         lockdep_assert_held(&ctrl->namespaces_mutex);
1265
1266         for (i = 1; i <= nn; i++)
1267                 nvme_validate_ns(ctrl, i);
1268
1269         list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
1270                 if (ns->ns_id > nn)
1271                         nvme_ns_remove(ns);
1272         }
1273 }
1274
1275 void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
1276 {
1277         struct nvme_id_ctrl *id;
1278         unsigned nn;
1279
1280         if (nvme_identify_ctrl(ctrl, &id))
1281                 return;
1282
1283         mutex_lock(&ctrl->namespaces_mutex);
1284         nn = le32_to_cpu(id->nn);
1285         if (ctrl->vs >= NVME_VS(1, 1) &&
1286             !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
1287                 if (!nvme_scan_ns_list(ctrl, nn))
1288                         goto done;
1289         }
1290         __nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
1291  done:
1292         list_sort(NULL, &ctrl->namespaces, ns_cmp);
1293         mutex_unlock(&ctrl->namespaces_mutex);
1294         kfree(id);
1295 }
1296
1297 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
1298 {
1299         struct nvme_ns *ns, *next;
1300
1301         list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
1302                 nvme_ns_remove(ns);
1303 }
1304
1305 static DEFINE_IDA(nvme_instance_ida);
1306
1307 static int nvme_set_instance(struct nvme_ctrl *ctrl)
1308 {
1309         int instance, error;
1310
1311         do {
1312                 if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
1313                         return -ENODEV;
1314
1315                 spin_lock(&dev_list_lock);
1316                 error = ida_get_new(&nvme_instance_ida, &instance);
1317                 spin_unlock(&dev_list_lock);
1318         } while (error == -EAGAIN);
1319
1320         if (error)
1321                 return -ENODEV;
1322
1323         ctrl->instance = instance;
1324         return 0;
1325 }
1326
1327 static void nvme_release_instance(struct nvme_ctrl *ctrl)
1328 {
1329         spin_lock(&dev_list_lock);
1330         ida_remove(&nvme_instance_ida, ctrl->instance);
1331         spin_unlock(&dev_list_lock);
1332 }
1333
1334 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
1335  {
1336         device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
1337
1338         spin_lock(&dev_list_lock);
1339         list_del(&ctrl->node);
1340         spin_unlock(&dev_list_lock);
1341 }
1342
1343 static void nvme_free_ctrl(struct kref *kref)
1344 {
1345         struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);
1346
1347         put_device(ctrl->device);
1348         nvme_release_instance(ctrl);
1349         ida_destroy(&ctrl->ns_ida);
1350
1351         ctrl->ops->free_ctrl(ctrl);
1352 }
1353
1354 void nvme_put_ctrl(struct nvme_ctrl *ctrl)
1355 {
1356         kref_put(&ctrl->kref, nvme_free_ctrl);
1357 }
1358
1359 /*
1360  * Initialize an NVMe controller structure.  This needs to be called during
1361  * the earliest initialization so that we have the initialized structure
1362  * around during probing.
1363  */
1364 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
1365                 const struct nvme_ctrl_ops *ops, unsigned long quirks)
1366 {
1367         int ret;
1368
1369         INIT_LIST_HEAD(&ctrl->namespaces);
1370         mutex_init(&ctrl->namespaces_mutex);
1371         kref_init(&ctrl->kref);
1372         ctrl->dev = dev;
1373         ctrl->ops = ops;
1374         ctrl->quirks = quirks;
1375
1376         ret = nvme_set_instance(ctrl);
1377         if (ret)
1378                 goto out;
1379
1380         ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
1381                                 MKDEV(nvme_char_major, ctrl->instance),
1382                                 dev, nvme_dev_attr_groups,
1383                                 "nvme%d", ctrl->instance);
1384         if (IS_ERR(ctrl->device)) {
1385                 ret = PTR_ERR(ctrl->device);
1386                 goto out_release_instance;
1387         }
1388         get_device(ctrl->device);
1389         dev_set_drvdata(ctrl->device, ctrl);
1390         ida_init(&ctrl->ns_ida);
1391
1392         spin_lock(&dev_list_lock);
1393         list_add_tail(&ctrl->node, &nvme_ctrl_list);
1394         spin_unlock(&dev_list_lock);
1395
1396         return 0;
1397 out_release_instance:
1398         nvme_release_instance(ctrl);
1399 out:
1400         return ret;
1401 }
1402
1403 /**
1404  * nvme_kill_queues(): Ends all namespace queues
1405  * @ctrl: the dead controller that needs to end
1406  *
1407  * Call this function when the driver determines it is unable to get the
1408  * controller in a state capable of servicing IO.
1409  */
1410 void nvme_kill_queues(struct nvme_ctrl *ctrl)
1411 {
1412         struct nvme_ns *ns;
1413
1414         mutex_lock(&ctrl->namespaces_mutex);
1415         list_for_each_entry(ns, &ctrl->namespaces, list) {
1416                 if (!kref_get_unless_zero(&ns->kref))
1417                         continue;
1418
1419                 /*
1420                  * Revalidating a dead namespace sets capacity to 0. This will
1421                  * end buffered writers dirtying pages that can't be synced.
1422                  */
1423                 if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
1424                         revalidate_disk(ns->disk);
1425
1426                 blk_set_queue_dying(ns->queue);
1427                 blk_mq_abort_requeue_list(ns->queue);
1428                 blk_mq_start_stopped_hw_queues(ns->queue, true);
1429
1430                 nvme_put_ns(ns);
1431         }
1432         mutex_unlock(&ctrl->namespaces_mutex);
1433 }
1434
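/*
 * Mark all namespace queues as stopped and quiesce their hardware queues so
 * that no new commands are dispatched; nvme_start_queues() reverses this once
 * the controller is usable again.
 */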
1435 void nvme_stop_queues(struct nvme_ctrl *ctrl)
1436 {
1437         struct nvme_ns *ns;
1438
1439         mutex_lock(&ctrl->namespaces_mutex);
1440         list_for_each_entry(ns, &ctrl->namespaces, list) {
1441                 spin_lock_irq(ns->queue->queue_lock);
1442                 queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
1443                 spin_unlock_irq(ns->queue->queue_lock);
1444
1445                 blk_mq_cancel_requeue_work(ns->queue);
1446                 blk_mq_stop_hw_queues(ns->queue);
1447         }
1448         mutex_unlock(&ctrl->namespaces_mutex);
1449 }
1450
1451 void nvme_start_queues(struct nvme_ctrl *ctrl)
1452 {
1453         struct nvme_ns *ns;
1454
1455         mutex_lock(&ctrl->namespaces_mutex);
1456         list_for_each_entry(ns, &ctrl->namespaces, list) {
1457                 queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
1458                 blk_mq_start_stopped_hw_queues(ns->queue, true);
1459                 blk_mq_kick_requeue_list(ns->queue);
1460         }
1461         mutex_unlock(&ctrl->namespaces_mutex);
1462 }
1463
1464 int __init nvme_core_init(void)
1465 {
1466         int result;
1467
1468         result = register_blkdev(nvme_major, "nvme");
1469         if (result < 0)
1470                 return result;
1471         else if (result > 0)
1472                 nvme_major = result;
1473
1474         result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
1475                                                         &nvme_dev_fops);
1476         if (result < 0)
1477                 goto unregister_blkdev;
1478         else if (result > 0)
1479                 nvme_char_major = result;
1480
1481         nvme_class = class_create(THIS_MODULE, "nvme");
1482         if (IS_ERR(nvme_class)) {
1483                 result = PTR_ERR(nvme_class);
1484                 goto unregister_chrdev;
1485         }
1486
1487         return 0;
1488
1489  unregister_chrdev:
1490         __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
1491  unregister_blkdev:
1492         unregister_blkdev(nvme_major, "nvme");
1493         return result;
1494 }
1495
1496 void nvme_core_exit(void)
1497 {
1498         unregister_blkdev(nvme_major, "nvme");
1499         class_destroy(nvme_class);
1500         __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
1501 }