Merge branch 'for-4.5/nvme' of git://git.kernel.dk/linux-block
author	Linus Torvalds <torvalds@linux-foundation.org>
	Fri, 22 Jan 2016 03:58:02 +0000 (19:58 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Fri, 22 Jan 2016 03:58:02 +0000 (19:58 -0800)
Pull NVMe updates from Jens Axboe:
 "Last branch for this series is the nvme changes.  It's in a separate
  branch to avoid splitting too much between core and NVMe changes,
  since NVMe is still helping drive some blk-mq changes.  That said, not
  a huge amount of core changes in here.  The grunt of the work is the
  continued split of the code"

* 'for-4.5/nvme' of git://git.kernel.dk/linux-block: (67 commits)
  uapi: update install list after nvme.h rename
  NVMe: Export NVMe attributes to sysfs group
  NVMe: Shutdown controller only for power-off
  NVMe: IO queue deletion re-write
  NVMe: Remove queue freezing on resets
  NVMe: Use a retryable error code on reset
  NVMe: Fix admin queue ring wrap
  nvme: make SG_IO support optional
  nvme: fixes for NVME_IOCTL_IO_CMD on the char device
  nvme: synchronize access to ctrl->namespaces
  nvme: Move nvme_freeze/unfreeze_queues to nvme core
  PCI/AER: include header file
  NVMe: Export namespace attributes to sysfs
  NVMe: Add pci error handlers
  block: remove REQ_NO_TIMEOUT flag
  nvme: merge iod and cmd_info
  nvme: meta_sg doesn't have to be an array
  nvme: properly free resources for cancelled command
  nvme: simplify completion handling
  nvme: special case AEN requests
  ...

19 files changed:
block/bio-integrity.c
block/blk-core.c
block/blk-mq.c
block/blk-timeout.c
block/blk.h
drivers/nvme/host/Kconfig
drivers/nvme/host/Makefile
drivers/nvme/host/core.c [new file with mode: 0644]
drivers/nvme/host/lightnvm.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/scsi.c
drivers/target/target_core_iblock.c
include/linux/aer.h
include/linux/bio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/nvme.h
include/uapi/linux/Kbuild

diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index f6325d5..711e4d8 100644
@@ -66,7 +66,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
        }
 
        if (unlikely(!bip))
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memset(bip, 0, sizeof(*bip));
 
@@ -89,7 +89,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
        return bip;
 err:
        mempool_free(bip, bs->bio_integrity_pool);
-       return NULL;
+       return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(bio_integrity_alloc);
 
@@ -298,10 +298,10 @@ int bio_integrity_prep(struct bio *bio)
 
        /* Allocate bio integrity payload and integrity vectors */
        bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
-       if (unlikely(bip == NULL)) {
+       if (IS_ERR(bip)) {
                printk(KERN_ERR "could not allocate data integrity bioset\n");
                kfree(buf);
-               return -EIO;
+               return PTR_ERR(bip);
        }
 
        bip->bip_flags |= BIP_BLOCK_INTEGRITY;
@@ -465,9 +465,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
        BUG_ON(bip_src == NULL);
 
        bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
-
-       if (bip == NULL)
-               return -EIO;
+       if (IS_ERR(bip))
+               return PTR_ERR(bip);
 
        memcpy(bip->bip_vec, bip_src->bip_vec,
               bip_src->bip_vcnt * sizeof(struct bio_vec));
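
With bio_integrity_alloc() now returning ERR_PTR() values instead of NULL, callers can propagate the underlying errno instead of collapsing every failure to -EIO, as the bio_integrity_prep() and bio_integrity_clone() hunks above do. A minimal caller-side sketch of the new convention (example_attach_integrity is a hypothetical helper, not part of this patch):

	#include <linux/bio.h>
	#include <linux/err.h>

	static int example_attach_integrity(struct bio *bio, unsigned int nr_vecs)
	{
		struct bio_integrity_payload *bip;

		bip = bio_integrity_alloc(bio, GFP_NOIO, nr_vecs);
		if (IS_ERR(bip))
			return PTR_ERR(bip);	/* e.g. -ENOMEM, not a blanket -EIO */

		/* ... populate bip and add integrity pages ... */
		return 0;
	}
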
diff --git a/block/blk-core.c b/block/blk-core.c
index 476244d..ab51685 100644
@@ -680,6 +680,13 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
+static void blk_rq_timed_out_timer(unsigned long data)
+{
+       struct request_queue *q = (struct request_queue *)data;
+
+       kblockd_schedule_work(&q->timeout_work);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        struct request_queue *q;
@@ -841,6 +848,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
        if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
                goto fail;
 
+       INIT_WORK(&q->timeout_work, blk_timeout_work);
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unprep_rq_fn         = NULL;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6889d71..4c0622f 100644
@@ -603,8 +603,6 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                        blk_mq_complete_request(rq, -EIO);
                return;
        }
-       if (rq->cmd_flags & REQ_NO_TIMEOUT)
-               return;
 
        if (time_after_eq(jiffies, rq->deadline)) {
                if (!blk_mark_rq_complete(rq))
@@ -615,15 +613,19 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
        }
 }
 
-static void blk_mq_rq_timer(unsigned long priv)
+static void blk_mq_timeout_work(struct work_struct *work)
 {
-       struct request_queue *q = (struct request_queue *)priv;
+       struct request_queue *q =
+               container_of(work, struct request_queue, timeout_work);
        struct blk_mq_timeout_data data = {
                .next           = 0,
                .next_set       = 0,
        };
        int i;
 
+       if (blk_queue_enter(q, true))
+               return;
+
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 
        if (data.next_set) {
@@ -638,6 +640,7 @@ static void blk_mq_rq_timer(unsigned long priv)
                                blk_mq_tag_idle(hctx);
                }
        }
+       blk_queue_exit(q);
 }
 
 /*
@@ -2008,7 +2011,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                hctxs[i]->queue_num = i;
        }
 
-       setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+       INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
        q->nr_queues = nr_cpu_ids;
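
Both the legacy and blk-mq paths now defer timeout handling from a timer to a work item, so the handler can block without running in timer context. A condensed sketch of the timer-to-workqueue conversion pattern used here (example_queue and its functions are illustrative stand-ins for struct request_queue, not kernel API):

	#include <linux/workqueue.h>

	struct example_queue {
		struct work_struct timeout_work;
		/* ... */
	};

	static void example_timeout_work(struct work_struct *work)
	{
		struct example_queue *q =
			container_of(work, struct example_queue, timeout_work);

		/* may sleep here; the real handlers additionally take a
		 * queue reference via blk_queue_enter() and drop it with
		 * blk_queue_exit(), as the hunks above show */
	}

	static void example_init(struct example_queue *q)
	{
		/* replaces setup_timer(&q->timeout, fn, (unsigned long)q) */
		INIT_WORK(&q->timeout_work, example_timeout_work);
	}
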
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 3610af5..a30441a 100644
@@ -127,13 +127,16 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
        }
 }
 
-void blk_rq_timed_out_timer(unsigned long data)
+void blk_timeout_work(struct work_struct *work)
 {
-       struct request_queue *q = (struct request_queue *) data;
+       struct request_queue *q =
+               container_of(work, struct request_queue, timeout_work);
        unsigned long flags, next = 0;
        struct request *rq, *tmp;
        int next_set = 0;
 
+       if (blk_queue_enter(q, true))
+               return;
        spin_lock_irqsave(q->queue_lock, flags);
 
        list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -143,6 +146,7 @@ void blk_rq_timed_out_timer(unsigned long data)
                mod_timer(&q->timeout, round_jiffies_up(next));
 
        spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_queue_exit(q);
 }
 
 /**
@@ -193,9 +197,6 @@ void blk_add_timer(struct request *req)
        struct request_queue *q = req->q;
        unsigned long expiry;
 
-       if (req->cmd_flags & REQ_NO_TIMEOUT)
-               return;
-
        /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
        if (!q->mq_ops && !q->rq_timed_out_fn)
                return;
diff --git a/block/blk.h b/block/blk.h
index c43926d..70e4aee 100644
@@ -93,7 +93,7 @@ static inline void blk_flush_integrity(void)
 }
 #endif
 
-void blk_rq_timed_out_timer(unsigned long data);
+void blk_timeout_work(struct work_struct *work);
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
 void blk_delete_timer(struct request *);
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 002a94a..5d62373 100644
@@ -8,3 +8,14 @@ config BLK_DEV_NVME
 
          To compile this driver as a module, choose M here: the
          module will be called nvme.
+
+config BLK_DEV_NVME_SCSI
+       bool "SCSI emulation for NVMe device nodes"
+       depends on BLK_DEV_NVME
+       ---help---
+         This adds support for the SG_IO ioctl on the NVMe character
+         and block device nodes, as well as a translation of a small
+         number of selected SCSI commands to NVMe commands in the NVMe
+         driver.  If you don't know what this means you probably want
+         to say N here, and if you know what it means you probably
+         want to say N as well.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a5fe239..51bf908 100644
@@ -1,5 +1,6 @@
 
 obj-$(CONFIG_BLK_DEV_NVME)     += nvme.o
 
-lightnvm-$(CONFIG_NVM) := lightnvm.o
-nvme-y         += pci.o scsi.o $(lightnvm-y)
+lightnvm-$(CONFIG_NVM)                 := lightnvm.o
+nvme-y                                 += core.o pci.o $(lightnvm-y)
+nvme-$(CONFIG_BLK_DEV_NVME_SCSI)        += scsi.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
new file mode 100644
index 0000000..c5bf001
--- /dev/null
+++ b/drivers/nvme/host/core.c
@@ -0,0 +1,1472 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list_sort.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pr.h>
+#include <linux/ptrace.h>
+#include <linux/nvme_ioctl.h>
+#include <linux/t10-pi.h>
+#include <scsi/sg.h>
+#include <asm/unaligned.h>
+
+#include "nvme.h"
+
+#define NVME_MINORS            (1U << MINORBITS)
+
+static int nvme_major;
+module_param(nvme_major, int, 0);
+
+static int nvme_char_major;
+module_param(nvme_char_major, int, 0);
+
+static LIST_HEAD(nvme_ctrl_list);
+DEFINE_SPINLOCK(dev_list_lock);
+
+static struct class *nvme_class;
+
+static void nvme_free_ns(struct kref *kref)
+{
+       struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
+
+       if (ns->type == NVME_NS_LIGHTNVM)
+               nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
+
+       spin_lock(&dev_list_lock);
+       ns->disk->private_data = NULL;
+       spin_unlock(&dev_list_lock);
+
+       nvme_put_ctrl(ns->ctrl);
+       put_disk(ns->disk);
+       kfree(ns);
+}
+
+static void nvme_put_ns(struct nvme_ns *ns)
+{
+       kref_put(&ns->kref, nvme_free_ns);
+}
+
+static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
+{
+       struct nvme_ns *ns;
+
+       spin_lock(&dev_list_lock);
+       ns = disk->private_data;
+       if (ns && !kref_get_unless_zero(&ns->kref))
+               ns = NULL;
+       spin_unlock(&dev_list_lock);
+
+       return ns;
+}
+
+void nvme_requeue_req(struct request *req)
+{
+       unsigned long flags;
+
+       blk_mq_requeue_request(req);
+       spin_lock_irqsave(req->q->queue_lock, flags);
+       if (!blk_queue_stopped(req->q))
+               blk_mq_kick_requeue_list(req->q);
+       spin_unlock_irqrestore(req->q->queue_lock, flags);
+}
+
+struct request *nvme_alloc_request(struct request_queue *q,
+               struct nvme_command *cmd, unsigned int flags)
+{
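+       /*
+        * Bit 0 of an NVMe opcode encodes the data transfer direction:
+        * odd opcodes transfer data from host to controller (writes).
+        */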
+       bool write = cmd->common.opcode & 1;
+       struct request *req;
+
+       req = blk_mq_alloc_request(q, write, flags);
+       if (IS_ERR(req))
+               return req;
+
+       req->cmd_type = REQ_TYPE_DRV_PRIV;
+       req->cmd_flags |= REQ_FAILFAST_DRIVER;
+       req->__data_len = 0;
+       req->__sector = (sector_t) -1;
+       req->bio = req->biotail = NULL;
+
+       req->cmd = (unsigned char *)cmd;
+       req->cmd_len = sizeof(struct nvme_command);
+       req->special = (void *)0;
+
+       return req;
+}
+
+/*
+ * Returns 0 on success.  If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code
+ */
+int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+               void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
+{
+       struct request *req;
+       int ret;
+
+       req = nvme_alloc_request(q, cmd, 0);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
+       req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+       if (buffer && bufflen) {
+               ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
+               if (ret)
+                       goto out;
+       }
+
+       blk_execute_rq(req->q, NULL, req, 0);
+       if (result)
+               *result = (u32)(uintptr_t)req->special;
+       ret = req->errors;
+ out:
+       blk_mq_free_request(req);
+       return ret;
+}
+
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+               void *buffer, unsigned bufflen)
+{
+       return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
+}
+
+int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+               void __user *ubuffer, unsigned bufflen,
+               void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+               u32 *result, unsigned timeout)
+{
+       bool write = cmd->common.opcode & 1;
+       struct nvme_ns *ns = q->queuedata;
+       struct gendisk *disk = ns ? ns->disk : NULL;
+       struct request *req;
+       struct bio *bio = NULL;
+       void *meta = NULL;
+       int ret;
+
+       req = nvme_alloc_request(q, cmd, 0);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
+       req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+       if (ubuffer && bufflen) {
+               ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+                               GFP_KERNEL);
+               if (ret)
+                       goto out;
+               bio = req->bio;
+
+               if (!disk)
+                       goto submit;
+               bio->bi_bdev = bdget_disk(disk, 0);
+               if (!bio->bi_bdev) {
+                       ret = -ENODEV;
+                       goto out_unmap;
+               }
+
+               if (meta_buffer) {
+                       struct bio_integrity_payload *bip;
+
+                       meta = kmalloc(meta_len, GFP_KERNEL);
+                       if (!meta) {
+                               ret = -ENOMEM;
+                               goto out_unmap;
+                       }
+
+                       if (write) {
+                               if (copy_from_user(meta, meta_buffer,
+                                               meta_len)) {
+                                       ret = -EFAULT;
+                                       goto out_free_meta;
+                               }
+                       }
+
+                       bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
+                       if (IS_ERR(bip)) {
+                               ret = PTR_ERR(bip);
+                               goto out_free_meta;
+                       }
+
+                       bip->bip_iter.bi_size = meta_len;
+                       bip->bip_iter.bi_sector = meta_seed;
+
+                       ret = bio_integrity_add_page(bio, virt_to_page(meta),
+                                       meta_len, offset_in_page(meta));
+                       if (ret != meta_len) {
+                               ret = -ENOMEM;
+                               goto out_free_meta;
+                       }
+               }
+       }
+ submit:
+       blk_execute_rq(req->q, disk, req, 0);
+       ret = req->errors;
+       if (result)
+               *result = (u32)(uintptr_t)req->special;
+       if (meta && !ret && !write) {
+               if (copy_to_user(meta_buffer, meta, meta_len))
+                       ret = -EFAULT;
+       }
+ out_free_meta:
+       kfree(meta);
+ out_unmap:
+       if (bio) {
+               if (disk && bio->bi_bdev)
+                       bdput(bio->bi_bdev);
+               blk_rq_unmap_user(bio);
+       }
+ out:
+       blk_mq_free_request(req);
+       return ret;
+}
+
+int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+               void __user *ubuffer, unsigned bufflen, u32 *result,
+               unsigned timeout)
+{
+       return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
+                       result, timeout);
+}
+
+int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
+{
+       struct nvme_command c = { };
+       int error;
+
+       /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
+       c.identify.opcode = nvme_admin_identify;
+       c.identify.cns = cpu_to_le32(1);
+
+       *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
+       if (!*id)
+               return -ENOMEM;
+
+       error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
+                       sizeof(struct nvme_id_ctrl));
+       if (error)
+               kfree(*id);
+       return error;
+}
+
+static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
+{
+       struct nvme_command c = { };
+
+       c.identify.opcode = nvme_admin_identify;
+       c.identify.cns = cpu_to_le32(2);
+       c.identify.nsid = cpu_to_le32(nsid);
+       return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
+}
+
+int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
+               struct nvme_id_ns **id)
+{
+       struct nvme_command c = { };
+       int error;
+
+       /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
+       c.identify.opcode = nvme_admin_identify;
+       c.identify.nsid = cpu_to_le32(nsid);
+
+       *id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
+       if (!*id)
+               return -ENOMEM;
+
+       error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
+                       sizeof(struct nvme_id_ns));
+       if (error)
+               kfree(*id);
+       return error;
+}
+
+int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
+                                       dma_addr_t dma_addr, u32 *result)
+{
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.features.opcode = nvme_admin_get_features;
+       c.features.nsid = cpu_to_le32(nsid);
+       c.features.prp1 = cpu_to_le64(dma_addr);
+       c.features.fid = cpu_to_le32(fid);
+
+       return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+}
+
+int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
+                                       dma_addr_t dma_addr, u32 *result)
+{
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.features.opcode = nvme_admin_set_features;
+       c.features.prp1 = cpu_to_le64(dma_addr);
+       c.features.fid = cpu_to_le32(fid);
+       c.features.dword11 = cpu_to_le32(dword11);
+
+       return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+}
+
+int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
+{
+       struct nvme_command c = { };
+       int error;
+
+       c.common.opcode = nvme_admin_get_log_page;
+       c.common.nsid = cpu_to_le32(0xFFFFFFFF);
+       c.common.cdw10[0] = cpu_to_le32(
+                       (((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
+                        NVME_LOG_SMART);
+
+       *log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
+       if (!*log)
+               return -ENOMEM;
+
+       error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
+                       sizeof(struct nvme_smart_log));
+       if (error)
+               kfree(*log);
+       return error;
+}
+
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
+{
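+       /*
+        * Set Features 07h (Number of Queues) takes zero-based counts:
+        * requested submission queues in bits 15:0 of dword11, completion
+        * queues in bits 31:16, so e.g. *count == 4 yields 0x00030003.
+        * The command result reports the allocated counts the same way.
+        */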
+       u32 q_count = (*count - 1) | ((*count - 1) << 16);
+       u32 result;
+       int status, nr_io_queues;
+
+       status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
+                       &result);
+       if (status)
+               return status;
+
+       nr_io_queues = min(result & 0xffff, result >> 16) + 1;
+       *count = min(*count, nr_io_queues);
+       return 0;
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+       struct nvme_user_io io;
+       struct nvme_command c;
+       unsigned length, meta_len;
+       void __user *metadata;
+
+       if (copy_from_user(&io, uio, sizeof(io)))
+               return -EFAULT;
+
+       switch (io.opcode) {
+       case nvme_cmd_write:
+       case nvme_cmd_read:
+       case nvme_cmd_compare:
+               break;
+       default:
+               return -EINVAL;
+       }
+
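+       /* io.nblocks is a zero-based block count, hence the +1 conversions below */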
+       length = (io.nblocks + 1) << ns->lba_shift;
+       meta_len = (io.nblocks + 1) * ns->ms;
+       metadata = (void __user *)(uintptr_t)io.metadata;
+
+       if (ns->ext) {
+               length += meta_len;
+               meta_len = 0;
+       } else if (meta_len) {
+               if ((io.metadata & 3) || !io.metadata)
+                       return -EINVAL;
+       }
+
+       memset(&c, 0, sizeof(c));
+       c.rw.opcode = io.opcode;
+       c.rw.flags = io.flags;
+       c.rw.nsid = cpu_to_le32(ns->ns_id);
+       c.rw.slba = cpu_to_le64(io.slba);
+       c.rw.length = cpu_to_le16(io.nblocks);
+       c.rw.control = cpu_to_le16(io.control);
+       c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+       c.rw.reftag = cpu_to_le32(io.reftag);
+       c.rw.apptag = cpu_to_le16(io.apptag);
+       c.rw.appmask = cpu_to_le16(io.appmask);
+
+       return __nvme_submit_user_cmd(ns->queue, &c,
+                       (void __user *)(uintptr_t)io.addr, length,
+                       metadata, meta_len, io.slba, NULL, 0);
+}
+
+static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+                       struct nvme_passthru_cmd __user *ucmd)
+{
+       struct nvme_passthru_cmd cmd;
+       struct nvme_command c;
+       unsigned timeout = 0;
+       int status;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+               return -EFAULT;
+
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = cmd.opcode;
+       c.common.flags = cmd.flags;
+       c.common.nsid = cpu_to_le32(cmd.nsid);
+       c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+       c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+       c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+       c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+       c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+       c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+       c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+       c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+       if (cmd.timeout_ms)
+               timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+       status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+                       (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
+                       &cmd.result, timeout);
+       if (status >= 0) {
+               if (put_user(cmd.result, &ucmd->result))
+                       return -EFAULT;
+       }
+
+       return status;
+}
+
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+               unsigned int cmd, unsigned long arg)
+{
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+       switch (cmd) {
+       case NVME_IOCTL_ID:
+               force_successful_syscall_return();
+               return ns->ns_id;
+       case NVME_IOCTL_ADMIN_CMD:
+               return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
+       case NVME_IOCTL_IO_CMD:
+               return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
+       case NVME_IOCTL_SUBMIT_IO:
+               return nvme_submit_io(ns, (void __user *)arg);
+#ifdef CONFIG_BLK_DEV_NVME_SCSI
+       case SG_GET_VERSION_NUM:
+               return nvme_sg_get_version_num((void __user *)arg);
+       case SG_IO:
+               return nvme_sg_io(ns, (void __user *)arg);
+#endif
+       default:
+               return -ENOTTY;
+       }
+}
+
+#ifdef CONFIG_COMPAT
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+                       unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case SG_IO:
+               return -ENOIOCTLCMD;
+       }
+       return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl      NULL
+#endif
+
+static int nvme_open(struct block_device *bdev, fmode_t mode)
+{
+       return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
+}
+
+static void nvme_release(struct gendisk *disk, fmode_t mode)
+{
+       nvme_put_ns(disk->private_data);
+}
+
+static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+       /* some standard values */
+       geo->heads = 1 << 6;
+       geo->sectors = 1 << 5;
+       geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
+       return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+       struct blk_integrity integrity;
+
+       switch (ns->pi_type) {
+       case NVME_NS_DPS_PI_TYPE3:
+               integrity.profile = &t10_pi_type3_crc;
+               break;
+       case NVME_NS_DPS_PI_TYPE1:
+       case NVME_NS_DPS_PI_TYPE2:
+               integrity.profile = &t10_pi_type1_crc;
+               break;
+       default:
+               integrity.profile = NULL;
+               break;
+       }
+       integrity.tuple_size = ns->ms;
+       blk_integrity_register(ns->disk, &integrity);
+       blk_queue_max_integrity_segments(ns->queue, 1);
+}
+#else
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+       u32 logical_block_size = queue_logical_block_size(ns->queue);
+       ns->queue->limits.discard_zeroes_data = 0;
+       ns->queue->limits.discard_alignment = logical_block_size;
+       ns->queue->limits.discard_granularity = logical_block_size;
+       blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
+       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
+static int nvme_revalidate_disk(struct gendisk *disk)
+{
+       struct nvme_ns *ns = disk->private_data;
+       struct nvme_id_ns *id;
+       u8 lbaf, pi_type;
+       u16 old_ms;
+       unsigned short bs;
+
+       if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
+               dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
+                               __func__, ns->ctrl->instance, ns->ns_id);
+               return -ENODEV;
+       }
+       if (id->ncap == 0) {
+               kfree(id);
+               return -ENODEV;
+       }
+
+       if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
+               if (nvme_nvm_register(ns->queue, disk->disk_name)) {
+                       dev_warn(ns->ctrl->dev,
+                               "%s: LightNVM init failure\n", __func__);
+                       kfree(id);
+                       return -ENODEV;
+               }
+               ns->type = NVME_NS_LIGHTNVM;
+       }
+
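+       /* EUI-64 was added in NVMe 1.1, the 16-byte NGUID in NVMe 1.2 */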
+       if (ns->ctrl->vs >= NVME_VS(1, 1))
+               memcpy(ns->eui, id->eui64, sizeof(ns->eui));
+       if (ns->ctrl->vs >= NVME_VS(1, 2))
+               memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));
+
+       old_ms = ns->ms;
+       lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
+       ns->lba_shift = id->lbaf[lbaf].ds;
+       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+       ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
+
+       /*
+        * If identify namespace failed, use the default 512 byte block size
+        * so the block layer can use it before failing reads/writes for a
+        * 0 capacity namespace.
+        */
+       if (ns->lba_shift == 0)
+               ns->lba_shift = 9;
+       bs = 1 << ns->lba_shift;
+       /* XXX: PI implementation requires metadata equal t10 pi tuple size */
+       pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
+                                       id->dps & NVME_NS_DPS_PI_MASK : 0;
+
+       blk_mq_freeze_queue(disk->queue);
+       if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
+                               ns->ms != old_ms ||
+                               bs != queue_logical_block_size(disk->queue) ||
+                               (ns->ms && ns->ext)))
+               blk_integrity_unregister(disk);
+
+       ns->pi_type = pi_type;
+       blk_queue_logical_block_size(ns->queue, bs);
+
+       if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
+               nvme_init_integrity(ns);
+       if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
+               set_capacity(disk, 0);
+       else
+               set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+       if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
+               nvme_config_discard(ns);
+       blk_mq_unfreeze_queue(disk->queue);
+
+       kfree(id);
+       return 0;
+}
+
+static char nvme_pr_type(enum pr_type type)
+{
+       switch (type) {
+       case PR_WRITE_EXCLUSIVE:
+               return 1;
+       case PR_EXCLUSIVE_ACCESS:
+               return 2;
+       case PR_WRITE_EXCLUSIVE_REG_ONLY:
+               return 3;
+       case PR_EXCLUSIVE_ACCESS_REG_ONLY:
+               return 4;
+       case PR_WRITE_EXCLUSIVE_ALL_REGS:
+               return 5;
+       case PR_EXCLUSIVE_ACCESS_ALL_REGS:
+               return 6;
+       default:
+               return 0;
+       }
+}
+
+static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
+                               u64 key, u64 sa_key, u8 op)
+{
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+       struct nvme_command c;
+       u8 data[16] = { 0, };
+
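+       /*
+        * Reservation commands take a 16-byte data buffer: the current
+        * key in bytes 0-7 and the new/preempt key in bytes 8-15.
+        */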
+       put_unaligned_le64(key, &data[0]);
+       put_unaligned_le64(sa_key, &data[8]);
+
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = op;
+       c.common.nsid = cpu_to_le32(ns->ns_id);
+       c.common.cdw10[0] = cpu_to_le32(cdw10);
+
+       return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+}
+
+static int nvme_pr_register(struct block_device *bdev, u64 old,
+               u64 new, unsigned flags)
+{
+       u32 cdw10;
+
+       if (flags & ~PR_FL_IGNORE_KEY)
+               return -EOPNOTSUPP;
+
+       cdw10 = old ? 2 : 0;
+       cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
+       cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
+       return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
+}
+
+static int nvme_pr_reserve(struct block_device *bdev, u64 key,
+               enum pr_type type, unsigned flags)
+{
+       u32 cdw10;
+
+       if (flags & ~PR_FL_IGNORE_KEY)
+               return -EOPNOTSUPP;
+
+       cdw10 = nvme_pr_type(type) << 8;
+       cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
+       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
+}
+
+static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
+               enum pr_type type, bool abort)
+{
+       u32 cdw10 = (nvme_pr_type(type) << 8) | (abort ? 2 : 1);
+       return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
+}
+
+static int nvme_pr_clear(struct block_device *bdev, u64 key)
+{
+       u32 cdw10 = 1 | (key ? 1 << 3 : 0);
+       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
+}
+
+static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+       u32 cdw10 = (nvme_pr_type(type) << 8) | (key ? 1 << 3 : 0);
+       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+}
+
+static const struct pr_ops nvme_pr_ops = {
+       .pr_register    = nvme_pr_register,
+       .pr_reserve     = nvme_pr_reserve,
+       .pr_release     = nvme_pr_release,
+       .pr_preempt     = nvme_pr_preempt,
+       .pr_clear       = nvme_pr_clear,
+};
+
+static const struct block_device_operations nvme_fops = {
+       .owner          = THIS_MODULE,
+       .ioctl          = nvme_ioctl,
+       .compat_ioctl   = nvme_compat_ioctl,
+       .open           = nvme_open,
+       .release        = nvme_release,
+       .getgeo         = nvme_getgeo,
+       .revalidate_disk = nvme_revalidate_disk,
+       .pr_ops         = &nvme_pr_ops,
+};
+
+static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
+{
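+       /* CAP.TO gives the worst-case ready transition time in 500ms units */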
+       unsigned long timeout =
+               ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+       u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
+       int ret;
+
+       while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+               if ((csts & NVME_CSTS_RDY) == bit)
+                       break;
+
+               msleep(100);
+               if (fatal_signal_pending(current))
+                       return -EINTR;
+               if (time_after(jiffies, timeout)) {
+                       dev_err(ctrl->dev,
+                               "Device not ready; aborting %s\n", enabled ?
+                                               "initialisation" : "reset");
+                       return -ENODEV;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * If the device has been passed off to us in an enabled state, just clear
+ * the enabled bit.  The spec says we should set the 'shutdown notification
+ * bits', but doing so may cause the device to complete commands to the
+ * admin queue ... and we don't know what memory that might be pointing at!
+ */
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
+{
+       int ret;
+
+       ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
+       ctrl->ctrl_config &= ~NVME_CC_ENABLE;
+
+       ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+       if (ret)
+               return ret;
+       return nvme_wait_ready(ctrl, cap, false);
+}
+
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
+{
+       /*
+        * Default to a 4K page size, with the intention to update this
+        * path in the future to accommodate architectures with differing
+        * kernel and IO page sizes.
+        */
+       unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
+       int ret;
+
+       if (page_shift < dev_page_min) {
+               dev_err(ctrl->dev,
+                       "Minimum device page size %u too large for host (%u)\n",
+                       1 << dev_page_min, 1 << page_shift);
+               return -ENODEV;
+       }
+
+       ctrl->page_size = 1 << page_shift;
+
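+       /* NVME_CC_IOSQES/IOCQES are log2 entry sizes: 64-byte SQEs, 16-byte CQEs */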
+       ctrl->ctrl_config = NVME_CC_CSS_NVM;
+       ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
+       ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+       ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+       ctrl->ctrl_config |= NVME_CC_ENABLE;
+
+       ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+       if (ret)
+               return ret;
+       return nvme_wait_ready(ctrl, cap, true);
+}
+
+int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
+{
+       unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
+       u32 csts;
+       int ret;
+
+       ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
+       ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
+
+       ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+       if (ret)
+               return ret;
+
+       while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+               if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
+                       break;
+
+               msleep(100);
+               if (fatal_signal_pending(current))
+                       return -EINTR;
+               if (time_after(jiffies, timeout)) {
+                       dev_err(ctrl->dev,
+                               "Device shutdown incomplete; abort shutdown\n");
+                       return -ENODEV;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Initialize the cached copies of the Identify data and various controller
+ * register in our nvme_ctrl structure.  This should be called as soon as
+ * the admin queue is fully up and running.
+ */
+int nvme_init_identify(struct nvme_ctrl *ctrl)
+{
+       struct nvme_id_ctrl *id;
+       u64 cap;
+       int ret, page_shift;
+
+       ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
+       if (ret) {
+               dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
+               return ret;
+       }
+
+       ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
+       if (ret) {
+               dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
+               return ret;
+       }
+       page_shift = NVME_CAP_MPSMIN(cap) + 12;
+
+       if (ctrl->vs >= NVME_VS(1, 1))
+               ctrl->subsystem = NVME_CAP_NSSRC(cap);
+
+       ret = nvme_identify_ctrl(ctrl, &id);
+       if (ret) {
+               dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
+               return -EIO;
+       }
+
+       ctrl->oncs = le16_to_cpup(&id->oncs);
+       atomic_set(&ctrl->abort_limit, id->acl + 1);
+       ctrl->vwc = id->vwc;
+       memcpy(ctrl->serial, id->sn, sizeof(id->sn));
+       memcpy(ctrl->model, id->mn, sizeof(id->mn));
+       memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
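+       /* MDTS is a power of two in units of the minimum page size (CAP.MPSMIN) */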
+       if (id->mdts)
+               ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
+       else
+               ctrl->max_hw_sectors = UINT_MAX;
+
+       if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
+               unsigned int max_hw_sectors;
+
+               ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
+               max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
+               if (ctrl->max_hw_sectors) {
+                       ctrl->max_hw_sectors = min(max_hw_sectors,
+                                                       ctrl->max_hw_sectors);
+               } else {
+                       ctrl->max_hw_sectors = max_hw_sectors;
+               }
+       }
+
+       kfree(id);
+       return 0;
+}
+
+static int nvme_dev_open(struct inode *inode, struct file *file)
+{
+       struct nvme_ctrl *ctrl;
+       int instance = iminor(inode);
+       int ret = -ENODEV;
+
+       spin_lock(&dev_list_lock);
+       list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
+               if (ctrl->instance != instance)
+                       continue;
+
+               if (!ctrl->admin_q) {
+                       ret = -EWOULDBLOCK;
+                       break;
+               }
+               if (!kref_get_unless_zero(&ctrl->kref))
+                       break;
+               file->private_data = ctrl;
+               ret = 0;
+               break;
+       }
+       spin_unlock(&dev_list_lock);
+
+       return ret;
+}
+
+static int nvme_dev_release(struct inode *inode, struct file *file)
+{
+       nvme_put_ctrl(file->private_data);
+       return 0;
+}
+
+static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
+{
+       struct nvme_ns *ns;
+       int ret;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       if (list_empty(&ctrl->namespaces)) {
+               ret = -ENOTTY;
+               goto out_unlock;
+       }
+
+       ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
+       if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
+               dev_warn(ctrl->dev,
+                       "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       dev_warn(ctrl->dev,
+               "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
+       kref_get(&ns->kref);
+       mutex_unlock(&ctrl->namespaces_mutex);
+
+       ret = nvme_user_cmd(ctrl, ns, argp);
+       nvme_put_ns(ns);
+       return ret;
+
+out_unlock:
+       mutex_unlock(&ctrl->namespaces_mutex);
+       return ret;
+}
+
+static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
+               unsigned long arg)
+{
+       struct nvme_ctrl *ctrl = file->private_data;
+       void __user *argp = (void __user *)arg;
+
+       switch (cmd) {
+       case NVME_IOCTL_ADMIN_CMD:
+               return nvme_user_cmd(ctrl, NULL, argp);
+       case NVME_IOCTL_IO_CMD:
+               return nvme_dev_user_cmd(ctrl, argp);
+       case NVME_IOCTL_RESET:
+               dev_warn(ctrl->dev, "resetting controller\n");
+               return ctrl->ops->reset_ctrl(ctrl);
+       case NVME_IOCTL_SUBSYS_RESET:
+               return nvme_reset_subsystem(ctrl);
+       default:
+               return -ENOTTY;
+       }
+}
+
+static const struct file_operations nvme_dev_fops = {
+       .owner          = THIS_MODULE,
+       .open           = nvme_dev_open,
+       .release        = nvme_dev_release,
+       .unlocked_ioctl = nvme_dev_ioctl,
+       .compat_ioctl   = nvme_dev_ioctl,
+};
+
+static ssize_t nvme_sysfs_reset(struct device *dev,
+                               struct device_attribute *attr, const char *buf,
+                               size_t count)
+{
+       struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+       int ret;
+
+       ret = ctrl->ops->reset_ctrl(ctrl);
+       if (ret < 0)
+               return ret;
+       return count;
+}
+static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+                                                               char *buf)
+{
+       struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+       return sprintf(buf, "%pU\n", ns->uuid);
+}
+static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
+
+static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
+                                                               char *buf)
+{
+       struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+       return sprintf(buf, "%8phd\n", ns->eui);
+}
+static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
+
+static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
+                                                               char *buf)
+{
+       struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+       return sprintf(buf, "%d\n", ns->ns_id);
+}
+static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
+
+static struct attribute *nvme_ns_attrs[] = {
+       &dev_attr_uuid.attr,
+       &dev_attr_eui.attr,
+       &dev_attr_nsid.attr,
+       NULL,
+};
+
+static umode_t nvme_attrs_are_visible(struct kobject *kobj,
+               struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+
+       if (a == &dev_attr_uuid.attr) {
+               if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+                       return 0;
+       }
+       if (a == &dev_attr_eui.attr) {
+               if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
+                       return 0;
+       }
+       return a->mode;
+}
+
+static const struct attribute_group nvme_ns_attr_group = {
+       .attrs          = nvme_ns_attrs,
+       .is_visible     = nvme_attrs_are_visible,
+};
+
+#define nvme_show_function(field)                                              \
+static ssize_t  field##_show(struct device *dev,                               \
+                           struct device_attribute *attr, char *buf)           \
+{                                                                              \
+        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);                         \
+        return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field);  \
+}                                                                              \
+static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+
+nvme_show_function(model);
+nvme_show_function(serial);
+nvme_show_function(firmware_rev);
+
+static struct attribute *nvme_dev_attrs[] = {
+       &dev_attr_reset_controller.attr,
+       &dev_attr_model.attr,
+       &dev_attr_serial.attr,
+       &dev_attr_firmware_rev.attr,
+       NULL
+};
+
+static struct attribute_group nvme_dev_attrs_group = {
+       .attrs = nvme_dev_attrs,
+};
+
+static const struct attribute_group *nvme_dev_attr_groups[] = {
+       &nvme_dev_attrs_group,
+       NULL,
+};
+
+static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+       struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
+       struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
+
+       return nsa->ns_id - nsb->ns_id;
+}
+
+static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+{
+       struct nvme_ns *ns;
+
+       lockdep_assert_held(&ctrl->namespaces_mutex);
+
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
+               if (ns->ns_id == nsid)
+                       return ns;
+               if (ns->ns_id > nsid)
+                       break;
+       }
+       return NULL;
+}
+
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+{
+       struct nvme_ns *ns;
+       struct gendisk *disk;
+       int node = dev_to_node(ctrl->dev);
+
+       lockdep_assert_held(&ctrl->namespaces_mutex);
+
+       ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
+       if (!ns)
+               return;
+
+       ns->queue = blk_mq_init_queue(ctrl->tagset);
+       if (IS_ERR(ns->queue))
+               goto out_free_ns;
+       queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
+       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+       ns->queue->queuedata = ns;
+       ns->ctrl = ctrl;
+
+       disk = alloc_disk_node(0, node);
+       if (!disk)
+               goto out_free_queue;
+
+       kref_init(&ns->kref);
+       ns->ns_id = nsid;
+       ns->disk = disk;
+       ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
+
+       blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
+       if (ctrl->max_hw_sectors) {
+               blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
+               blk_queue_max_segments(ns->queue,
+                       (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
+       }
+       if (ctrl->stripe_size)
+               blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
+       if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+               blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
+       blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
+
+       disk->major = nvme_major;
+       disk->first_minor = 0;
+       disk->fops = &nvme_fops;
+       disk->private_data = ns;
+       disk->queue = ns->queue;
+       disk->driverfs_dev = ctrl->device;
+       disk->flags = GENHD_FL_EXT_DEVT;
+       sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid);
+
+       if (nvme_revalidate_disk(ns->disk))
+               goto out_free_disk;
+
+       list_add_tail(&ns->list, &ctrl->namespaces);
+       kref_get(&ctrl->kref);
+       if (ns->type == NVME_NS_LIGHTNVM)
+               return;
+
+       add_disk(ns->disk);
+       if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
+                                       &nvme_ns_attr_group))
+               pr_warn("%s: failed to create sysfs group for identification\n",
+                       ns->disk->disk_name);
+       return;
+ out_free_disk:
+       kfree(disk);
+ out_free_queue:
+       blk_cleanup_queue(ns->queue);
+ out_free_ns:
+       kfree(ns);
+}
+
+static void nvme_ns_remove(struct nvme_ns *ns)
+{
+       bool kill = nvme_io_incapable(ns->ctrl) &&
+                       !blk_queue_dying(ns->queue);
+
+       lockdep_assert_held(&ns->ctrl->namespaces_mutex);
+
+       if (kill) {
+               blk_set_queue_dying(ns->queue);
+
+               /*
+                * The controller was shutdown first if we got here through
+                * device removal. The shutdown may requeue outstanding
+                * requests. These need to be aborted immediately so
+                * del_gendisk doesn't block indefinitely for their completion.
+                */
+               blk_mq_abort_requeue_list(ns->queue);
+       }
+       if (ns->disk->flags & GENHD_FL_UP) {
+               if (blk_get_integrity(ns->disk))
+                       blk_integrity_unregister(ns->disk);
+               sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
+                                       &nvme_ns_attr_group);
+               del_gendisk(ns->disk);
+       }
+       if (kill || !blk_queue_dying(ns->queue)) {
+               blk_mq_abort_requeue_list(ns->queue);
+               blk_cleanup_queue(ns->queue);
+       }
+       list_del_init(&ns->list);
+       nvme_put_ns(ns);
+}
+
+static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+{
+       struct nvme_ns *ns;
+
+       ns = nvme_find_ns(ctrl, nsid);
+       if (ns) {
+               if (revalidate_disk(ns->disk))
+                       nvme_ns_remove(ns);
+       } else
+               nvme_alloc_ns(ctrl, nsid);
+}
+
+static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
+{
+       struct nvme_ns *ns;
+       __le32 *ns_list;
+       unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
+       int ret = 0;
+
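+       /* an Identify namespace list is one 4k page, i.e. 1024 __le32 NSIDs */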
+       ns_list = kzalloc(0x1000, GFP_KERNEL);
+       if (!ns_list)
+               return -ENOMEM;
+
+       for (i = 0; i < num_lists; i++) {
+               ret = nvme_identify_ns_list(ctrl, prev, ns_list);
+               if (ret)
+                       goto out;
+
+               for (j = 0; j < min(nn, 1024U); j++) {
+                       nsid = le32_to_cpu(ns_list[j]);
+                       if (!nsid)
+                               goto out;
+
+                       nvme_validate_ns(ctrl, nsid);
+
+                       while (++prev < nsid) {
+                               ns = nvme_find_ns(ctrl, prev);
+                               if (ns)
+                                       nvme_ns_remove(ns);
+                       }
+               }
+               nn -= j;
+       }
+ out:
+       kfree(ns_list);
+       return ret;
+}
+
+static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
+{
+       struct nvme_ns *ns, *next;
+       unsigned i;
+
+       lockdep_assert_held(&ctrl->namespaces_mutex);
+
+       for (i = 1; i <= nn; i++)
+               nvme_validate_ns(ctrl, i);
+
+       list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
+               if (ns->ns_id > nn)
+                       nvme_ns_remove(ns);
+       }
+}
+
+void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
+{
+       struct nvme_id_ctrl *id;
+       unsigned nn;
+
+       if (nvme_identify_ctrl(ctrl, &id))
+               return;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       nn = le32_to_cpu(id->nn);
+       if (ctrl->vs >= NVME_VS(1, 1) &&
+           !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
+               if (!nvme_scan_ns_list(ctrl, nn))
+                       goto done;
+       }
+       __nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
+ done:
+       list_sort(NULL, &ctrl->namespaces, ns_cmp);
+       mutex_unlock(&ctrl->namespaces_mutex);
+       kfree(id);
+}
+
+void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
+{
+       struct nvme_ns *ns, *next;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
+               nvme_ns_remove(ns);
+       mutex_unlock(&ctrl->namespaces_mutex);
+}
+
+static DEFINE_IDA(nvme_instance_ida);
+
+static int nvme_set_instance(struct nvme_ctrl *ctrl)
+{
+       int instance, error;
+
+       do {
+               if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
+                       return -ENODEV;
+
+               spin_lock(&dev_list_lock);
+               error = ida_get_new(&nvme_instance_ida, &instance);
+               spin_unlock(&dev_list_lock);
+       } while (error == -EAGAIN);
+
+       if (error)
+               return -ENODEV;
+
+       ctrl->instance = instance;
+       return 0;
+}
+
+static void nvme_release_instance(struct nvme_ctrl *ctrl)
+{
+       spin_lock(&dev_list_lock);
+       ida_remove(&nvme_instance_ida, ctrl->instance);
+       spin_unlock(&dev_list_lock);
+}
+
+void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
+{
+       device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
+
+       spin_lock(&dev_list_lock);
+       list_del(&ctrl->node);
+       spin_unlock(&dev_list_lock);
+}
+
+static void nvme_free_ctrl(struct kref *kref)
+{
+       struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);
+
+       put_device(ctrl->device);
+       nvme_release_instance(ctrl);
+
+       ctrl->ops->free_ctrl(ctrl);
+}
+
+void nvme_put_ctrl(struct nvme_ctrl *ctrl)
+{
+       kref_put(&ctrl->kref, nvme_free_ctrl);
+}
+
+/*
+ * Initialize an NVMe controller structure.  This needs to be called during
+ * the earliest initialization so that we have the initialized structure
+ * around during probing.
+ */
+int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+               const struct nvme_ctrl_ops *ops, unsigned long quirks)
+{
+       int ret;
+
+       INIT_LIST_HEAD(&ctrl->namespaces);
+       mutex_init(&ctrl->namespaces_mutex);
+       kref_init(&ctrl->kref);
+       ctrl->dev = dev;
+       ctrl->ops = ops;
+       ctrl->quirks = quirks;
+
+       ret = nvme_set_instance(ctrl);
+       if (ret)
+               goto out;
+
+       ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
+                               MKDEV(nvme_char_major, ctrl->instance),
+                               dev, nvme_dev_attr_groups,
+                               "nvme%d", ctrl->instance);
+       if (IS_ERR(ctrl->device)) {
+               ret = PTR_ERR(ctrl->device);
+               goto out_release_instance;
+       }
+       get_device(ctrl->device);
+       dev_set_drvdata(ctrl->device, ctrl);
+
+       spin_lock(&dev_list_lock);
+       list_add_tail(&ctrl->node, &nvme_ctrl_list);
+       spin_unlock(&dev_list_lock);
+
+       return 0;
+out_release_instance:
+       nvme_release_instance(ctrl);
+out:
+       return ret;
+}
+
+void nvme_stop_queues(struct nvme_ctrl *ctrl)
+{
+       struct nvme_ns *ns;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
+               spin_lock_irq(ns->queue->queue_lock);
+               queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+               spin_unlock_irq(ns->queue->queue_lock);
+
+               blk_mq_cancel_requeue_work(ns->queue);
+               blk_mq_stop_hw_queues(ns->queue);
+       }
+       mutex_unlock(&ctrl->namespaces_mutex);
+}
+
+void nvme_start_queues(struct nvme_ctrl *ctrl)
+{
+       struct nvme_ns *ns;
+
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
+               queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+               blk_mq_start_stopped_hw_queues(ns->queue, true);
+               blk_mq_kick_requeue_list(ns->queue);
+       }
+       mutex_unlock(&ctrl->namespaces_mutex);
+}
+
+int __init nvme_core_init(void)
+{
+       int result;
+
+       result = register_blkdev(nvme_major, "nvme");
+       if (result < 0)
+               return result;
+       else if (result > 0)
+               nvme_major = result;
+
+       result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
+                                                       &nvme_dev_fops);
+       if (result < 0)
+               goto unregister_blkdev;
+       else if (result > 0)
+               nvme_char_major = result;
+
+       nvme_class = class_create(THIS_MODULE, "nvme");
+       if (IS_ERR(nvme_class)) {
+               result = PTR_ERR(nvme_class);
+               goto unregister_chrdev;
+       }
+
+       return 0;
+
+ unregister_chrdev:
+       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+ unregister_blkdev:
+       unregister_blkdev(nvme_major, "nvme");
+       return result;
+}
+
+void nvme_core_exit(void)
+{
+       unregister_blkdev(nvme_major, "nvme");
+       class_destroy(nvme_class);
+       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+}
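
The character device registered above (/dev/nvme<N>, backed by nvme_dev_fops) accepts the passthrough ioctls dispatched in nvme_dev_ioctl(). A hedged user-space sketch issuing Identify Controller (CNS 01h) through NVME_IOCTL_ADMIN_CMD; the device path is illustrative, error handling is abbreviated, and the model-number offset follows the NVMe Identify Controller layout:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		unsigned char id[4096];
		struct nvme_admin_cmd cmd;
		int fd = open("/dev/nvme0", O_RDONLY);

		if (fd < 0)
			return 1;
		memset(&cmd, 0, sizeof(cmd));
		cmd.opcode = 0x06;		/* Identify */
		cmd.addr = (uintptr_t)id;
		cmd.data_len = sizeof(id);
		cmd.cdw10 = 1;			/* CNS 01h: controller data structure */

		if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) != 0)
			return 1;
		printf("model: %.40s\n", (char *)id + 24);	/* MN field at byte 24 */
		return 0;
	}
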
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 71f2bbc..5cd3725 100644
@@ -294,7 +294,6 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
 {
        struct nvme_ns *ns = nvmdev->q->queuedata;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_nvm_id *nvme_nvm_id;
        struct nvme_nvm_command c = {};
        int ret;
@@ -307,7 +306,7 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
        if (!nvme_nvm_id)
                return -ENOMEM;
 
-       ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+       ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                nvme_nvm_id, sizeof(struct nvme_nvm_id));
        if (ret) {
                ret = -EIO;
@@ -332,9 +331,8 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                                nvm_l2p_update_fn *update_l2p, void *priv)
 {
        struct nvme_ns *ns = nvmdev->q->queuedata;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_nvm_command c = {};
-       u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
+       u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
        u32 nlb_pr_rq = len / sizeof(u64);
        u64 cmd_slba = slba;
        void *entries;
@@ -352,10 +350,10 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                c.l2p.slba = cpu_to_le64(cmd_slba);
                c.l2p.nlb = cpu_to_le32(cmd_nlb);
 
-               ret = nvme_submit_sync_cmd(dev->admin_q,
+               ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
                                (struct nvme_command *)&c, entries, len);
                if (ret) {
-                       dev_err(dev->dev, "L2P table transfer failed (%d)\n",
+                       dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
                                                                        ret);
                        ret = -EIO;
                        goto out;
@@ -381,7 +379,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 {
        struct request_queue *q = nvmdev->q;
        struct nvme_ns *ns = q->queuedata;
-       struct nvme_dev *dev = ns->dev;
+       struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
@@ -395,30 +393,30 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
        if (!bb_tbl)
                return -ENOMEM;
 
-       ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+       ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
-               dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
+               dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }
 
        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
-               dev_err(dev->dev, "bbt format mismatch\n");
+               dev_err(ctrl->dev, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }
 
        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
-               dev_err(dev->dev, "bbt version not supported\n");
+               dev_err(ctrl->dev, "bbt version not supported\n");
                goto out;
        }
 
        if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
                ret = -EINVAL;
-               dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)",
+               dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
                                        le32_to_cpu(bb_tbl->tblks), nr_blocks);
                goto out;
        }
@@ -434,7 +432,6 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
                                                                int type)
 {
        struct nvme_ns *ns = nvmdev->q->queuedata;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_nvm_command c = {};
        int ret = 0;
 
@@ -444,10 +441,10 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
        c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
        c.set_bb.value = type;
 
-       ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+       ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
-               dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
+               dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
        return ret;
 }
 
@@ -532,9 +529,8 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
 static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
 {
        struct nvme_ns *ns = nvmdev->q->queuedata;
-       struct nvme_dev *dev = ns->dev;
 
-       return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
+       return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
 }
 
 static void nvme_nvm_destroy_dma_pool(void *pool)
@@ -592,8 +588,9 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
 
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
-       struct nvme_dev *dev = ns->dev;
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
+       struct nvme_ctrl *ctrl = ns->ctrl;
+       /* XXX: this is poking into PCI structures from generic code! */
+       struct pci_dev *pdev = to_pci_dev(ctrl->dev);
 
        /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
        if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
index 044253d..4fb5bb7 100644 (file)
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
 
+enum {
+       /*
+        * Driver internal status code for commands that were cancelled due
+        * to timeouts or controller shutdown.  The value is negative so
+        * that it a) doesn't overlap with the unsigned hardware error codes,
+        * and b) can easily be tested for.
+        */
+       NVME_SC_CANCELLED               = -EINTR,
+};
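
Since NVME_SC_CANCELLED is negative while status codes reported by the controller are unsigned, completion paths can separate the two with a simple sign test. A hypothetical helper, not part of this patch:

	static inline bool nvme_status_is_cancelled(int status)
	{
		/* negative values are driver-internal; NVME_SC_CANCELLED
		 * is the only one defined so far */
		return status < 0;
	}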
+
 extern unsigned char nvme_io_timeout;
 #define NVME_IO_TIMEOUT        (nvme_io_timeout * HZ)
 
+extern unsigned char admin_timeout;
+#define ADMIN_TIMEOUT  (admin_timeout * HZ)
+
+extern unsigned char shutdown_timeout;
+#define SHUTDOWN_TIMEOUT       (shutdown_timeout * HZ)
+
 enum {
        NVME_NS_LBA             = 0,
        NVME_NS_LIGHTNVM        = 1,
 };
 
 /*
- * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ * List of workarounds for devices that require behavior not specified in
+ * the standard.
  */
-struct nvme_dev {
-       struct list_head node;
-       struct nvme_queue **queues;
+enum nvme_quirks {
+       /*
+        * Prefers I/O aligned to a stripe size specified in a vendor
+        * specific Identify field.
+        */
+       NVME_QUIRK_STRIPE_SIZE                  = (1 << 0),
+
+       /*
+        * The controller doesn't handle Identify values other than 0 or 1
+        * correctly.
+        */
+       NVME_QUIRK_IDENTIFY_CNS                 = (1 << 1),
+};
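
The quirks are plain bit flags, so code tests them with a bitwise AND against ctrl->quirks. A hedged usage sketch, assuming stripe_size has already been read out of the vendor-specific Identify field:

	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
		blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);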
+
+struct nvme_ctrl {
+       const struct nvme_ctrl_ops *ops;
        struct request_queue *admin_q;
-       struct blk_mq_tag_set tagset;
-       struct blk_mq_tag_set admin_tagset;
-       u32 __iomem *dbs;
        struct device *dev;
-       struct dma_pool *prp_page_pool;
-       struct dma_pool *prp_small_pool;
+       struct kref kref;
        int instance;
-       unsigned queue_count;
-       unsigned online_queues;
-       unsigned max_qid;
-       int q_depth;
-       u32 db_stride;
-       u32 ctrl_config;
-       struct msix_entry *entry;
-       struct nvme_bar __iomem *bar;
+       struct blk_mq_tag_set *tagset;
        struct list_head namespaces;
-       struct kref kref;
-       struct device *device;
-       struct work_struct reset_work;
-       struct work_struct probe_work;
-       struct work_struct scan_work;
+       struct mutex namespaces_mutex;
+       struct device *device;  /* char device */
+       struct list_head node;
+
        char name[12];
        char serial[20];
        char model[40];
        char firmware_rev[8];
-       bool subsystem;
+
+       u32 ctrl_config;
+
+       u32 page_size;
        u32 max_hw_sectors;
        u32 stripe_size;
-       u32 page_size;
-       void __iomem *cmb;
-       dma_addr_t cmb_dma_addr;
-       u64 cmb_size;
-       u32 cmbsz;
        u16 oncs;
-       u16 abort_limit;
+       atomic_t abort_limit;
        u8 event_limit;
        u8 vwc;
+       u32 vs;
+       bool subsystem;
+       unsigned long quirks;
 };
 
 /*
@@ -79,11 +98,14 @@ struct nvme_dev {
 struct nvme_ns {
        struct list_head list;
 
-       struct nvme_dev *dev;
+       struct nvme_ctrl *ctrl;
        struct request_queue *queue;
        struct gendisk *disk;
        struct kref kref;
 
+       u8 eui[8];
+       u8 uuid[16];
+
        unsigned ns_id;
        int lba_shift;
        u16 ms;
@@ -94,41 +116,156 @@ struct nvme_ns {
        u32 mode_select_block_len;
 };
 
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries.  You can't see it in this data structure because C doesn't let
- * me express that.  Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
-       unsigned long private;  /* For the use of the submitter of the I/O */
-       int npages;             /* In the PRP list. 0 means small pool in use */
-       int offset;             /* Of PRP list */
-       int nents;              /* Used in scatterlist */
-       int length;             /* Of data, in bytes */
-       dma_addr_t first_dma;
-       struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
-       struct scatterlist sg[0];
+struct nvme_ctrl_ops {
+       int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+       int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
+       int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+       bool (*io_incapable)(struct nvme_ctrl *ctrl);
+       int (*reset_ctrl)(struct nvme_ctrl *ctrl);
+       void (*free_ctrl)(struct nvme_ctrl *ctrl);
 };
 
+static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
+{
+       u32 val = 0;
+
+       if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
+               return false;
+       return val & NVME_CSTS_RDY;
+}
+
+static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
+{
+       u32 val = 0;
+
+       if (ctrl->ops->io_incapable(ctrl))
+               return true;
+       if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
+               return false;
+       return val & NVME_CSTS_CFS;
+}
+
+static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
+{
+       if (!ctrl->subsystem)
+               return -ENOTTY;
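+       /* 0x4E564D65 is ASCII "NVMe", the NSSR reset magic defined by the spec */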
+       return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+}
+
 static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
 {
        return (sector >> (ns->lba_shift - 9));
 }
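
The block layer always counts in 512-byte sectors, so the conversion is just a shift by the difference between the namespace's LBA shift and 9. For example, on a namespace formatted with 4096-byte LBAs (lba_shift == 12):

	u64 lba = nvme_block_nr(ns, 8);	/* 8 >> (12 - 9) == 1 */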
 
+static inline void nvme_setup_flush(struct nvme_ns *ns,
+               struct nvme_command *cmnd)
+{
+       memset(cmnd, 0, sizeof(*cmnd));
+       cmnd->common.opcode = nvme_cmd_flush;
+       cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+}
+
+static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
+               struct nvme_command *cmnd)
+{
+       u16 control = 0;
+       u32 dsmgmt = 0;
+
+       if (req->cmd_flags & REQ_FUA)
+               control |= NVME_RW_FUA;
+       if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+               control |= NVME_RW_LR;
+
+       if (req->cmd_flags & REQ_RAHEAD)
+               dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+       memset(cmnd, 0, sizeof(*cmnd));
+       cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+       cmnd->rw.command_id = req->tag;
+       cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+       cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+       if (ns->ms) {
+               switch (ns->pi_type) {
+               case NVME_NS_DPS_PI_TYPE3:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD;
+                       break;
+               case NVME_NS_DPS_PI_TYPE1:
+               case NVME_NS_DPS_PI_TYPE2:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD |
+                                       NVME_RW_PRINFO_PRCHK_REF;
+                       cmnd->rw.reftag = cpu_to_le32(
+                                       nvme_block_nr(ns, blk_rq_pos(req)));
+                       break;
+               }
+               if (!blk_integrity_rq(req))
+                       control |= NVME_RW_PRINFO_PRACT;
+       }
+
+       cmnd->rw.control = cpu_to_le16(control);
+       cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+}
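
Note that NVMe encodes the sector count zero-based. Illustrative arithmetic for a 4096-byte request on a namespace with 512-byte LBAs (lba_shift == 9):

	/* (4096 >> 9) - 1 == 7: the command covers LBAs slba through slba + 7 */
	cmnd->rw.length = cpu_to_le16((4096 >> 9) - 1);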
+
+static inline int nvme_error_status(u16 status)
+{
+       switch (status & 0x7ff) {
+       case NVME_SC_SUCCESS:
+               return 0;
+       case NVME_SC_CAP_EXCEEDED:
+               return -ENOSPC;
+       default:
+               return -EIO;
+       }
+}
+
+static inline bool nvme_req_needs_retry(struct request *req, u16 status)
+{
+       return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
+               (jiffies - req->start_time) < req->timeout;
+}
+
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+               const struct nvme_ctrl_ops *ops, unsigned long quirks);
+void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
+void nvme_put_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_identify(struct nvme_ctrl *ctrl);
+
+void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
+void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+
+void nvme_stop_queues(struct nvme_ctrl *ctrl);
+void nvme_start_queues(struct nvme_ctrl *ctrl);
+
+struct request *nvme_alloc_request(struct request_queue *q,
+               struct nvme_command *cmd, unsigned int flags);
+void nvme_requeue_req(struct request *req);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-               void *buffer, void __user *ubuffer, unsigned bufflen,
+               void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
+int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+               void __user *ubuffer, unsigned bufflen, u32 *result,
+               unsigned timeout);
+int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+               void __user *ubuffer, unsigned bufflen,
+               void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
                u32 *result, unsigned timeout);
-int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
-int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
+int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
+int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
                struct nvme_id_ns **id);
-int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
-int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
+int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
                        dma_addr_t dma_addr, u32 *result);
-int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                        dma_addr_t dma_addr, u32 *result);
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+
+extern spinlock_t dev_list_lock;
 
 struct sg_io_hdr;
 
@@ -154,4 +291,7 @@ static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *i
 }
 #endif /* CONFIG_NVM */
 
+int __init nvme_core_init(void);
+void nvme_core_exit(void);
+
 #endif /* _NVME_H */
index f5c0e26..72ef832 100644 (file)
@@ -12,6 +12,7 @@
  * more details.
  */
 
+#include <linux/aer.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
 #include <linux/kdev_t.h>
 #include <linux/kthread.h>
 #include <linux/kernel.h>
-#include <linux/list_sort.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/poison.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/t10-pi.h>
 #include <linux/types.h>
-#include <linux/pr.h>
-#include <scsi/sg.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <asm/unaligned.h>
 
-#include <uapi/linux/nvme_ioctl.h>
 #include "nvme.h"
 
-#define NVME_MINORS            (1U << MINORBITS)
 #define NVME_Q_DEPTH           1024
 #define NVME_AQ_DEPTH          256
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
-#define ADMIN_TIMEOUT          (admin_timeout * HZ)
-#define SHUTDOWN_TIMEOUT       (shutdown_timeout * HZ)
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_NR_AEN_COMMANDS   1
+#define NVME_AQ_BLKMQ_DEPTH    (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
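
Only NVME_AQ_BLKMQ_DEPTH tags are exposed to blk-mq, leaving command ids at or above that mark free for driver-submitted AENs. A hypothetical predicate mirroring the check made in the completion path further down:

	static inline bool nvme_is_aen_completion(u16 qid, u16 command_id)
	{
		return qid == 0 && command_id >= NVME_AQ_BLKMQ_DEPTH;
	}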
 
-static unsigned char admin_timeout = 60;
+unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
 
@@ -63,16 +65,10 @@ unsigned char nvme_io_timeout = 30;
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
-static unsigned char shutdown_timeout = 5;
+unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
 
-static int nvme_major;
-module_param(nvme_major, int, 0);
-
-static int nvme_char_major;
-module_param(nvme_char_major, int, 0);
-
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -80,28 +76,60 @@ static bool use_cmb_sqes = true;
 module_param(use_cmb_sqes, bool, 0644);
 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
 
-static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
 
-static struct class *nvme_class;
+struct nvme_dev;
+struct nvme_queue;
 
-static int __nvme_reset(struct nvme_dev *dev);
 static int nvme_reset(struct nvme_dev *dev);
 static void nvme_process_cq(struct nvme_queue *nvmeq);
-static void nvme_dead_ctrl(struct nvme_dev *dev);
+static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
+static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
 
-struct async_cmd_info {
-       struct kthread_work work;
-       struct kthread_worker *worker;
-       struct request *req;
-       u32 result;
-       int status;
-       void *ctx;
+/*
+ * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+       struct list_head node;
+       struct nvme_queue **queues;
+       struct blk_mq_tag_set tagset;
+       struct blk_mq_tag_set admin_tagset;
+       u32 __iomem *dbs;
+       struct device *dev;
+       struct dma_pool *prp_page_pool;
+       struct dma_pool *prp_small_pool;
+       unsigned queue_count;
+       unsigned online_queues;
+       unsigned max_qid;
+       int q_depth;
+       u32 db_stride;
+       struct msix_entry *entry;
+       void __iomem *bar;
+       struct work_struct reset_work;
+       struct work_struct scan_work;
+       struct work_struct remove_work;
+       struct mutex shutdown_lock;
+       bool subsystem;
+       void __iomem *cmb;
+       dma_addr_t cmb_dma_addr;
+       u64 cmb_size;
+       u32 cmbsz;
+       unsigned long flags;
+
+#define NVME_CTRL_RESETTING    0
+
+       struct nvme_ctrl ctrl;
+       struct completion ioq_wait;
 };
 
+static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
+{
+       return container_of(ctrl, struct nvme_dev, ctrl);
+}
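
The transport recovers its private structure from the generic nvme_ctrl with this container_of() helper. A sketch of how the PCI implementation of ->reg_read32 might use it, assuming the controller registers stay mapped at dev->bar:

	static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
	{
		*val = readl(to_nvme_dev(ctrl)->bar + off);
		return 0;
	}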
+
 /*
  * An NVM Express queue.  Each device has at least two (one for admin
  * commands and one for I/O commands).
@@ -126,7 +154,24 @@ struct nvme_queue {
        u16 qid;
        u8 cq_phase;
        u8 cqe_seen;
-       struct async_cmd_info cmdinfo;
+};
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries.  You can't see it in this data structure because C doesn't let
+ * me express that.  Use nvme_init_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+       struct nvme_queue *nvmeq;
+       int aborted;
+       int npages;             /* In the PRP list. 0 means small pool in use */
+       int nents;              /* Used in scatterlist */
+       int length;             /* Of data, in bytes */
+       dma_addr_t first_dma;
+       struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
+       struct scatterlist *sg;
+       struct scatterlist inline_sg[0];
 };
 
 /*
@@ -148,23 +193,11 @@ static inline void _nvme_check_size(void)
        BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
-typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
-                                               struct nvme_completion *);
-
-struct nvme_cmd_info {
-       nvme_completion_fn fn;
-       void *ctx;
-       int aborted;
-       struct nvme_queue *nvmeq;
-       struct nvme_iod iod[0];
-};
-
 /*
  * Max size of iod being embedded in the request payload
  */
 #define NVME_INT_PAGES         2
-#define NVME_INT_BYTES(dev)    (NVME_INT_PAGES * (dev)->page_size)
-#define NVME_INT_MASK          0x01
+#define NVME_INT_BYTES(dev)    (NVME_INT_PAGES * (dev)->ctrl.page_size)
 
 /*
  * Will slightly overestimate the number of pages needed.  This is OK
@@ -173,19 +206,22 @@ struct nvme_cmd_info {
  */
 static int nvme_npages(unsigned size, struct nvme_dev *dev)
 {
-       unsigned nprps = DIV_ROUND_UP(size + dev->page_size, dev->page_size);
+       unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
+                                     dev->ctrl.page_size);
        return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
 }
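
Worked example, assuming 4096-byte controller pages and PAGE_SIZE == 4096: a 64 KiB transfer gives nprps = DIV_ROUND_UP(65536 + 4096, 4096) = 17 PRP entries, hence 8 * 17 = 136 bytes of list, and DIV_ROUND_UP(136, 4088) = 1 page, the slight overestimate the comment above accepts.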
 
-static unsigned int nvme_cmd_size(struct nvme_dev *dev)
+static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
+               unsigned int size, unsigned int nseg)
 {
-       unsigned int ret = sizeof(struct nvme_cmd_info);
-
-       ret += sizeof(struct nvme_iod);
-       ret += sizeof(__le64 *) * nvme_npages(NVME_INT_BYTES(dev), dev);
-       ret += sizeof(struct scatterlist) * NVME_INT_PAGES;
+       return sizeof(__le64 *) * nvme_npages(size, dev) +
+                       sizeof(struct scatterlist) * nseg;
+}
 
-       return ret;
+static unsigned int nvme_cmd_size(struct nvme_dev *dev)
+{
+       return sizeof(struct nvme_iod) +
+               nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
 }
 
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -215,11 +251,11 @@ static int nvme_admin_init_request(void *data, struct request *req,
                                unsigned int numa_node)
 {
        struct nvme_dev *dev = data;
-       struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = dev->queues[0];
 
        BUG_ON(!nvmeq);
-       cmd->nvmeq = nvmeq;
+       iod->nvmeq = nvmeq;
        return 0;
 }
 
@@ -242,148 +278,36 @@ static int nvme_init_request(void *data, struct request *req,
                                unsigned int numa_node)
 {
        struct nvme_dev *dev = data;
-       struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
 
        BUG_ON(!nvmeq);
-       cmd->nvmeq = nvmeq;
+       iod->nvmeq = nvmeq;
        return 0;
 }
 
-static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
-                               nvme_completion_fn handler)
+static void nvme_complete_async_event(struct nvme_dev *dev,
+               struct nvme_completion *cqe)
 {
-       cmd->fn = handler;
-       cmd->ctx = ctx;
-       cmd->aborted = 0;
-       blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
-}
-
-static void *iod_get_private(struct nvme_iod *iod)
-{
-       return (void *) (iod->private & ~0x1UL);
-}
-
-/*
- * If bit 0 is set, the iod is embedded in the request payload.
- */
-static bool iod_should_kfree(struct nvme_iod *iod)
-{
-       return (iod->private & NVME_INT_MASK) == 0;
-}
-
-/* Special values must be less than 0x1000 */
-#define CMD_CTX_BASE           ((void *)POISON_POINTER_DELTA)
-#define CMD_CTX_CANCELLED      (0x30C + CMD_CTX_BASE)
-#define CMD_CTX_COMPLETED      (0x310 + CMD_CTX_BASE)
-#define CMD_CTX_INVALID                (0x314 + CMD_CTX_BASE)
-
-static void special_completion(struct nvme_queue *nvmeq, void *ctx,
-                                               struct nvme_completion *cqe)
-{
-       if (ctx == CMD_CTX_CANCELLED)
-               return;
-       if (ctx == CMD_CTX_COMPLETED) {
-               dev_warn(nvmeq->q_dmadev,
-                               "completed id %d twice on queue %d\n",
-                               cqe->command_id, le16_to_cpup(&cqe->sq_id));
-               return;
-       }
-       if (ctx == CMD_CTX_INVALID) {
-               dev_warn(nvmeq->q_dmadev,
-                               "invalid id %d completed on queue %d\n",
-                               cqe->command_id, le16_to_cpup(&cqe->sq_id));
-               return;
-       }
-       dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
-}
-
-static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
-{
-       void *ctx;
-
-       if (fn)
-               *fn = cmd->fn;
-       ctx = cmd->ctx;
-       cmd->fn = special_completion;
-       cmd->ctx = CMD_CTX_CANCELLED;
-       return ctx;
-}
-
-static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
-                                               struct nvme_completion *cqe)
-{
-       u32 result = le32_to_cpup(&cqe->result);
-       u16 status = le16_to_cpup(&cqe->status) >> 1;
+       u16 status = le16_to_cpu(cqe->status) >> 1;
+       u32 result = le32_to_cpu(cqe->result);
 
        if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
-               ++nvmeq->dev->event_limit;
+               ++dev->ctrl.event_limit;
        if (status != NVME_SC_SUCCESS)
                return;
 
        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
-               dev_info(nvmeq->q_dmadev, "rescanning\n");
-               schedule_work(&nvmeq->dev->scan_work);
+               dev_info(dev->dev, "rescanning\n");
+               queue_work(nvme_workq, &dev->scan_work);
        default:
-               dev_warn(nvmeq->q_dmadev, "async event result %08x\n", result);
-       }
-}
-
-static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
-                                               struct nvme_completion *cqe)
-{
-       struct request *req = ctx;
-
-       u16 status = le16_to_cpup(&cqe->status) >> 1;
-       u32 result = le32_to_cpup(&cqe->result);
-
-       blk_mq_free_request(req);
-
-       dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
-       ++nvmeq->dev->abort_limit;
-}
-
-static void async_completion(struct nvme_queue *nvmeq, void *ctx,
-                                               struct nvme_completion *cqe)
-{
-       struct async_cmd_info *cmdinfo = ctx;
-       cmdinfo->result = le32_to_cpup(&cqe->result);
-       cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
-       queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
-       blk_mq_free_request(cmdinfo->req);
-}
-
-static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
-                                 unsigned int tag)
-{
-       struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, tag);
-
-       return blk_mq_rq_to_pdu(req);
-}
-
-/*
- * Called with local interrupts disabled and the q_lock held.  May not sleep.
- */
-static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
-                                               nvme_completion_fn *fn)
-{
-       struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag);
-       void *ctx;
-       if (tag >= nvmeq->q_depth) {
-               *fn = special_completion;
-               return CMD_CTX_INVALID;
+               dev_warn(dev->dev, "async event result %08x\n", result);
        }
-       if (fn)
-               *fn = cmd->fn;
-       ctx = cmd->ctx;
-       cmd->fn = special_completion;
-       cmd->ctx = CMD_CTX_COMPLETED;
-       return ctx;
 }
 
 /**
- * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
  * @nvmeq: The queue to use
  * @cmd: The command to send
  *
@@ -405,69 +329,44 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
        nvmeq->sq_tail = tail;
 }
 
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&nvmeq->q_lock, flags);
-       __nvme_submit_cmd(nvmeq, cmd);
-       spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-}
-
-static __le64 **iod_list(struct nvme_iod *iod)
-{
-       return ((void *)iod) + iod->offset;
-}
-
-static inline void iod_init(struct nvme_iod *iod, unsigned nbytes,
-                           unsigned nseg, unsigned long private)
-{
-       iod->private = private;
-       iod->offset = offsetof(struct nvme_iod, sg[nseg]);
-       iod->npages = -1;
-       iod->length = nbytes;
-       iod->nents = 0;
-}
-
-static struct nvme_iod *
-__nvme_alloc_iod(unsigned nseg, unsigned bytes, struct nvme_dev *dev,
-                unsigned long priv, gfp_t gfp)
+static __le64 **iod_list(struct request *req)
 {
-       struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
-                               sizeof(__le64 *) * nvme_npages(bytes, dev) +
-                               sizeof(struct scatterlist) * nseg, gfp);
-
-       if (iod)
-               iod_init(iod, bytes, nseg, priv);
-
-       return iod;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       return (__le64 **)(iod->sg + req->nr_phys_segments);
 }
 
-static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
-                                      gfp_t gfp)
+static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
-       unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) :
-                                                sizeof(struct nvme_dsm_range);
-       struct nvme_iod *iod;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
+       int nseg = rq->nr_phys_segments;
+       unsigned size;
 
-       if (rq->nr_phys_segments <= NVME_INT_PAGES &&
-           size <= NVME_INT_BYTES(dev)) {
-               struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
+       if (rq->cmd_flags & REQ_DISCARD)
+               size = sizeof(struct nvme_dsm_range);
+       else
+               size = blk_rq_bytes(rq);
 
-               iod = cmd->iod;
-               iod_init(iod, size, rq->nr_phys_segments,
-                               (unsigned long) rq | NVME_INT_MASK);
-               return iod;
+       if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
+               iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
+               if (!iod->sg)
+                       return BLK_MQ_RQ_QUEUE_BUSY;
+       } else {
+               iod->sg = iod->inline_sg;
        }
 
-       return __nvme_alloc_iod(rq->nr_phys_segments, size, dev,
-                               (unsigned long) rq, gfp);
+       iod->aborted = 0;
+       iod->npages = -1;
+       iod->nents = 0;
+       iod->length = size;
+       return 0;
 }
 
-static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
 {
-       const int last_prp = dev->page_size / 8 - 1;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       const int last_prp = dev->ctrl.page_size / 8 - 1;
        int i;
-       __le64 **list = iod_list(iod);
+       __le64 **list = iod_list(req);
        dma_addr_t prp_dma = iod->first_dma;
 
        if (iod->npages == 0)
@@ -479,20 +378,8 @@ static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
                prp_dma = next_prp_dma;
        }
 
-       if (iod_should_kfree(iod))
-               kfree(iod);
-}
-
-static int nvme_error_status(u16 status)
-{
-       switch (status & 0x7ff) {
-       case NVME_SC_SUCCESS:
-               return 0;
-       case NVME_SC_CAP_EXCEEDED:
-               return -ENOSPC;
-       default:
-               return -EIO;
-       }
+       if (iod->sg != iod->inline_sg)
+               kfree(iod->sg);
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -549,27 +436,6 @@ static void nvme_dif_remap(struct request *req,
        }
        kunmap_atomic(pmap);
 }
-
-static void nvme_init_integrity(struct nvme_ns *ns)
-{
-       struct blk_integrity integrity;
-
-       switch (ns->pi_type) {
-       case NVME_NS_DPS_PI_TYPE3:
-               integrity.profile = &t10_pi_type3_crc;
-               break;
-       case NVME_NS_DPS_PI_TYPE1:
-       case NVME_NS_DPS_PI_TYPE2:
-               integrity.profile = &t10_pi_type1_crc;
-               break;
-       default:
-               integrity.profile = NULL;
-               break;
-       }
-       integrity.tuple_size = ns->ms;
-       blk_integrity_register(ns->disk, &integrity);
-       blk_queue_max_integrity_segments(ns->queue, 1);
-}
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 static void nvme_dif_remap(struct request *req,
                        void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
@@ -581,91 +447,27 @@ static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
 static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 {
 }
-static void nvme_init_integrity(struct nvme_ns *ns)
-{
-}
 #endif
 
-static void req_completion(struct nvme_queue *nvmeq, void *ctx,
-                                               struct nvme_completion *cqe)
-{
-       struct nvme_iod *iod = ctx;
-       struct request *req = iod_get_private(iod);
-       struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
-       u16 status = le16_to_cpup(&cqe->status) >> 1;
-       bool requeue = false;
-       int error = 0;
-
-       if (unlikely(status)) {
-               if (!(status & NVME_SC_DNR || blk_noretry_request(req))
-                   && (jiffies - req->start_time) < req->timeout) {
-                       unsigned long flags;
-
-                       requeue = true;
-                       blk_mq_requeue_request(req);
-                       spin_lock_irqsave(req->q->queue_lock, flags);
-                       if (!blk_queue_stopped(req->q))
-                               blk_mq_kick_requeue_list(req->q);
-                       spin_unlock_irqrestore(req->q->queue_lock, flags);
-                       goto release_iod;
-               }
-
-               if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-                       if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-                               error = -EINTR;
-                       else
-                               error = status;
-               } else {
-                       error = nvme_error_status(status);
-               }
-       }
-
-       if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-               u32 result = le32_to_cpup(&cqe->result);
-               req->special = (void *)(uintptr_t)result;
-       }
-
-       if (cmd_rq->aborted)
-               dev_warn(nvmeq->dev->dev,
-                       "completing aborted command with status:%04x\n",
-                       error);
-
-release_iod:
-       if (iod->nents) {
-               dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
-                       rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-               if (blk_integrity_rq(req)) {
-                       if (!rq_data_dir(req))
-                               nvme_dif_remap(req, nvme_dif_complete);
-                       dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1,
-                               rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-               }
-       }
-       nvme_free_iod(nvmeq->dev, iod);
-
-       if (likely(!requeue))
-               blk_mq_complete_request(req, error);
-}
-
-/* length is in bytes.  gfp flags indicates whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
-               int total_len, gfp_t gfp)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
+               int total_len)
 {
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct dma_pool *pool;
        int length = total_len;
        struct scatterlist *sg = iod->sg;
        int dma_len = sg_dma_len(sg);
        u64 dma_addr = sg_dma_address(sg);
-       u32 page_size = dev->page_size;
+       u32 page_size = dev->ctrl.page_size;
        int offset = dma_addr & (page_size - 1);
        __le64 *prp_list;
-       __le64 **list = iod_list(iod);
+       __le64 **list = iod_list(req);
        dma_addr_t prp_dma;
        int nprps, i;
 
        length -= (page_size - offset);
        if (length <= 0)
-               return total_len;
+               return true;
 
        dma_len -= (page_size - offset);
        if (dma_len) {
@@ -678,7 +480,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 
        if (length <= page_size) {
                iod->first_dma = dma_addr;
-               return total_len;
+               return true;
        }
 
        nprps = DIV_ROUND_UP(length, page_size);
@@ -690,11 +492,11 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
                iod->npages = 1;
        }
 
-       prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+       prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
        if (!prp_list) {
                iod->first_dma = dma_addr;
                iod->npages = -1;
-               return (total_len - length) + page_size;
+               return false;
        }
        list[0] = prp_list;
        iod->first_dma = prp_dma;
@@ -702,9 +504,9 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
        for (;;) {
                if (i == page_size >> 3) {
                        __le64 *old_prp_list = prp_list;
-                       prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+                       prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
                        if (!prp_list)
-                               return total_len - length;
+                               return false;
                        list[iod->npages++] = prp_list;
                        prp_list[0] = old_prp_list[i - 1];
                        old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -724,115 +526,105 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
                dma_len = sg_dma_len(sg);
        }
 
-       return total_len;
+       return true;
 }
 
-static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
-               struct nvme_iod *iod)
+static int nvme_map_data(struct nvme_dev *dev, struct request *req,
+               struct nvme_command *cmnd)
 {
-       struct nvme_command cmnd;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct request_queue *q = req->q;
+       enum dma_data_direction dma_dir = rq_data_dir(req) ?
+                       DMA_TO_DEVICE : DMA_FROM_DEVICE;
+       int ret = BLK_MQ_RQ_QUEUE_ERROR;
 
-       memcpy(&cmnd, req->cmd, sizeof(cmnd));
-       cmnd.rw.command_id = req->tag;
-       if (req->nr_phys_segments) {
-               cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-               cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
-       }
+       sg_init_table(iod->sg, req->nr_phys_segments);
+       iod->nents = blk_rq_map_sg(q, req, iod->sg);
+       if (!iod->nents)
+               goto out;
 
-       __nvme_submit_cmd(nvmeq, &cmnd);
-}
+       ret = BLK_MQ_RQ_QUEUE_BUSY;
+       if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
+               goto out;
 
-/*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
-static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
-               struct request *req, struct nvme_iod *iod)
-{
-       struct nvme_dsm_range *range =
-                               (struct nvme_dsm_range *)iod_list(iod)[0];
-       struct nvme_command cmnd;
+       if (!nvme_setup_prps(dev, req, blk_rq_bytes(req)))
+               goto out_unmap;
 
-       range->cattr = cpu_to_le32(0);
-       range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
-       range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+       ret = BLK_MQ_RQ_QUEUE_ERROR;
+       if (blk_integrity_rq(req)) {
+               if (blk_rq_count_integrity_sg(q, req->bio) != 1)
+                       goto out_unmap;
 
-       memset(&cmnd, 0, sizeof(cmnd));
-       cmnd.dsm.opcode = nvme_cmd_dsm;
-       cmnd.dsm.command_id = req->tag;
-       cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
-       cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
-       cmnd.dsm.nr = 0;
-       cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+               sg_init_table(&iod->meta_sg, 1);
+               if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
+                       goto out_unmap;
 
-       __nvme_submit_cmd(nvmeq, &cmnd);
-}
+               if (rq_data_dir(req))
+                       nvme_dif_remap(req, nvme_dif_prep);
 
-static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
-                                                               int cmdid)
-{
-       struct nvme_command cmnd;
+               if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
+                       goto out_unmap;
+       }
 
-       memset(&cmnd, 0, sizeof(cmnd));
-       cmnd.common.opcode = nvme_cmd_flush;
-       cmnd.common.command_id = cmdid;
-       cmnd.common.nsid = cpu_to_le32(ns->ns_id);
+       cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+       cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+       if (blk_integrity_rq(req))
+               cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
+       return BLK_MQ_RQ_QUEUE_OK;
 
-       __nvme_submit_cmd(nvmeq, &cmnd);
+out_unmap:
+       dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+out:
+       return ret;
 }
 
-static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
-                                                       struct nvme_ns *ns)
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 {
-       struct request *req = iod_get_private(iod);
-       struct nvme_command cmnd;
-       u16 control = 0;
-       u32 dsmgmt = 0;
-
-       if (req->cmd_flags & REQ_FUA)
-               control |= NVME_RW_FUA;
-       if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
-               control |= NVME_RW_LR;
-
-       if (req->cmd_flags & REQ_RAHEAD)
-               dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
-
-       memset(&cmnd, 0, sizeof(cmnd));
-       cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-       cmnd.rw.command_id = req->tag;
-       cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
-       cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-       cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
-       cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-       cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
-       if (ns->ms) {
-               switch (ns->pi_type) {
-               case NVME_NS_DPS_PI_TYPE3:
-                       control |= NVME_RW_PRINFO_PRCHK_GUARD;
-                       break;
-               case NVME_NS_DPS_PI_TYPE1:
-               case NVME_NS_DPS_PI_TYPE2:
-                       control |= NVME_RW_PRINFO_PRCHK_GUARD |
-                                       NVME_RW_PRINFO_PRCHK_REF;
-                       cmnd.rw.reftag = cpu_to_le32(
-                                       nvme_block_nr(ns, blk_rq_pos(req)));
-                       break;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       enum dma_data_direction dma_dir = rq_data_dir(req) ?
+                       DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+       if (iod->nents) {
+               dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+               if (blk_integrity_rq(req)) {
+                       if (!rq_data_dir(req))
+                               nvme_dif_remap(req, nvme_dif_complete);
+                       dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
                }
-               if (blk_integrity_rq(req))
-                       cmnd.rw.metadata =
-                               cpu_to_le64(sg_dma_address(iod->meta_sg));
-               else
-                       control |= NVME_RW_PRINFO_PRACT;
        }
 
-       cmnd.rw.control = cpu_to_le16(control);
-       cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt);
+       nvme_free_iod(dev, req);
+}
 
-       __nvme_submit_cmd(nvmeq, &cmnd);
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static int nvme_setup_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+               struct request *req, struct nvme_command *cmnd)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct nvme_dsm_range *range;
 
-       return 0;
+       range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
+                                               &iod->first_dma);
+       if (!range)
+               return BLK_MQ_RQ_QUEUE_BUSY;
+       iod_list(req)[0] = (__le64 *)range;
+       iod->npages = 0;
+
+       range->cattr = cpu_to_le32(0);
+       range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
+       range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+
+       memset(cmnd, 0, sizeof(*cmnd));
+       cmnd->dsm.opcode = nvme_cmd_dsm;
+       cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+       cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
+       cmnd->dsm.nr = 0;
+       cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+       return BLK_MQ_RQ_QUEUE_OK;
 }
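
struct nvme_dsm_range is exactly 16 bytes (two __le32 fields plus a __le64 starting LBA), which is why the small PRP pool, whose elements are comfortably larger, is a convenient fit here.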
 
 /*
@@ -845,9 +637,8 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_queue *nvmeq = hctx->driver_data;
        struct nvme_dev *dev = nvmeq->dev;
        struct request *req = bd->rq;
-       struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
-       struct nvme_iod *iod;
-       enum dma_data_direction dma_dir;
+       struct nvme_command cmnd;
+       int ret = BLK_MQ_RQ_QUEUE_OK;
 
        /*
         * If formatted with metadata, require the block layer to provide a buffer
@@ -857,91 +648,72 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ns && ns->ms && !blk_integrity_rq(req)) {
                if (!(ns->pi_type && ns->ms == 8) &&
                                        req->cmd_type != REQ_TYPE_DRV_PRIV) {
-                       blk_mq_complete_request(req, -EFAULT);
+                       blk_mq_end_request(req, -EFAULT);
                        return BLK_MQ_RQ_QUEUE_OK;
                }
        }
 
-       iod = nvme_alloc_iod(req, dev, GFP_ATOMIC);
-       if (!iod)
-               return BLK_MQ_RQ_QUEUE_BUSY;
+       ret = nvme_init_iod(req, dev);
+       if (ret)
+               return ret;
 
        if (req->cmd_flags & REQ_DISCARD) {
-               void *range;
-               /*
-                * We reuse the small pool to allocate the 16-byte range here
-                * as it is not worth having a special pool for these or
-                * additional cases to handle freeing the iod.
-                */
-               range = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC,
-                                               &iod->first_dma);
-               if (!range)
-                       goto retry_cmd;
-               iod_list(iod)[0] = (__le64 *)range;
-               iod->npages = 0;
-       } else if (req->nr_phys_segments) {
-               dma_dir = rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+               ret = nvme_setup_discard(nvmeq, ns, req, &cmnd);
+       } else {
+               if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+                       memcpy(&cmnd, req->cmd, sizeof(cmnd));
+               else if (req->cmd_flags & REQ_FLUSH)
+                       nvme_setup_flush(ns, &cmnd);
+               else
+                       nvme_setup_rw(ns, req, &cmnd);
+
+               if (req->nr_phys_segments)
+                       ret = nvme_map_data(dev, req, &cmnd);
+       }
 
-               sg_init_table(iod->sg, req->nr_phys_segments);
-               iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
-               if (!iod->nents)
-                       goto error_cmd;
+       if (ret)
+               goto out;
 
-               if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
-                       goto retry_cmd;
+       cmnd.common.command_id = req->tag;
+       blk_mq_start_request(req);
 
-               if (blk_rq_bytes(req) !=
-                    nvme_setup_prps(dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
-                       dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
-                       goto retry_cmd;
-               }
-               if (blk_integrity_rq(req)) {
-                       if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
-                               dma_unmap_sg(dev->dev, iod->sg, iod->nents,
-                                               dma_dir);
-                               goto error_cmd;
-                       }
+       spin_lock_irq(&nvmeq->q_lock);
+       __nvme_submit_cmd(nvmeq, &cmnd);
+       nvme_process_cq(nvmeq);
+       spin_unlock_irq(&nvmeq->q_lock);
+       return BLK_MQ_RQ_QUEUE_OK;
+out:
+       nvme_free_iod(dev, req);
+       return ret;
+}
 
-                       sg_init_table(iod->meta_sg, 1);
-                       if (blk_rq_map_integrity_sg(
-                                       req->q, req->bio, iod->meta_sg) != 1) {
-                               dma_unmap_sg(dev->dev, iod->sg, iod->nents,
-                                               dma_dir);
-                               goto error_cmd;
-                       }
+static void nvme_complete_rq(struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct nvme_dev *dev = iod->nvmeq->dev;
+       int error = 0;
 
-                       if (rq_data_dir(req))
-                               nvme_dif_remap(req, nvme_dif_prep);
+       nvme_unmap_data(dev, req);
 
-                       if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
-                               dma_unmap_sg(dev->dev, iod->sg, iod->nents,
-                                               dma_dir);
-                               goto error_cmd;
-                       }
+       if (unlikely(req->errors)) {
+               if (nvme_req_needs_retry(req, req->errors)) {
+                       nvme_requeue_req(req);
+                       return;
                }
-       }
 
-       nvme_set_info(cmd, iod, req_completion);
-       spin_lock_irq(&nvmeq->q_lock);
-       if (req->cmd_type == REQ_TYPE_DRV_PRIV)
-               nvme_submit_priv(nvmeq, req, iod);
-       else if (req->cmd_flags & REQ_DISCARD)
-               nvme_submit_discard(nvmeq, ns, req, iod);
-       else if (req->cmd_flags & REQ_FLUSH)
-               nvme_submit_flush(nvmeq, ns, req->tag);
-       else
-               nvme_submit_iod(nvmeq, iod, ns);
+               if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+                       error = req->errors;
+               else
+                       error = nvme_error_status(req->errors);
+       }
 
-       nvme_process_cq(nvmeq);
-       spin_unlock_irq(&nvmeq->q_lock);
-       return BLK_MQ_RQ_QUEUE_OK;
+       if (unlikely(iod->aborted)) {
+               dev_warn(dev->dev,
+                       "completing aborted command with status: %04x\n",
+                       req->errors);
+       }
 
- error_cmd:
-       nvme_free_iod(dev, iod);
-       return BLK_MQ_RQ_QUEUE_ERROR;
- retry_cmd:
-       nvme_free_iod(dev, iod);
-       return BLK_MQ_RQ_QUEUE_BUSY;
+       blk_mq_end_request(req, error);
 }
 
 static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
@@ -952,20 +724,47 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
        phase = nvmeq->cq_phase;
 
        for (;;) {
-               void *ctx;
-               nvme_completion_fn fn;
                struct nvme_completion cqe = nvmeq->cqes[head];
-               if ((le16_to_cpu(cqe.status) & 1) != phase)
+               u16 status = le16_to_cpu(cqe.status);
+               struct request *req;
+
+               if ((status & 1) != phase)
                        break;
                nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
                if (++head == nvmeq->q_depth) {
                        head = 0;
                        phase = !phase;
                }
+
                if (tag && *tag == cqe.command_id)
                        *tag = -1;
-               ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
-               fn(nvmeq, ctx, &cqe);
+
+               if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
+                       dev_warn(nvmeq->q_dmadev,
+                               "invalid id %d completed on queue %d\n",
+                               cqe.command_id, le16_to_cpu(cqe.sq_id));
+                       continue;
+               }
+
+               /*
+                * AEN requests are special as they don't time out and can
+                * survive any kind of queue freeze and often don't respond to
+                * aborts.  We don't even bother to allocate a struct request
+                * for them but rather special case them here.
+                */
+               if (unlikely(nvmeq->qid == 0 &&
+                               cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
+                       nvme_complete_async_event(nvmeq->dev, &cqe);
+                       continue;
+               }
+
+               req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
+               if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
+                       u32 result = le32_to_cpu(cqe.result);
+                       req->special = (void *)(uintptr_t)result;
+               }
+               blk_mq_complete_request(req, status >> 1);
        }
 
        /* If the controller ignores the cq head doorbell and continuously
@@ -1028,127 +827,30 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
        return 0;
 }
 
-/*
- * Returns 0 on success.  If the result is negative, it's a Linux error code;
- * if the result is positive, it's an NVM Express status code
- */
-int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-               void *buffer, void __user *ubuffer, unsigned bufflen,
-               u32 *result, unsigned timeout)
+static void nvme_submit_async_event(struct nvme_dev *dev)
 {
-       bool write = cmd->common.opcode & 1;
-       struct bio *bio = NULL;
-       struct request *req;
-       int ret;
-
-       req = blk_mq_alloc_request(q, write, 0);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       struct nvme_command c;
 
-       req->cmd_type = REQ_TYPE_DRV_PRIV;
-       req->cmd_flags |= REQ_FAILFAST_DRIVER;
-       req->__data_len = 0;
-       req->__sector = (sector_t) -1;
-       req->bio = req->biotail = NULL;
-
-       req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-
-       req->cmd = (unsigned char *)cmd;
-       req->cmd_len = sizeof(struct nvme_command);
-       req->special = (void *)0;
-
-       if (buffer && bufflen) {
-               ret = blk_rq_map_kern(q, req, buffer, bufflen,
-                                     __GFP_DIRECT_RECLAIM);
-               if (ret)
-                       goto out;
-       } else if (ubuffer && bufflen) {
-               ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
-                                     __GFP_DIRECT_RECLAIM);
-               if (ret)
-                       goto out;
-               bio = req->bio;
-       }
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = nvme_admin_async_event;
+       c.common.command_id = NVME_AQ_BLKMQ_DEPTH + --dev->ctrl.event_limit;
 
-       blk_execute_rq(req->q, NULL, req, 0);
-       if (bio)
-               blk_rq_unmap_user(bio);
-       if (result)
-               *result = (u32)(uintptr_t)req->special;
-       ret = req->errors;
- out:
-       blk_mq_free_request(req);
-       return ret;
+       __nvme_submit_cmd(dev->queues[0], &c);
 }
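
nvme_submit_async_event() can assign command IDs without going through
blk-mq because the admin tag space is partitioned: IDs below
NVME_AQ_BLKMQ_DEPTH are blk-mq tags backed by a struct request, while
IDs at or above it are in-flight AENs.  A sketch of the split; the
concrete constants (256-entry admin queue, one AEN slot) are
assumptions based on this series, not values visible in the hunks
above:

/* Assumed sizing: 256-entry admin queue, one ID carved out for AENs */
#define NVME_AQ_DEPTH           256
#define NVME_NR_AEN_COMMANDS    1
#define NVME_AQ_BLKMQ_DEPTH     (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)

/* Completion side of the split: only the admin queue (qid 0) ever
 * sees command IDs above the blk-mq range, and those are AENs. */
static int is_aen_completion(unsigned int qid, unsigned int command_id)
{
        return qid == 0 && command_id >= NVME_AQ_BLKMQ_DEPTH;
}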
 
-int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-               void *buffer, unsigned bufflen)
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 {
-       return __nvme_submit_sync_cmd(q, cmd, buffer, NULL, bufflen, NULL, 0);
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.delete_queue.opcode = opcode;
+       c.delete_queue.qid = cpu_to_le16(id);
+
+       return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
 }
 
-static int nvme_submit_async_admin_req(struct nvme_dev *dev)
-{
-       struct nvme_queue *nvmeq = dev->queues[0];
-       struct nvme_command c;
-       struct nvme_cmd_info *cmd_info;
-       struct request *req;
-
-       req = blk_mq_alloc_request(dev->admin_q, WRITE,
-                       BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
-
-       req->cmd_flags |= REQ_NO_TIMEOUT;
-       cmd_info = blk_mq_rq_to_pdu(req);
-       nvme_set_info(cmd_info, NULL, async_req_completion);
-
-       memset(&c, 0, sizeof(c));
-       c.common.opcode = nvme_admin_async_event;
-       c.common.command_id = req->tag;
-
-       blk_mq_free_request(req);
-       __nvme_submit_cmd(nvmeq, &c);
-       return 0;
-}
-
-static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
-                       struct nvme_command *cmd,
-                       struct async_cmd_info *cmdinfo, unsigned timeout)
-{
-       struct nvme_queue *nvmeq = dev->queues[0];
-       struct request *req;
-       struct nvme_cmd_info *cmd_rq;
-
-       req = blk_mq_alloc_request(dev->admin_q, WRITE, 0);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
-
-       req->timeout = timeout;
-       cmd_rq = blk_mq_rq_to_pdu(req);
-       cmdinfo->req = req;
-       nvme_set_info(cmd_rq, cmdinfo, async_completion);
-       cmdinfo->status = -EINTR;
-
-       cmd->common.command_id = req->tag;
-
-       nvme_submit_cmd(nvmeq, cmd);
-       return 0;
-}
-
-static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
-{
-       struct nvme_command c;
-
-       memset(&c, 0, sizeof(c));
-       c.delete_queue.opcode = opcode;
-       c.delete_queue.qid = cpu_to_le16(id);
-
-       return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
-}
-
-static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
-                                               struct nvme_queue *nvmeq)
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+                                               struct nvme_queue *nvmeq)
 {
        struct nvme_command c;
        int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
@@ -1165,7 +867,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
        c.create_cq.cq_flags = cpu_to_le16(flags);
        c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
 
-       return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
+       return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
 }
 
 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
@@ -1186,7 +888,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
        c.create_sq.sq_flags = cpu_to_le16(flags);
        c.create_sq.cqid = cpu_to_le16(qid);
 
-       return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
+       return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
 }
 
 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
@@ -1199,195 +901,111 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
        return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id)
+static void abort_endio(struct request *req, int error)
 {
-       struct nvme_command c = { };
-       int error;
-
-       /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
-       c.identify.opcode = nvme_admin_identify;
-       c.identify.cns = cpu_to_le32(1);
-
-       *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
-       if (!*id)
-               return -ENOMEM;
-
-       error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
-                       sizeof(struct nvme_id_ctrl));
-       if (error)
-               kfree(*id);
-       return error;
-}
-
-int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
-               struct nvme_id_ns **id)
-{
-       struct nvme_command c = { };
-       int error;
-
-       /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
-       c.identify.opcode = nvme_admin_identify,
-       c.identify.nsid = cpu_to_le32(nsid),
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct nvme_queue *nvmeq = iod->nvmeq;
+       u32 result = (u32)(uintptr_t)req->special;
+       u16 status = req->errors;
 
-       *id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
-       if (!*id)
-               return -ENOMEM;
-
-       error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
-                       sizeof(struct nvme_id_ns));
-       if (error)
-               kfree(*id);
-       return error;
-}
-
-int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
-                                       dma_addr_t dma_addr, u32 *result)
-{
-       struct nvme_command c;
-
-       memset(&c, 0, sizeof(c));
-       c.features.opcode = nvme_admin_get_features;
-       c.features.nsid = cpu_to_le32(nsid);
-       c.features.prp1 = cpu_to_le64(dma_addr);
-       c.features.fid = cpu_to_le32(fid);
-
-       return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, NULL, 0,
-                       result, 0);
-}
-
-int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
-                                       dma_addr_t dma_addr, u32 *result)
-{
-       struct nvme_command c;
-
-       memset(&c, 0, sizeof(c));
-       c.features.opcode = nvme_admin_set_features;
-       c.features.prp1 = cpu_to_le64(dma_addr);
-       c.features.fid = cpu_to_le32(fid);
-       c.features.dword11 = cpu_to_le32(dword11);
-
-       return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, NULL, 0,
-                       result, 0);
-}
-
-int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log)
-{
-       struct nvme_command c = { };
-       int error;
-
-       c.common.opcode = nvme_admin_get_log_page,
-       c.common.nsid = cpu_to_le32(0xFFFFFFFF),
-       c.common.cdw10[0] = cpu_to_le32(
-                       (((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
-                        NVME_LOG_SMART),
-
-       *log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
-       if (!*log)
-               return -ENOMEM;
+       dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x\n", status, result);
+       atomic_inc(&nvmeq->dev->ctrl.abort_limit);
 
-       error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
-                       sizeof(struct nvme_smart_log));
-       if (error)
-               kfree(*log);
-       return error;
+       blk_mq_free_request(req);
 }
 
-/**
- * nvme_abort_req - Attempt aborting a request
- *
- * Schedule controller reset if the command was already aborted once before and
- * still hasn't been returned to the driver, or if this is the admin queue.
- */
-static void nvme_abort_req(struct request *req)
+static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 {
-       struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
-       struct nvme_queue *nvmeq = cmd_rq->nvmeq;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct nvme_queue *nvmeq = iod->nvmeq;
        struct nvme_dev *dev = nvmeq->dev;
        struct request *abort_req;
-       struct nvme_cmd_info *abort_cmd;
        struct nvme_command cmd;
 
-       if (!nvmeq->qid || cmd_rq->aborted) {
-               spin_lock(&dev_list_lock);
-               if (!__nvme_reset(dev)) {
-                       dev_warn(dev->dev,
-                                "I/O %d QID %d timeout, reset controller\n",
-                                req->tag, nvmeq->qid);
-               }
-               spin_unlock(&dev_list_lock);
-               return;
+       /*
+        * Shut down immediately if the controller times out while starting. The
+        * reset work will see the pci device disabled when it gets the forced
+        * cancellation error. All outstanding requests are completed on
+        * shutdown, so we return BLK_EH_HANDLED.
+        */
+       if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
+               dev_warn(dev->dev,
+                        "I/O %d QID %d timeout, disable controller\n",
+                        req->tag, nvmeq->qid);
+               nvme_dev_disable(dev, false);
+               req->errors = NVME_SC_CANCELLED;
+               return BLK_EH_HANDLED;
        }
 
-       if (!dev->abort_limit)
-               return;
+       /*
+        * Shut down the controller immediately and schedule a reset if the
+        * command was already aborted once before and still hasn't been
+        * returned to the driver, or if this is the admin queue.
+        */
+       if (!nvmeq->qid || iod->aborted) {
+               dev_warn(dev->dev,
+                        "I/O %d QID %d timeout, reset controller\n",
+                        req->tag, nvmeq->qid);
+               nvme_dev_disable(dev, false);
+               queue_work(nvme_workq, &dev->reset_work);
 
-       abort_req = blk_mq_alloc_request(dev->admin_q, WRITE,
-                       BLK_MQ_REQ_NOWAIT);
-       if (IS_ERR(abort_req))
-               return;
+               /*
+                * Mark the request as handled, since the inline shutdown
+                * forces all outstanding requests to complete.
+                */
+               req->errors = NVME_SC_CANCELLED;
+               return BLK_EH_HANDLED;
+       }
 
-       abort_cmd = blk_mq_rq_to_pdu(abort_req);
-       nvme_set_info(abort_cmd, abort_req, abort_completion);
+       iod->aborted = 1;
+
+       if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
+               atomic_inc(&dev->ctrl.abort_limit);
+               return BLK_EH_RESET_TIMER;
+       }
 
        memset(&cmd, 0, sizeof(cmd));
        cmd.abort.opcode = nvme_admin_abort_cmd;
        cmd.abort.cid = req->tag;
        cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
-       cmd.abort.command_id = abort_req->tag;
 
-       --dev->abort_limit;
-       cmd_rq->aborted = 1;
+       dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
+                                req->tag, nvmeq->qid);
+
+       abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
+                       BLK_MQ_REQ_NOWAIT);
+       if (IS_ERR(abort_req)) {
+               atomic_inc(&dev->ctrl.abort_limit);
+               return BLK_EH_RESET_TIMER;
+       }
+
+       abort_req->timeout = ADMIN_TIMEOUT;
+       abort_req->end_io_data = NULL;
+       blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
 
-       dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
-                                                       nvmeq->qid);
-       nvme_submit_cmd(dev->queues[0], &cmd);
+       /*
+        * The aborted request will be completed when the abort command
+        * itself completes, so just re-arm the timer.  If the timeout
+        * fires a second time, the iod->aborted path above resets the
+        * controller, as the device is then in a faulty state.
+        */
+       return BLK_EH_RESET_TIMER;
 }
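
The abort budget handling above follows a prepaid-counter discipline:
nvme_timeout() charges ctrl.abort_limit before issuing an Abort,
refunds it on any path that leaves no abort in flight, and
abort_endio() refunds it again once the abort itself completes.  A
userspace model of the charge/refund step; the helper name is
hypothetical:

#include <stdatomic.h>

/* Take one unit of the abort budget before issuing an abort; refund
 * it immediately if the budget is exhausted (the caller then just
 * re-arms the request timer, as nvme_timeout() does above). */
static int try_charge_abort(atomic_int *limit)
{
        if (atomic_fetch_sub(limit, 1) - 1 < 0) {
                atomic_fetch_add(limit, 1);     /* refund */
                return 0;
        }
        return 1;       /* an abort may be issued */
}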
 
 static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
 {
        struct nvme_queue *nvmeq = data;
-       void *ctx;
-       nvme_completion_fn fn;
-       struct nvme_cmd_info *cmd;
-       struct nvme_completion cqe;
+       int status;
 
        if (!blk_mq_request_started(req))
                return;
 
-       cmd = blk_mq_rq_to_pdu(req);
-
-       if (cmd->ctx == CMD_CTX_CANCELLED)
-               return;
+       dev_warn(nvmeq->q_dmadev,
+                "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
 
+       status = NVME_SC_ABORT_REQ;
        if (blk_queue_dying(req->q))
-               cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
-       else
-               cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
-
-
-       dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
-                                               req->tag, nvmeq->qid);
-       ctx = cancel_cmd_info(cmd, &fn);
-       fn(nvmeq, ctx, &cqe);
-}
-
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
-{
-       struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
-       struct nvme_queue *nvmeq = cmd->nvmeq;
-
-       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
-                                                       nvmeq->qid);
-       spin_lock_irq(&nvmeq->q_lock);
-       nvme_abort_req(req);
-       spin_unlock_irq(&nvmeq->q_lock);
-
-       /*
-        * The aborted req will be completed on receiving the abort req.
-        * We enable the timer again. If hit twice, it'll cause a device reset,
-        * as the device then is in a faulty state.
-        */
-       return BLK_EH_RESET_TIMER;
+               status |= NVME_SC_DNR;
+       blk_mq_complete_request(req, status);
 }
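
The status built in nvme_cancel_queue_ios() has two parts: the abort
status code and, for a dying queue, the Do Not Retry bit, which makes
the block layer fail the request instead of letting it be retried.  A
sketch using the nvme.h values the driver is assumed to define at this
point:

#define NVME_SC_ABORT_REQ       0x007   /* assumed: Command Abort Requested */
#define NVME_SC_DNR             0x4000  /* assumed: Do Not Retry */

/* 0x0007 leaves the request retryable; 0x4007 makes the failure
 * terminal once the queue is going away. */
static int cancel_status(int queue_dying)
{
        int status = NVME_SC_ABORT_REQ;

        if (queue_dying)
                status |= NVME_SC_DNR;
        return status;
}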
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1430,8 +1048,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
        nvmeq->cq_vector = -1;
        spin_unlock_irq(&nvmeq->q_lock);
 
-       if (!nvmeq->qid && nvmeq->dev->admin_q)
-               blk_mq_freeze_queue_start(nvmeq->dev->admin_q);
+       if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
+               blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
 
        irq_set_affinity_hint(vector, NULL);
        free_irq(vector, nvmeq);
@@ -1447,21 +1065,20 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
        spin_unlock_irq(&nvmeq->q_lock);
 }
 
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
-       struct nvme_queue *nvmeq = dev->queues[qid];
+       struct nvme_queue *nvmeq = dev->queues[0];
 
        if (!nvmeq)
                return;
        if (nvme_suspend_queue(nvmeq))
                return;
 
-       /* Don't tell the adapter to delete the admin queue.
-        * Don't tell a removed adapter to delete IO queues. */
-       if (qid && readl(&dev->bar->csts) != -1) {
-               adapter_delete_sq(dev, qid);
-               adapter_delete_cq(dev, qid);
-       }
+       if (shutdown)
+               nvme_shutdown_ctrl(&dev->ctrl);
+       else
+               nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
+                                               dev->bar + NVME_REG_CAP));
 
        spin_lock_irq(&nvmeq->q_lock);
        nvme_process_cq(nvmeq);
@@ -1472,11 +1089,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
                                int entry_size)
 {
        int q_depth = dev->q_depth;
-       unsigned q_size_aligned = roundup(q_depth * entry_size, dev->page_size);
+       unsigned q_size_aligned = roundup(q_depth * entry_size,
+                                         dev->ctrl.page_size);
 
        if (q_size_aligned * nr_io_queues > dev->cmb_size) {
                u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
-               mem_per_q = round_down(mem_per_q, dev->page_size);
+               mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
                q_depth = div_u64(mem_per_q, entry_size);
 
                /*
@@ -1495,8 +1113,8 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
                                int qid, int depth)
 {
        if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
-               unsigned offset = (qid - 1) *
-                                       roundup(SQ_SIZE(depth), dev->page_size);
+               unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
+                                                     dev->ctrl.page_size);
                nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
                nvmeq->sq_cmds_io = dev->cmb + offset;
        } else {
@@ -1527,7 +1145,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        nvmeq->q_dmadev = dev->dev;
        nvmeq->dev = dev;
        snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
-                       dev->instance, qid);
+                       dev->ctrl.instance, qid);
        spin_lock_init(&nvmeq->q_lock);
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
@@ -1604,79 +1222,9 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        return result;
 }
 
-static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
-{
-       unsigned long timeout;
-       u32 bit = enabled ? NVME_CSTS_RDY : 0;
-
-       timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
-
-       while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
-               msleep(100);
-               if (fatal_signal_pending(current))
-                       return -EINTR;
-               if (time_after(jiffies, timeout)) {
-                       dev_err(dev->dev,
-                               "Device not ready; aborting %s\n", enabled ?
-                                               "initialisation" : "reset");
-                       return -ENODEV;
-               }
-       }
-
-       return 0;
-}
-
-/*
- * If the device has been passed off to us in an enabled state, just clear
- * the enabled bit.  The spec says we should set the 'shutdown notification
- * bits', but doing so may cause the device to complete commands to the
- * admin queue ... and we don't know what memory that might be pointing at!
- */
-static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
-{
-       dev->ctrl_config &= ~NVME_CC_SHN_MASK;
-       dev->ctrl_config &= ~NVME_CC_ENABLE;
-       writel(dev->ctrl_config, &dev->bar->cc);
-
-       return nvme_wait_ready(dev, cap, false);
-}
-
-static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
-{
-       dev->ctrl_config &= ~NVME_CC_SHN_MASK;
-       dev->ctrl_config |= NVME_CC_ENABLE;
-       writel(dev->ctrl_config, &dev->bar->cc);
-
-       return nvme_wait_ready(dev, cap, true);
-}
-
-static int nvme_shutdown_ctrl(struct nvme_dev *dev)
-{
-       unsigned long timeout;
-
-       dev->ctrl_config &= ~NVME_CC_SHN_MASK;
-       dev->ctrl_config |= NVME_CC_SHN_NORMAL;
-
-       writel(dev->ctrl_config, &dev->bar->cc);
-
-       timeout = SHUTDOWN_TIMEOUT + jiffies;
-       while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
-                                                       NVME_CSTS_SHST_CMPLT) {
-               msleep(100);
-               if (fatal_signal_pending(current))
-                       return -EINTR;
-               if (time_after(jiffies, timeout)) {
-                       dev_err(dev->dev,
-                               "Device shutdown incomplete; abort shutdown\n");
-                       return -ENODEV;
-               }
-       }
-
-       return 0;
-}
-
 static struct blk_mq_ops nvme_mq_admin_ops = {
        .queue_rq       = nvme_queue_rq,
+       .complete       = nvme_complete_rq,
        .map_queue      = blk_mq_map_queue,
        .init_hctx      = nvme_admin_init_hctx,
        .exit_hctx      = nvme_admin_exit_hctx,
@@ -1686,6 +1234,7 @@ static struct blk_mq_ops nvme_mq_admin_ops = {
 
 static struct blk_mq_ops nvme_mq_ops = {
        .queue_rq       = nvme_queue_rq,
+       .complete       = nvme_complete_rq,
        .map_queue      = blk_mq_map_queue,
        .init_hctx      = nvme_init_hctx,
        .init_request   = nvme_init_request,
@@ -1695,19 +1244,23 @@ static struct blk_mq_ops nvme_mq_ops = {
 
 static void nvme_dev_remove_admin(struct nvme_dev *dev)
 {
-       if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
-               blk_cleanup_queue(dev->admin_q);
+       if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
+               blk_cleanup_queue(dev->ctrl.admin_q);
                blk_mq_free_tag_set(&dev->admin_tagset);
        }
 }
 
 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 {
-       if (!dev->admin_q) {
+       if (!dev->ctrl.admin_q) {
                dev->admin_tagset.ops = &nvme_mq_admin_ops;
                dev->admin_tagset.nr_hw_queues = 1;
-               dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
-               dev->admin_tagset.reserved_tags = 1;
+
+               /*
+                * Subtract one to leave an empty queue entry for 'Full Queue'
+                * condition. See NVM-Express 1.2 specification, section 4.1.2.
+                */
+               dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
                dev->admin_tagset.timeout = ADMIN_TIMEOUT;
                dev->admin_tagset.numa_node = dev_to_node(dev->dev);
                dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
@@ -1716,18 +1269,18 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
                if (blk_mq_alloc_tag_set(&dev->admin_tagset))
                        return -ENOMEM;
 
-               dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
-               if (IS_ERR(dev->admin_q)) {
+               dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
+               if (IS_ERR(dev->ctrl.admin_q)) {
                        blk_mq_free_tag_set(&dev->admin_tagset);
                        return -ENOMEM;
                }
-               if (!blk_get_queue(dev->admin_q)) {
+               if (!blk_get_queue(dev->ctrl.admin_q)) {
                        nvme_dev_remove_admin(dev);
-                       dev->admin_q = NULL;
+                       dev->ctrl.admin_q = NULL;
                        return -ENODEV;
                }
        } else
-               blk_mq_unfreeze_queue(dev->admin_q);
+               blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
 
        return 0;
 }
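
Worked out with the constants this series is assumed to use, the admin
tag set sizing above becomes: 256 hardware entries, minus the command
ID reserved for AENs, minus the entry left empty for the full-queue
condition:

/* Assumed: NVME_AQ_DEPTH = 256, one AEN slot (see the AEN sketch
 * after nvme_submit_async_event() above). */
#define NVME_AQ_DEPTH           256
#define NVME_AQ_BLKMQ_DEPTH     (NVME_AQ_DEPTH - 1)     /* 255 blk-mq-visible IDs */
/* .queue_depth = NVME_AQ_BLKMQ_DEPTH - 1 = 254: one ring entry stays
 * empty so the controller can tell a full queue from an empty one. */
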
@@ -1736,31 +1289,17 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
        int result;
        u32 aqa;
-       u64 cap = lo_hi_readq(&dev->bar->cap);
+       u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
        struct nvme_queue *nvmeq;
-       /*
-        * default to a 4K page size, with the intention to update this
-        * path in the future to accomodate architectures with differing
-        * kernel and IO page sizes.
-        */
-       unsigned page_shift = 12;
-       unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-
-       if (page_shift < dev_page_min) {
-               dev_err(dev->dev,
-                               "Minimum device page size (%u) too large for "
-                               "host (%u)\n", 1 << dev_page_min,
-                               1 << page_shift);
-               return -ENODEV;
-       }
 
-       dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
+       dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
                                                NVME_CAP_NSSRC(cap) : 0;
 
-       if (dev->subsystem && (readl(&dev->bar->csts) & NVME_CSTS_NSSRO))
-               writel(NVME_CSTS_NSSRO, &dev->bar->csts);
+       if (dev->subsystem &&
+           (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
+               writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
 
-       result = nvme_disable_ctrl(dev, cap);
+       result = nvme_disable_ctrl(&dev->ctrl, cap);
        if (result < 0)
                return result;
 
@@ -1774,18 +1313,11 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        aqa = nvmeq->q_depth - 1;
        aqa |= aqa << 16;
 
-       dev->page_size = 1 << page_shift;
-
-       dev->ctrl_config = NVME_CC_CSS_NVM;
-       dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
-       dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
-       dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
-
-       writel(aqa, &dev->bar->aqa);
-       lo_hi_writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
-       lo_hi_writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+       writel(aqa, dev->bar + NVME_REG_AQA);
+       lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
+       lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
 
-       result = nvme_enable_ctrl(dev, cap);
+       result = nvme_enable_ctrl(&dev->ctrl, cap);
        if (result)
                goto free_nvmeq;
 
@@ -1803,406 +1335,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        return result;
 }
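
nvme_configure_admin_queue() programs the queue sizes through the AQA
register, whose halves carry the zero's-based SQ and CQ sizes; the
"aqa |= aqa << 16" above reuses one depth for both.  A standalone
sketch of that packing, with the register layout taken from the NVMe
specification:

#include <stdint.h>

/* AQA: ASQS in bits 11:0, ACQS in bits 27:16, both zero's based.
 * This driver sizes the admin SQ and CQ identically. */
static uint32_t pack_aqa(uint32_t q_depth)
{
        uint32_t aqa = q_depth - 1;     /* zero's based */

        return aqa | (aqa << 16);
}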
 
-static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
-{
-       struct nvme_dev *dev = ns->dev;
-       struct nvme_user_io io;
-       struct nvme_command c;
-       unsigned length, meta_len;
-       int status, write;
-       dma_addr_t meta_dma = 0;
-       void *meta = NULL;
-       void __user *metadata;
-
-       if (copy_from_user(&io, uio, sizeof(io)))
-               return -EFAULT;
-
-       switch (io.opcode) {
-       case nvme_cmd_write:
-       case nvme_cmd_read:
-       case nvme_cmd_compare:
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       length = (io.nblocks + 1) << ns->lba_shift;
-       meta_len = (io.nblocks + 1) * ns->ms;
-       metadata = (void __user *)(uintptr_t)io.metadata;
-       write = io.opcode & 1;
-
-       if (ns->ext) {
-               length += meta_len;
-               meta_len = 0;
-       }
-       if (meta_len) {
-               if (((io.metadata & 3) || !io.metadata) && !ns->ext)
-                       return -EINVAL;
-
-               meta = dma_alloc_coherent(dev->dev, meta_len,
-                                               &meta_dma, GFP_KERNEL);
-
-               if (!meta) {
-                       status = -ENOMEM;
-                       goto unmap;
-               }
-               if (write) {
-                       if (copy_from_user(meta, metadata, meta_len)) {
-                               status = -EFAULT;
-                               goto unmap;
-                       }
-               }
-       }
-
-       memset(&c, 0, sizeof(c));
-       c.rw.opcode = io.opcode;
-       c.rw.flags = io.flags;
-       c.rw.nsid = cpu_to_le32(ns->ns_id);
-       c.rw.slba = cpu_to_le64(io.slba);
-       c.rw.length = cpu_to_le16(io.nblocks);
-       c.rw.control = cpu_to_le16(io.control);
-       c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
-       c.rw.reftag = cpu_to_le32(io.reftag);
-       c.rw.apptag = cpu_to_le16(io.apptag);
-       c.rw.appmask = cpu_to_le16(io.appmask);
-       c.rw.metadata = cpu_to_le64(meta_dma);
-
-       status = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
-                       (void __user *)(uintptr_t)io.addr, length, NULL, 0);
- unmap:
-       if (meta) {
-               if (status == NVME_SC_SUCCESS && !write) {
-                       if (copy_to_user(metadata, meta, meta_len))
-                               status = -EFAULT;
-               }
-               dma_free_coherent(dev->dev, meta_len, meta, meta_dma);
-       }
-       return status;
-}
-
-static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
-                       struct nvme_passthru_cmd __user *ucmd)
-{
-       struct nvme_passthru_cmd cmd;
-       struct nvme_command c;
-       unsigned timeout = 0;
-       int status;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-       if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
-               return -EFAULT;
-
-       memset(&c, 0, sizeof(c));
-       c.common.opcode = cmd.opcode;
-       c.common.flags = cmd.flags;
-       c.common.nsid = cpu_to_le32(cmd.nsid);
-       c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
-       c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
-       c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
-       c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
-       c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
-       c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
-       c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
-       c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
-
-       if (cmd.timeout_ms)
-               timeout = msecs_to_jiffies(cmd.timeout_ms);
-
-       status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
-                       NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-                       &cmd.result, timeout);
-       if (status >= 0) {
-               if (put_user(cmd.result, &ucmd->result))
-                       return -EFAULT;
-       }
-
-       return status;
-}
-
-static int nvme_subsys_reset(struct nvme_dev *dev)
-{
-       if (!dev->subsystem)
-               return -ENOTTY;
-
-       writel(0x4E564D65, &dev->bar->nssr); /* "NVMe" */
-       return 0;
-}
-
-static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
-                                                       unsigned long arg)
-{
-       struct nvme_ns *ns = bdev->bd_disk->private_data;
-
-       switch (cmd) {
-       case NVME_IOCTL_ID:
-               force_successful_syscall_return();
-               return ns->ns_id;
-       case NVME_IOCTL_ADMIN_CMD:
-               return nvme_user_cmd(ns->dev, NULL, (void __user *)arg);
-       case NVME_IOCTL_IO_CMD:
-               return nvme_user_cmd(ns->dev, ns, (void __user *)arg);
-       case NVME_IOCTL_SUBMIT_IO:
-               return nvme_submit_io(ns, (void __user *)arg);
-       case SG_GET_VERSION_NUM:
-               return nvme_sg_get_version_num((void __user *)arg);
-       case SG_IO:
-               return nvme_sg_io(ns, (void __user *)arg);
-       default:
-               return -ENOTTY;
-       }
-}
-
-#ifdef CONFIG_COMPAT
-static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
-                                       unsigned int cmd, unsigned long arg)
-{
-       switch (cmd) {
-       case SG_IO:
-               return -ENOIOCTLCMD;
-       }
-       return nvme_ioctl(bdev, mode, cmd, arg);
-}
-#else
-#define nvme_compat_ioctl      NULL
-#endif
-
-static void nvme_free_dev(struct kref *kref);
-static void nvme_free_ns(struct kref *kref)
-{
-       struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
-
-       if (ns->type == NVME_NS_LIGHTNVM)
-               nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
-
-       spin_lock(&dev_list_lock);
-       ns->disk->private_data = NULL;
-       spin_unlock(&dev_list_lock);
-
-       kref_put(&ns->dev->kref, nvme_free_dev);
-       put_disk(ns->disk);
-       kfree(ns);
-}
-
-static int nvme_open(struct block_device *bdev, fmode_t mode)
-{
-       int ret = 0;
-       struct nvme_ns *ns;
-
-       spin_lock(&dev_list_lock);
-       ns = bdev->bd_disk->private_data;
-       if (!ns)
-               ret = -ENXIO;
-       else if (!kref_get_unless_zero(&ns->kref))
-               ret = -ENXIO;
-       spin_unlock(&dev_list_lock);
-
-       return ret;
-}
-
-static void nvme_release(struct gendisk *disk, fmode_t mode)
-{
-       struct nvme_ns *ns = disk->private_data;
-       kref_put(&ns->kref, nvme_free_ns);
-}
-
-static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
-{
-       /* some standard values */
-       geo->heads = 1 << 6;
-       geo->sectors = 1 << 5;
-       geo->cylinders = get_capacity(bd->bd_disk) >> 11;
-       return 0;
-}
-
-static void nvme_config_discard(struct nvme_ns *ns)
-{
-       u32 logical_block_size = queue_logical_block_size(ns->queue);
-       ns->queue->limits.discard_zeroes_data = 0;
-       ns->queue->limits.discard_alignment = logical_block_size;
-       ns->queue->limits.discard_granularity = logical_block_size;
-       blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
-}
-
-static int nvme_revalidate_disk(struct gendisk *disk)
-{
-       struct nvme_ns *ns = disk->private_data;
-       struct nvme_dev *dev = ns->dev;
-       struct nvme_id_ns *id;
-       u8 lbaf, pi_type;
-       u16 old_ms;
-       unsigned short bs;
-
-       if (nvme_identify_ns(dev, ns->ns_id, &id)) {
-               dev_warn(dev->dev, "%s: Identify failure nvme%dn%d\n", __func__,
-                                               dev->instance, ns->ns_id);
-               return -ENODEV;
-       }
-       if (id->ncap == 0) {
-               kfree(id);
-               return -ENODEV;
-       }
-
-       if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
-               if (nvme_nvm_register(ns->queue, disk->disk_name)) {
-                       dev_warn(dev->dev,
-                               "%s: LightNVM init failure\n", __func__);
-                       kfree(id);
-                       return -ENODEV;
-               }
-               ns->type = NVME_NS_LIGHTNVM;
-       }
-
-       old_ms = ns->ms;
-       lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
-       ns->lba_shift = id->lbaf[lbaf].ds;
-       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
-       ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
-
-       /*
-        * If identify namespace failed, use default 512 byte block size so
-        * block layer can use before failing read/write for 0 capacity.
-        */
-       if (ns->lba_shift == 0)
-               ns->lba_shift = 9;
-       bs = 1 << ns->lba_shift;
-
-       /* XXX: PI implementation requires metadata equal t10 pi tuple size */
-       pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
-                                       id->dps & NVME_NS_DPS_PI_MASK : 0;
-
-       blk_mq_freeze_queue(disk->queue);
-       if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
-                               ns->ms != old_ms ||
-                               bs != queue_logical_block_size(disk->queue) ||
-                               (ns->ms && ns->ext)))
-               blk_integrity_unregister(disk);
-
-       ns->pi_type = pi_type;
-       blk_queue_logical_block_size(ns->queue, bs);
-
-       if (ns->ms && !ns->ext)
-               nvme_init_integrity(ns);
-
-       if ((ns->ms && !(ns->ms == 8 && ns->pi_type) &&
-                                               !blk_get_integrity(disk)) ||
-                                               ns->type == NVME_NS_LIGHTNVM)
-               set_capacity(disk, 0);
-       else
-               set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
-       if (dev->oncs & NVME_CTRL_ONCS_DSM)
-               nvme_config_discard(ns);
-       blk_mq_unfreeze_queue(disk->queue);
-
-       kfree(id);
-       return 0;
-}
-
-static char nvme_pr_type(enum pr_type type)
-{
-       switch (type) {
-       case PR_WRITE_EXCLUSIVE:
-               return 1;
-       case PR_EXCLUSIVE_ACCESS:
-               return 2;
-       case PR_WRITE_EXCLUSIVE_REG_ONLY:
-               return 3;
-       case PR_EXCLUSIVE_ACCESS_REG_ONLY:
-               return 4;
-       case PR_WRITE_EXCLUSIVE_ALL_REGS:
-               return 5;
-       case PR_EXCLUSIVE_ACCESS_ALL_REGS:
-               return 6;
-       default:
-               return 0;
-       }
-};
-
-static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
-                               u64 key, u64 sa_key, u8 op)
-{
-       struct nvme_ns *ns = bdev->bd_disk->private_data;
-       struct nvme_command c;
-       u8 data[16] = { 0, };
-
-       put_unaligned_le64(key, &data[0]);
-       put_unaligned_le64(sa_key, &data[8]);
-
-       memset(&c, 0, sizeof(c));
-       c.common.opcode = op;
-       c.common.nsid = cpu_to_le32(ns->ns_id);
-       c.common.cdw10[0] = cpu_to_le32(cdw10);
-
-       return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
-}
-
-static int nvme_pr_register(struct block_device *bdev, u64 old,
-               u64 new, unsigned flags)
-{
-       u32 cdw10;
-
-       if (flags & ~PR_FL_IGNORE_KEY)
-               return -EOPNOTSUPP;
-
-       cdw10 = old ? 2 : 0;
-       cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
-       cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
-       return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
-}
-
-static int nvme_pr_reserve(struct block_device *bdev, u64 key,
-               enum pr_type type, unsigned flags)
-{
-       u32 cdw10;
-
-       if (flags & ~PR_FL_IGNORE_KEY)
-               return -EOPNOTSUPP;
-
-       cdw10 = nvme_pr_type(type) << 8;
-       cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
-       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
-}
-
-static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
-               enum pr_type type, bool abort)
-{
-       u32 cdw10 = nvme_pr_type(type) << 8 | abort ? 2 : 1;
-       return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
-}
-
-static int nvme_pr_clear(struct block_device *bdev, u64 key)
-{
-       u32 cdw10 = 1 | (key ? 1 << 3 : 0);
-       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
-}
-
-static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
-{
-       u32 cdw10 = nvme_pr_type(type) << 8 | key ? 1 << 3 : 0;
-       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
-}
-
-static const struct pr_ops nvme_pr_ops = {
-       .pr_register    = nvme_pr_register,
-       .pr_reserve     = nvme_pr_reserve,
-       .pr_release     = nvme_pr_release,
-       .pr_preempt     = nvme_pr_preempt,
-       .pr_clear       = nvme_pr_clear,
-};
-
-static const struct block_device_operations nvme_fops = {
-       .owner          = THIS_MODULE,
-       .ioctl          = nvme_ioctl,
-       .compat_ioctl   = nvme_compat_ioctl,
-       .open           = nvme_open,
-       .release        = nvme_release,
-       .getgeo         = nvme_getgeo,
-       .revalidate_disk= nvme_revalidate_disk,
-       .pr_ops         = &nvme_pr_ops,
-};
-
 static int nvme_kthread(void *data)
 {
        struct nvme_dev *dev, *next;
@@ -2212,14 +1344,20 @@ static int nvme_kthread(void *data)
                spin_lock(&dev_list_lock);
                list_for_each_entry_safe(dev, next, &dev_list, node) {
                        int i;
-                       u32 csts = readl(&dev->bar->csts);
+                       u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
+                       /*
+                        * Skip controllers currently under reset.
+                        */
+                       if (work_pending(&dev->reset_work) || work_busy(&dev->reset_work))
+                               continue;
 
                        if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
                                                        csts & NVME_CSTS_CFS) {
-                               if (!__nvme_reset(dev)) {
+                               if (queue_work(nvme_workq, &dev->reset_work)) {
                                        dev_warn(dev->dev,
                                                "Failed status: %x, reset controller\n",
-                                               readl(&dev->bar->csts));
+                                               readl(dev->bar + NVME_REG_CSTS));
                                }
                                continue;
                        }
@@ -2230,11 +1368,8 @@ static int nvme_kthread(void *data)
                                spin_lock_irq(&nvmeq->q_lock);
                                nvme_process_cq(nvmeq);
 
-                               while ((i == 0) && (dev->event_limit > 0)) {
-                                       if (nvme_submit_async_admin_req(dev))
-                                               break;
-                                       dev->event_limit--;
-                               }
+                               while (i == 0 && dev->ctrl.event_limit > 0)
+                                       nvme_submit_async_event(dev);
                                spin_unlock_irq(&nvmeq->q_lock);
                        }
                }
@@ -2244,127 +1379,33 @@ static int nvme_kthread(void *data)
        return 0;
 }
 
-static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
-{
-       struct nvme_ns *ns;
-       struct gendisk *disk;
-       int node = dev_to_node(dev->dev);
-
-       ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
-       if (!ns)
-               return;
-
-       ns->queue = blk_mq_init_queue(&dev->tagset);
-       if (IS_ERR(ns->queue))
-               goto out_free_ns;
-       queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-       ns->dev = dev;
-       ns->queue->queuedata = ns;
-
-       disk = alloc_disk_node(0, node);
-       if (!disk)
-               goto out_free_queue;
-
-       kref_init(&ns->kref);
-       ns->ns_id = nsid;
-       ns->disk = disk;
-       ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
-       list_add_tail(&ns->list, &dev->namespaces);
-
-       blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-       if (dev->max_hw_sectors) {
-               blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
-               blk_queue_max_segments(ns->queue,
-                       (dev->max_hw_sectors / (dev->page_size >> 9)) + 1);
-       }
-       if (dev->stripe_size)
-               blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
-       if (dev->vwc & NVME_CTRL_VWC_PRESENT)
-               blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
-       blk_queue_virt_boundary(ns->queue, dev->page_size - 1);
-
-       disk->major = nvme_major;
-       disk->first_minor = 0;
-       disk->fops = &nvme_fops;
-       disk->private_data = ns;
-       disk->queue = ns->queue;
-       disk->driverfs_dev = dev->device;
-       disk->flags = GENHD_FL_EXT_DEVT;
-       sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
-
-       /*
-        * Initialize capacity to 0 until we establish the namespace format and
-        * setup integrity extentions if necessary. The revalidate_disk after
-        * add_disk allows the driver to register with integrity if the format
-        * requires it.
-        */
-       set_capacity(disk, 0);
-       if (nvme_revalidate_disk(ns->disk))
-               goto out_free_disk;
-
-       kref_get(&dev->kref);
-       if (ns->type != NVME_NS_LIGHTNVM) {
-               add_disk(ns->disk);
-               if (ns->ms) {
-                       struct block_device *bd = bdget_disk(ns->disk, 0);
-                       if (!bd)
-                               return;
-                       if (blkdev_get(bd, FMODE_READ, NULL)) {
-                               bdput(bd);
-                               return;
-                       }
-                       blkdev_reread_part(bd);
-                       blkdev_put(bd, FMODE_READ);
-               }
-       }
-       return;
- out_free_disk:
-       kfree(disk);
-       list_del(&ns->list);
- out_free_queue:
-       blk_cleanup_queue(ns->queue);
- out_free_ns:
-       kfree(ns);
-}
-
-/*
- * Create I/O queues.  Failing to create an I/O queue is not an issue,
- * we can continue with less than the desired amount of queues, and
- * even a controller without I/O queues an still be used to issue
- * even a controller without I/O queues can still be used to issue
- * for example.
- */
-static void nvme_create_io_queues(struct nvme_dev *dev)
+static int nvme_create_io_queues(struct nvme_dev *dev)
 {
        unsigned i;
+       int ret = 0;
 
-       for (i = dev->queue_count; i <= dev->max_qid; i++)
-               if (!nvme_alloc_queue(dev, i, dev->q_depth))
+       for (i = dev->queue_count; i <= dev->max_qid; i++) {
+               if (!nvme_alloc_queue(dev, i, dev->q_depth)) {
+                       ret = -ENOMEM;
                        break;
+               }
+       }
 
-       for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
-               if (nvme_create_queue(dev->queues[i], i)) {
+       for (i = dev->online_queues; i <= dev->queue_count - 1; i++) {
+               ret = nvme_create_queue(dev->queues[i], i);
+               if (ret) {
                        nvme_free_queues(dev, i);
                        break;
                }
-}
-
-static int set_queue_count(struct nvme_dev *dev, int count)
-{
-       int status;
-       u32 result;
-       u32 q_count = (count - 1) | ((count - 1) << 16);
-
-       status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
-                                                               &result);
-       if (status < 0)
-               return status;
-       if (status > 0) {
-               dev_err(dev->dev, "Could not set queue count (%d)\n", status);
-               return 0;
        }
-       return min(result & 0xffff, result >> 16) + 1;
+
+       /*
+        * Ignore failing Create SQ/CQ commands; we can continue with less
+        * than the desired amount of queues, and even a controller without
+        * I/O queues can still be used to issue admin commands.  This might
+        * be useful to upgrade a buggy firmware for example.
+        */
+       return ret >= 0 ? 0 : ret;
 }
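
The final return mirrors a convention used throughout this series:
negative values are Linux errnos (host-side failures that must be
propagated), while positive values are NVM Express status codes from
the controller, which queue creation deliberately tolerates.  In
sketch form:

/* ret < 0: Linux errno, propagate.  ret > 0: NVMe status code, the
 * controller refused but the driver can run with fewer queues. */
static int tolerate_nvme_status(int ret)
{
        return ret >= 0 ? 0 : ret;
}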
 
 static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
@@ -2379,11 +1420,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
        if (!use_cmb_sqes)
                return NULL;
 
-       dev->cmbsz = readl(&dev->bar->cmbsz);
+       dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
        if (!(NVME_CMB_SZ(dev->cmbsz)))
                return NULL;
 
-       cmbloc = readl(&dev->bar->cmbloc);
+       cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
 
        szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
        size = szu * NVME_CMB_SZ(dev->cmbsz);
@@ -2431,11 +1472,20 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        int result, i, vecs, nr_io_queues, size;
 
        nr_io_queues = num_possible_cpus();
-       result = set_queue_count(dev, nr_io_queues);
-       if (result <= 0)
+       result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
+       if (result < 0)
                return result;
-       if (result < nr_io_queues)
-               nr_io_queues = result;
+
+       /*
+        * Degraded controllers might return an error when setting the queue
+        * count.  We still want to be able to bring them online and offer
+        * access to the admin queue, as that might be the only way to fix
+        * them up.
+        */
+       if (result > 0) {
+               dev_err(dev->dev, "Could not set queue count (%d)\n", result);
+               nr_io_queues = 0;
+               result = 0;
+       }
 
        if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
                result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -2457,7 +1507,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
                                return -ENOMEM;
                        size = db_bar_size(dev, nr_io_queues);
                } while (1);
-               dev->dbs = ((void __iomem *)dev->bar) + 4096;
+               dev->dbs = dev->bar + 4096;
                adminq->q_db = dev->dbs;
        }
 
@@ -2501,115 +1551,115 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 
        /* Free previously allocated queues that are no longer usable */
        nvme_free_queues(dev, nr_io_queues + 1);
-       nvme_create_io_queues(dev);
-
-       return 0;
+       return nvme_create_io_queues(dev);
 
  free_queues:
        nvme_free_queues(dev, 1);
        return result;
 }
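
nvme_set_queue_count() replaces the removed set_queue_count() shown
earlier in this diff; judging from that removed code, the Set Features
(Number of Queues) result dword packs the granted SQ and CQ counts,
zero's based, into its two halves, and the usable count is the smaller
of the two plus one:

#include <stdint.h>

/* Result dword: granted SQ count in bits 15:0, granted CQ count in
 * bits 31:16, both zero's based. */
static unsigned int granted_queues(uint32_t result)
{
        unsigned int sqs = (result & 0xffff) + 1;
        unsigned int cqs = (result >> 16) + 1;

        return sqs < cqs ? sqs : cqs;
}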
 
-static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
+static void nvme_set_irq_hints(struct nvme_dev *dev)
 {
-       struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
-       struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
+       struct nvme_queue *nvmeq;
+       int i;
 
-       return nsa->ns_id - nsb->ns_id;
-}
+       for (i = 0; i < dev->online_queues; i++) {
+               nvmeq = dev->queues[i];
 
-static struct nvme_ns *nvme_find_ns(struct nvme_dev *dev, unsigned nsid)
-{
-       struct nvme_ns *ns;
+               if (!nvmeq->tags || !(*nvmeq->tags))
+                       continue;
 
-       list_for_each_entry(ns, &dev->namespaces, list) {
-               if (ns->ns_id == nsid)
-                       return ns;
-               if (ns->ns_id > nsid)
-                       break;
+               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+                                       blk_mq_tags_cpumask(*nvmeq->tags));
        }
-       return NULL;
 }
 
-static inline bool nvme_io_incapable(struct nvme_dev *dev)
+static void nvme_dev_scan(struct work_struct *work)
 {
-       return (!dev->bar || readl(&dev->bar->csts) & NVME_CSTS_CFS ||
-                                                       dev->online_queues < 2);
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
+
+       if (!dev->tagset.tags)
+               return;
+       nvme_scan_namespaces(&dev->ctrl);
+       nvme_set_irq_hints(dev);
 }
 
-static void nvme_ns_remove(struct nvme_ns *ns)
+static void nvme_del_queue_end(struct request *req, int error)
 {
-       bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
-
-       if (kill) {
-               blk_set_queue_dying(ns->queue);
+       struct nvme_queue *nvmeq = req->end_io_data;
 
-               /*
-                * The controller was shutdown first if we got here through
-                * device removal. The shutdown may requeue outstanding
-                * requests. These need to be aborted immediately so
-                * del_gendisk doesn't block indefinitely for their completion.
-                */
-               blk_mq_abort_requeue_list(ns->queue);
-       }
-       if (ns->disk->flags & GENHD_FL_UP)
-               del_gendisk(ns->disk);
-       if (kill || !blk_queue_dying(ns->queue)) {
-               blk_mq_abort_requeue_list(ns->queue);
-               blk_cleanup_queue(ns->queue);
-       }
-       list_del_init(&ns->list);
-       kref_put(&ns->kref, nvme_free_ns);
+       blk_mq_free_request(req);
+       complete(&nvmeq->dev->ioq_wait);
 }
 
-static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
+static void nvme_del_cq_end(struct request *req, int error)
 {
-       struct nvme_ns *ns, *next;
-       unsigned i;
+       struct nvme_queue *nvmeq = req->end_io_data;
 
-       for (i = 1; i <= nn; i++) {
-               ns = nvme_find_ns(dev, i);
-               if (ns) {
-                       if (revalidate_disk(ns->disk))
-                               nvme_ns_remove(ns);
-               } else
-                       nvme_alloc_ns(dev, i);
-       }
-       list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
-               if (ns->ns_id > nn)
-                       nvme_ns_remove(ns);
+       if (!error) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&nvmeq->q_lock, flags);
+               nvme_process_cq(nvmeq);
+               spin_unlock_irqrestore(&nvmeq->q_lock, flags);
        }
-       list_sort(NULL, &dev->namespaces, ns_cmp);
+
+       nvme_del_queue_end(req, error);
 }
 
-static void nvme_set_irq_hints(struct nvme_dev *dev)
+static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 {
-       struct nvme_queue *nvmeq;
-       int i;
+       struct request_queue *q = nvmeq->dev->ctrl.admin_q;
+       struct request *req;
+       struct nvme_command cmd;
 
-       for (i = 0; i < dev->online_queues; i++) {
-               nvmeq = dev->queues[i];
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.delete_queue.opcode = opcode;
+       cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
-               if (!nvmeq->tags || !(*nvmeq->tags))
-                       continue;
+       req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
 
-               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
-                                       blk_mq_tags_cpumask(*nvmeq->tags));
-       }
+       req->timeout = ADMIN_TIMEOUT;
+       req->end_io_data = nvmeq;
+
+       blk_execute_rq_nowait(q, NULL, req, false,
+                       opcode == nvme_admin_delete_cq ?
+                               nvme_del_cq_end : nvme_del_queue_end);
+       return 0;
 }
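
nvme_delete_queue() is a fire-and-forget admin command: the context
travels in end_io_data and all teardown happens in the completion
callback.  A userspace model of that pattern (plain C stand-ins, not
the kernel API):

#include <stdio.h>

struct request { void *end_io_data; };
typedef void (*end_io_fn)(struct request *, int error);

/* A real queue would run the callback asynchronously on completion. */
static void execute_nowait(struct request *req, end_io_fn done)
{
        done(req, 0);
}

static void del_queue_end(struct request *req, int error)
{
        printf("queue %d deleted, error %d\n",
               *(int *)req->end_io_data, error);
}

int main(void)
{
        int qid = 3;
        struct request req = { .end_io_data = &qid };

        execute_nowait(&req, del_queue_end);
        return 0;
}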
 
-static void nvme_dev_scan(struct work_struct *work)
+static void nvme_disable_io_queues(struct nvme_dev *dev)
 {
-       struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
-       struct nvme_id_ctrl *ctrl;
+       int pass;
+       unsigned long timeout;
+       u8 opcode = nvme_admin_delete_sq;
 
-       if (!dev->tagset.tags)
-               return;
-       if (nvme_identify_ctrl(dev, &ctrl))
-               return;
-       nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
-       kfree(ctrl);
-       nvme_set_irq_hints(dev);
+       for (pass = 0; pass < 2; pass++) {
+               int sent = 0, i = dev->queue_count - 1;
+
+               reinit_completion(&dev->ioq_wait);
+ retry:
+               timeout = ADMIN_TIMEOUT;
+               for (; i > 0; i--) {
+                       struct nvme_queue *nvmeq = dev->queues[i];
+
+                       if (!pass)
+                               nvme_suspend_queue(nvmeq);
+                       if (nvme_delete_queue(nvmeq, opcode))
+                               break;
+                       ++sent;
+               }
+               while (sent--) {
+                       timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
+                       if (timeout == 0)
+                               return;
+                       if (i)
+                               goto retry;
+               }
+               opcode = nvme_admin_delete_cq;
+       }
 }
 
 /*
@@ -2620,42 +1670,7 @@ static void nvme_dev_scan(struct work_struct *work)
  */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
-       int res;
-       struct nvme_id_ctrl *ctrl;
-       int shift = NVME_CAP_MPSMIN(lo_hi_readq(&dev->bar->cap)) + 12;
-
-       res = nvme_identify_ctrl(dev, &ctrl);
-       if (res) {
-               dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
-               return -EIO;
-       }
-
-       dev->oncs = le16_to_cpup(&ctrl->oncs);
-       dev->abort_limit = ctrl->acl + 1;
-       dev->vwc = ctrl->vwc;
-       memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
-       memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
-       memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
-       if (ctrl->mdts)
-               dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
-       else
-               dev->max_hw_sectors = UINT_MAX;
-       if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
-                       (pdev->device == 0x0953) && ctrl->vs[3]) {
-               unsigned int max_hw_sectors;
-
-               dev->stripe_size = 1 << (ctrl->vs[3] + shift);
-               max_hw_sectors = dev->stripe_size >> (shift - 9);
-               if (dev->max_hw_sectors) {
-                       dev->max_hw_sectors = min(max_hw_sectors,
-                                                       dev->max_hw_sectors);
-               } else
-                       dev->max_hw_sectors = max_hw_sectors;
-       }
-       kfree(ctrl);
-
-       if (!dev->tagset.tags) {
+       if (!dev->ctrl.tagset) {
                dev->tagset.ops = &nvme_mq_ops;
                dev->tagset.nr_hw_queues = dev->online_queues - 1;
                dev->tagset.timeout = NVME_IO_TIMEOUT;
@@ -2668,8 +1683,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
 
                if (blk_mq_alloc_tag_set(&dev->tagset))
                        return 0;
+               dev->ctrl.tagset = &dev->tagset;
        }
-       schedule_work(&dev->scan_work);
+       queue_work(nvme_workq, &dev->scan_work);
        return 0;
 }
 
@@ -2699,7 +1715,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (!dev->bar)
                goto disable;
 
-       if (readl(&dev->bar->csts) == -1) {
+       if (readl(dev->bar + NVME_REG_CSTS) == -1) {
                result = -ENODEV;
                goto unmap;
        }
@@ -2714,10 +1730,11 @@ static int nvme_dev_map(struct nvme_dev *dev)
                        goto unmap;
        }
 
-       cap = lo_hi_readq(&dev->bar->cap);
+       cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
+
        dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
        dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
-       dev->dbs = ((void __iomem *)dev->bar) + 4096;
+       dev->dbs = dev->bar + 4096;
 
        /*
         * Temporary fix for the Apple controller found in the MacBook8,1 and
@@ -2730,9 +1747,11 @@ static int nvme_dev_map(struct nvme_dev *dev)
                        dev->q_depth);
        }
 
-       if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
+       if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
                dev->cmb = nvme_map_cmb(dev);
 
+       pci_enable_pcie_error_reporting(pdev);
+       pci_save_state(pdev);
        return 0;
 
  unmap:
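
CAP packs several fields the mapping code extracts: MQES in bits 15:0 is the zero-based maximum queue depth, and DSTRD in bits 35:32 scales the doorbell stride. A hedged sketch of the same extraction, with local macros standing in for the kernel's NVME_CAP_* helpers:

	#include <stdint.h>
	#include <stdio.h>

	/* Local equivalents of NVME_CAP_MQES()/NVME_CAP_STRIDE(). */
	#define CAP_MQES(cap)	((cap) & 0xffff)
	#define CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)

	#define NVME_Q_DEPTH	1024	/* driver-imposed cap, per the code above */

	int main(void)
	{
		uint64_t cap = 0x0000002028010fffULL;	/* example CAP value */
		int q_depth = CAP_MQES(cap) + 1;	/* MQES is zero-based */
		int db_stride = 1 << CAP_STRIDE(cap);	/* in 4-byte units */

		if (q_depth > NVME_Q_DEPTH)
			q_depth = NVME_Q_DEPTH;

		printf("q_depth=%d db_stride=%d\n", q_depth, db_stride);
		return 0;
	}
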
@@ -2760,152 +1779,34 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
                pci_release_regions(pdev);
        }
 
-       if (pci_is_enabled(pdev))
+       if (pci_is_enabled(pdev)) {
+               pci_disable_pcie_error_reporting(pdev);
                pci_disable_device(pdev);
-}
-
-struct nvme_delq_ctx {
-       struct task_struct *waiter;
-       struct kthread_worker *worker;
-       atomic_t refcount;
-};
-
-static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
-{
-       dq->waiter = current;
-       mb();
-
-       for (;;) {
-               set_current_state(TASK_KILLABLE);
-               if (!atomic_read(&dq->refcount))
-                       break;
-               if (!schedule_timeout(ADMIN_TIMEOUT) ||
-                                       fatal_signal_pending(current)) {
-                       /*
-                        * Disable the controller first since we can't trust it
-                        * at this point, but leave the admin queue enabled
-                        * until all queue deletion requests are flushed.
-                        * FIXME: This may take a while if there are more h/w
-                        * queues than admin tags.
-                        */
-                       set_current_state(TASK_RUNNING);
-                       nvme_disable_ctrl(dev, lo_hi_readq(&dev->bar->cap));
-                       nvme_clear_queue(dev->queues[0]);
-                       flush_kthread_worker(dq->worker);
-                       nvme_disable_queue(dev, 0);
-                       return;
-               }
        }
-       set_current_state(TASK_RUNNING);
-}
-
-static void nvme_put_dq(struct nvme_delq_ctx *dq)
-{
-       atomic_dec(&dq->refcount);
-       if (dq->waiter)
-               wake_up_process(dq->waiter);
-}
-
-static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
-{
-       atomic_inc(&dq->refcount);
-       return dq;
-}
-
-static void nvme_del_queue_end(struct nvme_queue *nvmeq)
-{
-       struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
-       nvme_put_dq(dq);
-
-       spin_lock_irq(&nvmeq->q_lock);
-       nvme_process_cq(nvmeq);
-       spin_unlock_irq(&nvmeq->q_lock);
-}
-
-static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
-                                               kthread_work_func_t fn)
-{
-       struct nvme_command c;
-
-       memset(&c, 0, sizeof(c));
-       c.delete_queue.opcode = opcode;
-       c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
-
-       init_kthread_work(&nvmeq->cmdinfo.work, fn);
-       return nvme_submit_admin_async_cmd(nvmeq->dev, &c, &nvmeq->cmdinfo,
-                                                               ADMIN_TIMEOUT);
-}
-
-static void nvme_del_cq_work_handler(struct kthread_work *work)
-{
-       struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
-                                                       cmdinfo.work);
-       nvme_del_queue_end(nvmeq);
-}
-
-static int nvme_delete_cq(struct nvme_queue *nvmeq)
-{
-       return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
-                                               nvme_del_cq_work_handler);
 }
 
-static void nvme_del_sq_work_handler(struct kthread_work *work)
+static int nvme_dev_list_add(struct nvme_dev *dev)
 {
-       struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
-                                                       cmdinfo.work);
-       int status = nvmeq->cmdinfo.status;
-
-       if (!status)
-               status = nvme_delete_cq(nvmeq);
-       if (status)
-               nvme_del_queue_end(nvmeq);
-}
-
-static int nvme_delete_sq(struct nvme_queue *nvmeq)
-{
-       return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
-                                               nvme_del_sq_work_handler);
-}
-
-static void nvme_del_queue_start(struct kthread_work *work)
-{
-       struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
-                                                       cmdinfo.work);
-       if (nvme_delete_sq(nvmeq))
-               nvme_del_queue_end(nvmeq);
-}
+       bool start_thread = false;
 
-static void nvme_disable_io_queues(struct nvme_dev *dev)
-{
-       int i;
-       DEFINE_KTHREAD_WORKER_ONSTACK(worker);
-       struct nvme_delq_ctx dq;
-       struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
-                                       &worker, "nvme%d", dev->instance);
-
-       if (IS_ERR(kworker_task)) {
-               dev_err(dev->dev,
-                       "Failed to create queue del task\n");
-               for (i = dev->queue_count - 1; i > 0; i--)
-                       nvme_disable_queue(dev, i);
-               return;
+       spin_lock(&dev_list_lock);
+       if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+               start_thread = true;
+               nvme_thread = NULL;
        }
+       list_add(&dev->node, &dev_list);
+       spin_unlock(&dev_list_lock);
 
-       dq.waiter = NULL;
-       atomic_set(&dq.refcount, 0);
-       dq.worker = &worker;
-       for (i = dev->queue_count - 1; i > 0; i--) {
-               struct nvme_queue *nvmeq = dev->queues[i];
+       if (start_thread) {
+               nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+               wake_up_all(&nvme_kthread_wait);
+       } else
+               wait_event_killable(nvme_kthread_wait, nvme_thread);
 
-               if (nvme_suspend_queue(nvmeq))
-                       continue;
-               nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
-               nvmeq->cmdinfo.worker = dq.worker;
-               init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
-               queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
-       }
-       nvme_wait_dq(&dq, dev);
-       kthread_stop(kworker_task);
+       if (IS_ERR_OR_NULL(nvme_thread))
+               return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+
+       return 0;
 }
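
nvme_dev_list_add() lets whichever device registers first start the shared polling kthread while later callers block until it is up. A rough pthread analogue of that first-one-in handshake (all names below are illustrative):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t started = PTHREAD_COND_INITIALIZER;
	static pthread_t poller;
	static bool poller_running;

	static void *poller_fn(void *arg)
	{
		(void)arg;
		/* ... poll all registered devices ... */
		return NULL;
	}

	/* Called for every device; only the first caller spawns the poller. */
	static int dev_list_add(void)
	{
		pthread_mutex_lock(&lock);
		if (!poller_running) {
			if (pthread_create(&poller, NULL, poller_fn, NULL)) {
				pthread_mutex_unlock(&lock);
				return -1;
			}
			poller_running = true;
			pthread_cond_broadcast(&started);
		} else {
			while (!poller_running)
				pthread_cond_wait(&started, &lock);
		}
		pthread_mutex_unlock(&lock);
		return 0;
	}

	int main(void)
	{
		printf("add: %d\n", dev_list_add());
		return 0;
	}
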
 
 /*
@@ -2928,44 +1829,17 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
                kthread_stop(tmp);
 }
 
-static void nvme_freeze_queues(struct nvme_dev *dev)
-{
-       struct nvme_ns *ns;
-
-       list_for_each_entry(ns, &dev->namespaces, list) {
-               blk_mq_freeze_queue_start(ns->queue);
-
-               spin_lock_irq(ns->queue->queue_lock);
-               queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
-               spin_unlock_irq(ns->queue->queue_lock);
-
-               blk_mq_cancel_requeue_work(ns->queue);
-               blk_mq_stop_hw_queues(ns->queue);
-       }
-}
-
-static void nvme_unfreeze_queues(struct nvme_dev *dev)
-{
-       struct nvme_ns *ns;
-
-       list_for_each_entry(ns, &dev->namespaces, list) {
-               queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
-               blk_mq_unfreeze_queue(ns->queue);
-               blk_mq_start_stopped_hw_queues(ns->queue, true);
-               blk_mq_kick_requeue_list(ns->queue);
-       }
-}
-
-static void nvme_dev_shutdown(struct nvme_dev *dev)
+static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
        int i;
        u32 csts = -1;
 
        nvme_dev_list_remove(dev);
 
+       mutex_lock(&dev->shutdown_lock);
        if (dev->bar) {
-               nvme_freeze_queues(dev);
-               csts = readl(&dev->bar->csts);
+               nvme_stop_queues(&dev->ctrl);
+               csts = readl(dev->bar + NVME_REG_CSTS);
        }
        if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
                for (i = dev->queue_count - 1; i >= 0; i--) {
@@ -2974,30 +1848,13 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
                }
        } else {
                nvme_disable_io_queues(dev);
-               nvme_shutdown_ctrl(dev);
-               nvme_disable_queue(dev, 0);
+               nvme_disable_admin_queue(dev, shutdown);
        }
        nvme_dev_unmap(dev);
 
        for (i = dev->queue_count - 1; i >= 0; i--)
                nvme_clear_queue(dev->queues[i]);
-}
-
-static void nvme_dev_remove(struct nvme_dev *dev)
-{
-       struct nvme_ns *ns, *next;
-
-       if (nvme_io_incapable(dev)) {
-               /*
-                * If the device is not capable of IO (surprise hot-removal,
-                * for example), we need to quiesce prior to deleting the
-                * namespaces. This will end outstanding requests and prevent
-                * attempts to sync dirty data.
-                */
-               nvme_dev_shutdown(dev);
-       }
-       list_for_each_entry_safe(ns, next, &dev->namespaces, list)
-               nvme_ns_remove(ns);
+       mutex_unlock(&dev->shutdown_lock);
 }
 
 static int nvme_setup_prp_pools(struct nvme_dev *dev)
@@ -3023,119 +1880,36 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
        dma_pool_destroy(dev->prp_small_pool);
 }
 
-static DEFINE_IDA(nvme_instance_ida);
-
-static int nvme_set_instance(struct nvme_dev *dev)
-{
-       int instance, error;
-
-       do {
-               if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
-                       return -ENODEV;
-
-               spin_lock(&dev_list_lock);
-               error = ida_get_new(&nvme_instance_ida, &instance);
-               spin_unlock(&dev_list_lock);
-       } while (error == -EAGAIN);
-
-       if (error)
-               return -ENODEV;
-
-       dev->instance = instance;
-       return 0;
-}
-
-static void nvme_release_instance(struct nvme_dev *dev)
-{
-       spin_lock(&dev_list_lock);
-       ida_remove(&nvme_instance_ida, dev->instance);
-       spin_unlock(&dev_list_lock);
-}
-
-static void nvme_free_dev(struct kref *kref)
+static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
 {
-       struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
+       struct nvme_dev *dev = to_nvme_dev(ctrl);
 
        put_device(dev->dev);
-       put_device(dev->device);
-       nvme_release_instance(dev);
        if (dev->tagset.tags)
                blk_mq_free_tag_set(&dev->tagset);
-       if (dev->admin_q)
-               blk_put_queue(dev->admin_q);
+       if (dev->ctrl.admin_q)
+               blk_put_queue(dev->ctrl.admin_q);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
 }
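
nvme_pci_free_ctrl() recovers the PCI-private nvme_dev from the embedded nvme_ctrl via to_nvme_dev(), the usual container_of() idiom that lets the transport-neutral core hold only the inner struct. A self-contained sketch of the idiom:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct ctrl {			/* transport-neutral part */
		int instance;
	};

	struct pci_dev_priv {		/* PCI-specific wrapper */
		void *bar;
		struct ctrl ctrl;	/* embedded, not pointed-to */
	};

	static struct pci_dev_priv *to_priv(struct ctrl *c)
	{
		return container_of(c, struct pci_dev_priv, ctrl);
	}

	int main(void)
	{
		struct pci_dev_priv dev = { .bar = NULL, .ctrl = { .instance = 7 } };
		struct ctrl *c = &dev.ctrl;	/* all the core ever sees */

		printf("instance %d belongs to priv %p\n",
		       c->instance, (void *)to_priv(c));
		return 0;
	}
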
 
-static int nvme_dev_open(struct inode *inode, struct file *f)
-{
-       struct nvme_dev *dev;
-       int instance = iminor(inode);
-       int ret = -ENODEV;
-
-       spin_lock(&dev_list_lock);
-       list_for_each_entry(dev, &dev_list, node) {
-               if (dev->instance == instance) {
-                       if (!dev->admin_q) {
-                               ret = -EWOULDBLOCK;
-                               break;
-                       }
-                       if (!kref_get_unless_zero(&dev->kref))
-                               break;
-                       f->private_data = dev;
-                       ret = 0;
-                       break;
-               }
-       }
-       spin_unlock(&dev_list_lock);
-
-       return ret;
-}
-
-static int nvme_dev_release(struct inode *inode, struct file *f)
+static void nvme_reset_work(struct work_struct *work)
 {
-       struct nvme_dev *dev = f->private_data;
-       kref_put(&dev->kref, nvme_free_dev);
-       return 0;
-}
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+       int result;
 
-static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
-       struct nvme_dev *dev = f->private_data;
-       struct nvme_ns *ns;
-
-       switch (cmd) {
-       case NVME_IOCTL_ADMIN_CMD:
-               return nvme_user_cmd(dev, NULL, (void __user *)arg);
-       case NVME_IOCTL_IO_CMD:
-               if (list_empty(&dev->namespaces))
-                       return -ENOTTY;
-               ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
-               return nvme_user_cmd(dev, ns, (void __user *)arg);
-       case NVME_IOCTL_RESET:
-               dev_warn(dev->dev, "resetting controller\n");
-               return nvme_reset(dev);
-       case NVME_IOCTL_SUBSYS_RESET:
-               return nvme_subsys_reset(dev);
-       default:
-               return -ENOTTY;
-       }
-}
+       if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
+               goto out;
 
-static const struct file_operations nvme_dev_fops = {
-       .owner          = THIS_MODULE,
-       .open           = nvme_dev_open,
-       .release        = nvme_dev_release,
-       .unlocked_ioctl = nvme_dev_ioctl,
-       .compat_ioctl   = nvme_dev_ioctl,
-};
+       /*
+        * If we're called to reset a live controller, first shut it down
+        * before moving on.
+        */
+       if (dev->bar)
+               nvme_dev_disable(dev, false);
 
-static void nvme_probe_work(struct work_struct *work)
-{
-       struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
-       bool start_thread = false;
-       int result;
+       set_bit(NVME_CTRL_RESETTING, &dev->flags);
 
        result = nvme_dev_map(dev);
        if (result)
@@ -3145,35 +1919,24 @@ static void nvme_probe_work(struct work_struct *work)
        if (result)
                goto unmap;
 
-       spin_lock(&dev_list_lock);
-       if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
-               start_thread = true;
-               nvme_thread = NULL;
-       }
-       list_add(&dev->node, &dev_list);
-       spin_unlock(&dev_list_lock);
-
-       if (start_thread) {
-               nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-               wake_up_all(&nvme_kthread_wait);
-       } else
-               wait_event_killable(nvme_kthread_wait, nvme_thread);
-
-       if (IS_ERR_OR_NULL(nvme_thread)) {
-               result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
-               goto disable;
-       }
-
        nvme_init_queue(dev->queues[0], 0);
        result = nvme_alloc_admin_tags(dev);
        if (result)
                goto disable;
 
+       result = nvme_init_identify(&dev->ctrl);
+       if (result)
+               goto free_tags;
+
        result = nvme_setup_io_queues(dev);
        if (result)
                goto free_tags;
 
-       dev->event_limit = 1;
+       dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
+
+       result = nvme_dev_list_add(dev);
+       if (result)
+               goto remove;
 
        /*
         * Keep the controller around but remove all namespaces if we don't have
@@ -3181,117 +1944,98 @@ static void nvme_probe_work(struct work_struct *work)
         */
        if (dev->online_queues < 2) {
                dev_warn(dev->dev, "IO queues not created\n");
-               nvme_dev_remove(dev);
+               nvme_remove_namespaces(&dev->ctrl);
        } else {
-               nvme_unfreeze_queues(dev);
+               nvme_start_queues(&dev->ctrl);
                nvme_dev_add(dev);
        }
 
+       clear_bit(NVME_CTRL_RESETTING, &dev->flags);
        return;
 
+ remove:
+       nvme_dev_list_remove(dev);
  free_tags:
        nvme_dev_remove_admin(dev);
-       blk_put_queue(dev->admin_q);
-       dev->admin_q = NULL;
+       blk_put_queue(dev->ctrl.admin_q);
+       dev->ctrl.admin_q = NULL;
        dev->queues[0]->tags = NULL;
  disable:
-       nvme_disable_queue(dev, 0);
-       nvme_dev_list_remove(dev);
+       nvme_disable_admin_queue(dev, false);
  unmap:
        nvme_dev_unmap(dev);
  out:
-       if (!work_busy(&dev->reset_work))
-               nvme_dead_ctrl(dev);
+       nvme_remove_dead_ctrl(dev);
 }
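
nvme_reset_work() is written to never run reentrantly: the RESETTING bit must be clear on entry, is set for the whole bring-up, and is cleared only on success. A small C11 sketch of a single-owner guard in the same spirit (it claims the flag with an exchange, where the kernel code above only WARNs):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool resetting;

	static bool reset_work(void)
	{
		/* Claim the flag; a concurrent reset simply bails out. */
		if (atomic_exchange(&resetting, true)) {
			fprintf(stderr, "reset already in progress\n");
			return false;
		}

		/* ... disable controller, remap BAR, re-create queues ... */

		atomic_store(&resetting, false);	/* success: allow new resets */
		return true;
	}

	int main(void)
	{
		printf("first:  %d\n", reset_work());
		printf("second: %d\n", reset_work());
		return 0;
	}
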
 
-static int nvme_remove_dead_ctrl(void *arg)
+static void nvme_remove_dead_ctrl_work(struct work_struct *work)
 {
-       struct nvme_dev *dev = (struct nvme_dev *)arg;
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
        if (pci_get_drvdata(pdev))
                pci_stop_and_remove_bus_device_locked(pdev);
-       kref_put(&dev->kref, nvme_free_dev);
-       return 0;
+       nvme_put_ctrl(&dev->ctrl);
 }
 
-static void nvme_dead_ctrl(struct nvme_dev *dev)
+static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
 {
-       dev_warn(dev->dev, "Device failed to resume\n");
-       kref_get(&dev->kref);
-       if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
-                                               dev->instance))) {
-               dev_err(dev->dev,
-                       "Failed to start controller remove task\n");
-               kref_put(&dev->kref, nvme_free_dev);
-       }
+       dev_warn(dev->dev, "Removing after probe failure\n");
+       kref_get(&dev->ctrl.kref);
+       if (!schedule_work(&dev->remove_work))
+               nvme_put_ctrl(&dev->ctrl);
 }
 
-static void nvme_reset_work(struct work_struct *ws)
+static int nvme_reset(struct nvme_dev *dev)
 {
-       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
-       bool in_probe = work_busy(&dev->probe_work);
-
-       nvme_dev_shutdown(dev);
+       if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
+               return -ENODEV;
 
-       /* Synchronize with device probe so that work will see failure status
-        * and exit gracefully without trying to schedule another reset */
-       flush_work(&dev->probe_work);
+       if (!queue_work(nvme_workq, &dev->reset_work))
+               return -EBUSY;
 
-       /* Fail this device if reset occurred during probe to avoid
-        * infinite initialization loops. */
-       if (in_probe) {
-               nvme_dead_ctrl(dev);
-               return;
-       }
-       /* Schedule device resume asynchronously so the reset work is available
-        * to cleanup errors that may occur during reinitialization */
-       schedule_work(&dev->probe_work);
+       flush_work(&dev->reset_work);
+       return 0;
 }
 
-static int __nvme_reset(struct nvme_dev *dev)
+static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 {
-       if (work_pending(&dev->reset_work))
-               return -EBUSY;
-       list_del_init(&dev->node);
-       queue_work(nvme_workq, &dev->reset_work);
+       *val = readl(to_nvme_dev(ctrl)->bar + off);
        return 0;
 }
 
-static int nvme_reset(struct nvme_dev *dev)
+static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 {
-       int ret;
-
-       if (!dev->admin_q || blk_queue_dying(dev->admin_q))
-               return -ENODEV;
-
-       spin_lock(&dev_list_lock);
-       ret = __nvme_reset(dev);
-       spin_unlock(&dev_list_lock);
-
-       if (!ret) {
-               flush_work(&dev->reset_work);
-               flush_work(&dev->probe_work);
-               return 0;
-       }
+       writel(val, to_nvme_dev(ctrl)->bar + off);
+       return 0;
+}
 
-       return ret;
+static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+       *val = readq(to_nvme_dev(ctrl)->bar + off);
+       return 0;
 }
 
-static ssize_t nvme_sysfs_reset(struct device *dev,
-                               struct device_attribute *attr, const char *buf,
-                               size_t count)
+static bool nvme_pci_io_incapable(struct nvme_ctrl *ctrl)
 {
-       struct nvme_dev *ndev = dev_get_drvdata(dev);
-       int ret;
+       struct nvme_dev *dev = to_nvme_dev(ctrl);
 
-       ret = nvme_reset(ndev);
-       if (ret < 0)
-               return ret;
+       return !dev->bar || dev->online_queues < 2;
+}
 
-       return count;
+static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
+{
+       return nvme_reset(to_nvme_dev(ctrl));
 }
-static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+
+static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+       .reg_read32             = nvme_pci_reg_read32,
+       .reg_write32            = nvme_pci_reg_write32,
+       .reg_read64             = nvme_pci_reg_read64,
+       .io_incapable           = nvme_pci_io_incapable,
+       .reset_ctrl             = nvme_pci_reset_ctrl,
+       .free_ctrl              = nvme_pci_free_ctrl,
+};
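
The nvme_ctrl_ops table is the seam between the new transport-neutral core and the PCI driver: the core touches controller registers only through the reg_read32()/reg_write32() callbacks. A compact sketch of that indirection over a stand-in BAR (every name below is invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	struct ctrl;

	struct ctrl_ops {
		int (*reg_read32)(struct ctrl *c, uint32_t off, uint32_t *val);
	};

	struct ctrl {
		const struct ctrl_ops *ops;
		uint8_t *bar;		/* pretend this is an ioremap()ed BAR */
	};

	static int pci_reg_read32(struct ctrl *c, uint32_t off, uint32_t *val)
	{
		*val = *(volatile uint32_t *)(c->bar + off);
		return 0;
	}

	static const struct ctrl_ops pci_ops = { .reg_read32 = pci_reg_read32 };

	#define REG_CSTS	0x1c
	#define CSTS_RDY	0x1

	/* Core-side helper: knows nothing about PCI, only about the ops table. */
	static int ctrl_ready(struct ctrl *c)
	{
		uint32_t csts = 0;

		if (c->ops->reg_read32(c, REG_CSTS, &csts))
			return 0;
		return csts & CSTS_RDY;
	}

	int main(void)
	{
		static uint32_t fake_bar[0x10];	/* 0x40 bytes, naturally aligned */
		struct ctrl c = { .ops = &pci_ops, .bar = (uint8_t *)fake_bar };

		fake_bar[REG_CSTS / 4] = CSTS_RDY;
		printf("ready=%d\n", ctrl_ready(&c));
		return 0;
	}
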
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
@@ -3314,46 +2058,30 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (!dev->queues)
                goto free;
 
-       INIT_LIST_HEAD(&dev->namespaces);
-       INIT_WORK(&dev->reset_work, nvme_reset_work);
        dev->dev = get_device(&pdev->dev);
        pci_set_drvdata(pdev, dev);
-       result = nvme_set_instance(dev);
-       if (result)
-               goto put_pci;
+
+       INIT_LIST_HEAD(&dev->node);
+       INIT_WORK(&dev->scan_work, nvme_dev_scan);
+       INIT_WORK(&dev->reset_work, nvme_reset_work);
+       INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
+       mutex_init(&dev->shutdown_lock);
+       init_completion(&dev->ioq_wait);
 
        result = nvme_setup_prp_pools(dev);
        if (result)
-               goto release;
-
-       kref_init(&dev->kref);
-       dev->device = device_create(nvme_class, &pdev->dev,
-                               MKDEV(nvme_char_major, dev->instance),
-                               dev, "nvme%d", dev->instance);
-       if (IS_ERR(dev->device)) {
-               result = PTR_ERR(dev->device);
-               goto release_pools;
-       }
-       get_device(dev->device);
-       dev_set_drvdata(dev->device, dev);
+               goto put_pci;
 
-       result = device_create_file(dev->device, &dev_attr_reset_controller);
+       result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+                       id->driver_data);
        if (result)
-               goto put_dev;
+               goto release_pools;
 
-       INIT_LIST_HEAD(&dev->node);
-       INIT_WORK(&dev->scan_work, nvme_dev_scan);
-       INIT_WORK(&dev->probe_work, nvme_probe_work);
-       schedule_work(&dev->probe_work);
+       queue_work(nvme_workq, &dev->reset_work);
        return 0;
 
- put_dev:
-       device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
-       put_device(dev->device);
  release_pools:
        nvme_release_prp_pools(dev);
- release:
-       nvme_release_instance(dev);
  put_pci:
        put_device(dev->dev);
  free:
@@ -3368,15 +2096,15 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
        struct nvme_dev *dev = pci_get_drvdata(pdev);
 
        if (prepare)
-               nvme_dev_shutdown(dev);
+               nvme_dev_disable(dev, false);
        else
-               schedule_work(&dev->probe_work);
+               queue_work(nvme_workq, &dev->reset_work);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
-       nvme_dev_shutdown(dev);
+       nvme_dev_disable(dev, true);
 }
 
 static void nvme_remove(struct pci_dev *pdev)
@@ -3388,34 +2116,25 @@ static void nvme_remove(struct pci_dev *pdev)
        spin_unlock(&dev_list_lock);
 
        pci_set_drvdata(pdev, NULL);
-       flush_work(&dev->probe_work);
        flush_work(&dev->reset_work);
        flush_work(&dev->scan_work);
-       device_remove_file(dev->device, &dev_attr_reset_controller);
-       nvme_dev_remove(dev);
-       nvme_dev_shutdown(dev);
+       nvme_remove_namespaces(&dev->ctrl);
+       nvme_uninit_ctrl(&dev->ctrl);
+       nvme_dev_disable(dev, true);
        nvme_dev_remove_admin(dev);
-       device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
        nvme_free_queues(dev, 0);
        nvme_release_cmb(dev);
        nvme_release_prp_pools(dev);
-       kref_put(&dev->kref, nvme_free_dev);
+       nvme_put_ctrl(&dev->ctrl);
 }
 
-/* These functions are yet to be implemented */
-#define nvme_error_detected NULL
-#define nvme_dump_registers NULL
-#define nvme_link_reset NULL
-#define nvme_slot_reset NULL
-#define nvme_error_resume NULL
-
 #ifdef CONFIG_PM_SLEEP
 static int nvme_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-       nvme_dev_shutdown(ndev);
+       nvme_dev_disable(ndev, true);
        return 0;
 }
 
@@ -3424,17 +2143,53 @@ static int nvme_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-       schedule_work(&ndev->probe_work);
+       queue_work(nvme_workq, &ndev->reset_work);
        return 0;
 }
 #endif
 
 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
 
+static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
+                                               pci_channel_state_t state)
+{
+       struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+       /*
+        * A frozen channel requires a reset. When detected, this method
+        * will shut down the controller to quiesce. The controller will be
+        * restarted after the slot reset through the driver's slot_reset
+        * callback.
+        */
+       dev_warn(&pdev->dev, "error detected: state:%d\n", state);
+       switch (state) {
+       case pci_channel_io_normal:
+               return PCI_ERS_RESULT_CAN_RECOVER;
+       case pci_channel_io_frozen:
+               nvme_dev_disable(dev, false);
+               return PCI_ERS_RESULT_NEED_RESET;
+       case pci_channel_io_perm_failure:
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
+{
+       struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+       dev_info(&pdev->dev, "restart after slot reset\n");
+       pci_restore_state(pdev);
+       queue_work(nvme_workq, &dev->reset_work);
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void nvme_error_resume(struct pci_dev *pdev)
+{
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+}
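
Together the three callbacks implement the PCI error-recovery contract: error_detected() quiesces and votes on a disposition, the AER core resets the slot, slot_reset() restores config space and schedules a controller reset, and resume() clears the uncorrectable-error status. A toy walk through that sequence for a frozen channel (stand-in names, not the pci_error_handlers API):

	#include <stdio.h>

	enum ers_result { ERS_CAN_RECOVER, ERS_NEED_RESET, ERS_RECOVERED,
			  ERS_DISCONNECT };
	enum channel_state { CH_NORMAL, CH_FROZEN, CH_PERM_FAILURE };

	static enum ers_result error_detected(enum channel_state state)
	{
		switch (state) {
		case CH_NORMAL:
			return ERS_CAN_RECOVER;
		case CH_FROZEN:
			puts("quiesce controller");	/* nvme_dev_disable()-like */
			return ERS_NEED_RESET;
		case CH_PERM_FAILURE:
			return ERS_DISCONNECT;
		}
		return ERS_NEED_RESET;
	}

	static enum ers_result slot_reset(void)
	{
		puts("restore config space, schedule controller reset");
		return ERS_RECOVERED;
	}

	static void resume(void)
	{
		puts("clear AER uncorrectable-error status");
	}

	int main(void)
	{
		if (error_detected(CH_FROZEN) == ERS_NEED_RESET &&
		    slot_reset() == ERS_RECOVERED)
			resume();
		return 0;
	}
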
+
 static const struct pci_error_handlers nvme_err_handler = {
        .error_detected = nvme_error_detected,
-       .mmio_enabled   = nvme_dump_registers,
-       .link_reset     = nvme_link_reset,
        .slot_reset     = nvme_slot_reset,
        .resume         = nvme_error_resume,
        .reset_notify   = nvme_reset_notify,
@@ -3444,6 +2199,10 @@ static const struct pci_error_handlers nvme_err_handler = {
 #define PCI_CLASS_STORAGE_EXPRESS      0x010802
 
 static const struct pci_device_id nvme_id_table[] = {
+       { PCI_VDEVICE(INTEL, 0x0953),
+               .driver_data = NVME_QUIRK_STRIPE_SIZE, },
+       { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
+               .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
        { 0, }
@@ -3468,40 +2227,21 @@ static int __init nvme_init(void)
 
        init_waitqueue_head(&nvme_kthread_wait);
 
-       nvme_workq = create_singlethread_workqueue("nvme");
+       nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        if (!nvme_workq)
                return -ENOMEM;
 
-       result = register_blkdev(nvme_major, "nvme");
+       result = nvme_core_init();
        if (result < 0)
                goto kill_workq;
-       else if (result > 0)
-               nvme_major = result;
-
-       result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
-                                                       &nvme_dev_fops);
-       if (result < 0)
-               goto unregister_blkdev;
-       else if (result > 0)
-               nvme_char_major = result;
-
-       nvme_class = class_create(THIS_MODULE, "nvme");
-       if (IS_ERR(nvme_class)) {
-               result = PTR_ERR(nvme_class);
-               goto unregister_chrdev;
-       }
 
        result = pci_register_driver(&nvme_driver);
        if (result)
-               goto destroy_class;
+               goto core_exit;
        return 0;
 
- destroy_class:
-       class_destroy(nvme_class);
- unregister_chrdev:
-       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
- unregister_blkdev:
-       unregister_blkdev(nvme_major, "nvme");
+ core_exit:
+       nvme_core_exit();
  kill_workq:
        destroy_workqueue(nvme_workq);
        return result;
@@ -3510,10 +2250,8 @@ static int __init nvme_init(void)
 static void __exit nvme_exit(void)
 {
        pci_unregister_driver(&nvme_driver);
-       unregister_blkdev(nvme_major, "nvme");
+       nvme_core_exit();
        destroy_workqueue(nvme_workq);
-       class_destroy(nvme_class);
-       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
        BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
        _nvme_check_size();
 }
index c3d8d38..e947e29 100644 (file)
@@ -524,7 +524,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, u8 *inq_response,
                                        int alloc_len)
 {
-       struct nvme_dev *dev = ns->dev;
+       struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_id_ns *id_ns;
        int res;
        int nvme_sc;
@@ -532,10 +532,10 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
        u8 resp_data_format = 0x02;
        u8 protect;
        u8 cmdque = 0x01 << 1;
-       u8 fw_offset = sizeof(dev->firmware_rev);
+       u8 fw_offset = sizeof(ctrl->firmware_rev);
 
        /* nvme ns identify - use DPS value for PROTECT field */
-       nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
+       nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;
@@ -553,12 +553,12 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
        inq_response[5] = protect;      /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
        inq_response[7] = cmdque;       /* wbus16=0 | sync=0 | vs=0 */
        strncpy(&inq_response[8], "NVMe    ", 8);
-       strncpy(&inq_response[16], dev->model, 16);
+       strncpy(&inq_response[16], ctrl->model, 16);
 
-       while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
+       while (ctrl->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
                fw_offset--;
        fw_offset -= 4;
-       strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);
+       strncpy(&inq_response[32], ctrl->firmware_rev + fw_offset, 4);
 
        xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
        return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
@@ -588,82 +588,113 @@ static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr, u8 *inq_response,
                                        int alloc_len)
 {
-       struct nvme_dev *dev = ns->dev;
        int xfer_len;
 
        memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
        inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
        inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;    /* Page Length */
-       strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);
+       strncpy(&inq_response[4], ns->ctrl->serial, INQ_SERIAL_NUMBER_LENGTH);
 
        xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
        return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 }
 
-static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-                                       u8 *inq_response, int alloc_len)
+static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+               u8 *inq_response, int alloc_len)
 {
-       struct nvme_dev *dev = ns->dev;
-       int res;
-       int nvme_sc;
-       int xfer_len;
-       __be32 tmp_id = cpu_to_be32(ns->ns_id);
+       struct nvme_id_ns *id_ns;
+       int nvme_sc, res;
+       size_t len;
+       void *eui;
 
-       memset(inq_response, 0, alloc_len);
-       inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
-       if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
-               struct nvme_id_ns *id_ns;
-               void *eui;
-               int len;
+       nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
+       res = nvme_trans_status_code(hdr, nvme_sc);
+       if (res)
+               return res;
 
-               nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
-               res = nvme_trans_status_code(hdr, nvme_sc);
-               if (res)
-                       return res;
+       eui = id_ns->eui64;
+       len = sizeof(id_ns->eui64);
 
-               eui = id_ns->eui64;
-               len = sizeof(id_ns->eui64);
-               if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
-                       if (bitmap_empty(eui, len * 8)) {
-                               eui = id_ns->nguid;
-                               len = sizeof(id_ns->nguid);
-                       }
-               }
+       if (ns->ctrl->vs >= NVME_VS(1, 2)) {
                if (bitmap_empty(eui, len * 8)) {
-                       kfree(id_ns);
-                       goto scsi_string;
+                       eui = id_ns->nguid;
+                       len = sizeof(id_ns->nguid);
                }
+       }
 
-               inq_response[3] = 4 + len; /* Page Length */
-               /* Designation Descriptor start */
-               inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
-               inq_response[5] = 0x02;    /* PIV=0b | Asso=00b | Designator Type=2h */
-               inq_response[6] = 0x00;    /* Rsvd */
-               inq_response[7] = len;     /* Designator Length */
-               memcpy(&inq_response[8], eui, len);
-               kfree(id_ns);
-       } else {
- scsi_string:
-               if (alloc_len < 72) {
-                       return nvme_trans_completion(hdr,
-                                       SAM_STAT_CHECK_CONDITION,
-                                       ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-                                       SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-               }
-               inq_response[3] = 0x48;    /* Page Length */
-               /* Designation Descriptor start */
-               inq_response[4] = 0x03;    /* Proto ID=0h | Code set=3h */
-               inq_response[5] = 0x08;    /* PIV=0b | Asso=00b | Designator Type=8h */
-               inq_response[6] = 0x00;    /* Rsvd */
-               inq_response[7] = 0x44;    /* Designator Length */
-
-               sprintf(&inq_response[8], "%04x", to_pci_dev(dev->dev)->vendor);
-               memcpy(&inq_response[12], dev->model, sizeof(dev->model));
-               sprintf(&inq_response[52], "%04x", tmp_id);
-               memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
+       if (bitmap_empty(eui, len * 8)) {
+               res = -EOPNOTSUPP;
+               goto out_free_id;
        }
-       xfer_len = alloc_len;
-       return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+       memset(inq_response, 0, alloc_len);
+       inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
+       inq_response[3] = 4 + len; /* Page Length */
+
+       /* Designation Descriptor start */
+       inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
+       inq_response[5] = 0x02; /* PIV=0b | Asso=00b | Designator Type=2h */
+       inq_response[6] = 0x00; /* Rsvd */
+       inq_response[7] = len;  /* Designator Length */
+       memcpy(&inq_response[8], eui, len);
+
+       res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
+out_free_id:
+       kfree(id_ns);
+       return res;
+}
+
+static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
+               struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len)
+{
+       struct nvme_ctrl *ctrl = ns->ctrl;
+       struct nvme_id_ctrl *id_ctrl;
+       int nvme_sc, res;
+
+       if (alloc_len < 72) {
+               return nvme_trans_completion(hdr,
+                               SAM_STAT_CHECK_CONDITION,
+                               ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+                               SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+       }
+
+       nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
+       res = nvme_trans_status_code(hdr, nvme_sc);
+       if (res)
+               return res;
+
+       memset(inq_response, 0, alloc_len);
+       inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
+       inq_response[3] = 0x48; /* Page Length */
+
+       /* Designation Descriptor start */
+       inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */
+       inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */
+       inq_response[6] = 0x00; /* Rsvd */
+       inq_response[7] = 0x44; /* Designator Length */
+
+       sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid));
+       memcpy(&inq_response[12], ctrl->model, sizeof(ctrl->model));
+       sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id));
+       memcpy(&inq_response[56], ctrl->serial, sizeof(ctrl->serial));
+
+       res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
+       kfree(id_ctrl);
+       return res;
+}
+
+static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+                                       u8 *resp, int alloc_len)
+{
+       int res;
+
+       if (ns->ctrl->vs >= NVME_VS(1, 1)) {
+               res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
+               if (res != -EOPNOTSUPP)
+                       return res;
+       }
+
+       return nvme_fill_device_id_scsi_string(ns, hdr, resp, alloc_len);
 }
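
The split above tries the EUI-64/NGUID designator first and falls back to the SCSI name string only when the namespace exposes no non-zero identifier, signalled internally by -EOPNOTSUPP. The same errno-driven fallback chain in miniature (fill_eui64() and fill_scsi_string() are placeholders):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	static int fill_eui64(const unsigned char *eui, size_t len, char *out)
	{
		unsigned char zero[16] = { 0 };

		if (!memcmp(eui, zero, len))
			return -EOPNOTSUPP;	/* no identifier: caller falls back */
		snprintf(out, 32, "eui64 descriptor");
		return 0;
	}

	static int fill_scsi_string(char *out)
	{
		snprintf(out, 32, "scsi name string");
		return 0;
	}

	static int device_id_page(const unsigned char *eui, size_t len, char *out)
	{
		int res = fill_eui64(eui, len, out);

		if (res != -EOPNOTSUPP)
			return res;		/* success or a real error */
		return fill_scsi_string(out);
	}

	int main(void)
	{
		unsigned char none[8] = { 0 }, some[8] = { 1 };
		char buf[32];

		device_id_page(some, sizeof(some), buf);
		printf("%s\n", buf);
		device_id_page(none, sizeof(none), buf);
		printf("%s\n", buf);
		return 0;
	}
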
 
 static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
@@ -672,7 +703,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        u8 *inq_response;
        int res;
        int nvme_sc;
-       struct nvme_dev *dev = ns->dev;
+       struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_id_ctrl *id_ctrl;
        struct nvme_id_ns *id_ns;
        int xfer_len;
@@ -688,7 +719,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        if (inq_response == NULL)
                return -ENOMEM;
 
-       nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
+       nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                goto out_free_inq;
@@ -704,7 +735,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        app_chk = protect << 1;
        ref_chk = protect;
 
-       nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
+       nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                goto out_free_inq;
@@ -815,7 +846,6 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
        int res;
        int xfer_len;
        u8 *log_response;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_smart_log *smart_log;
        u8 temp_c;
        u16 temp_k;
@@ -824,7 +854,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
        if (log_response == NULL)
                return -ENOMEM;
 
-       res = nvme_get_log_page(dev, &smart_log);
+       res = nvme_get_log_page(ns->ctrl, &smart_log);
        if (res < 0)
                goto out_free_response;
 
@@ -862,7 +892,6 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        int res;
        int xfer_len;
        u8 *log_response;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_smart_log *smart_log;
        u32 feature_resp;
        u8 temp_c_cur, temp_c_thresh;
@@ -872,7 +901,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        if (log_response == NULL)
                return -ENOMEM;
 
-       res = nvme_get_log_page(dev, &smart_log);
+       res = nvme_get_log_page(ns->ctrl, &smart_log);
        if (res < 0)
                goto out_free_response;
 
@@ -886,7 +915,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        kfree(smart_log);
 
        /* Get Features for Temp Threshold */
-       res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
+       res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, 0,
                                                                &feature_resp);
        if (res != NVME_SC_SUCCESS)
                temp_c_thresh = LOG_TEMP_UNKNOWN;
@@ -948,7 +977,6 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
        int res;
        int nvme_sc;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_id_ns *id_ns;
        u8 flbas;
        u32 lba_length;
@@ -958,7 +986,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
                return -EINVAL;
 
-       nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
+       nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;
@@ -1014,14 +1042,13 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
 {
        int res = 0;
        int nvme_sc;
-       struct nvme_dev *dev = ns->dev;
        u32 feature_resp;
        u8 vwc;
 
        if (len < MODE_PAGE_CACHING_LEN)
                return -EINVAL;
 
-       nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
+       nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, 0,
                                                                &feature_resp);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
@@ -1207,12 +1234,11 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
        int res;
        int nvme_sc;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_id_ctrl *id_ctrl;
        int lowest_pow_st;      /* max npss = lowest power consumption */
        unsigned ps_desired = 0;
 
-       nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
+       nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;
@@ -1256,7 +1282,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
                break;
        }
-       nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
+       nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_POWER_MGMT, ps_desired, 0,
                                    NULL);
        return nvme_trans_status_code(hdr, nvme_sc);
 }
@@ -1280,7 +1306,6 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
                                        u8 buffer_id)
 {
        int nvme_sc;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_command c;
 
        if (hdr->iovec_count > 0) {
@@ -1297,7 +1322,7 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
        c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
        c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
 
-       nvme_sc = __nvme_submit_sync_cmd(dev->admin_q, &c, NULL,
+       nvme_sc = nvme_submit_user_cmd(ns->ctrl->admin_q, &c,
                        hdr->dxferp, tot_len, NULL, 0);
        return nvme_trans_status_code(hdr, nvme_sc);
 }
@@ -1364,14 +1389,13 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
        int res = 0;
        int nvme_sc;
-       struct nvme_dev *dev = ns->dev;
        unsigned dword11;
 
        switch (page_code) {
        case MODE_PAGE_CACHING:
                dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
-               nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
-                                           0, NULL);
+               nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
+                                           dword11, 0, NULL);
                res = nvme_trans_status_code(hdr, nvme_sc);
                break;
        case MODE_PAGE_CONTROL:
@@ -1473,7 +1497,6 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
 {
        int res = 0;
        int nvme_sc;
-       struct nvme_dev *dev = ns->dev;
        u8 flbas;
 
        /*
@@ -1486,7 +1509,7 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
        if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
                struct nvme_id_ns *id_ns;
 
-               nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
+               nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
                res = nvme_trans_status_code(hdr, nvme_sc);
                if (res)
                        return res;
@@ -1570,7 +1593,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
        int res;
        int nvme_sc;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_id_ns *id_ns;
        u8 i;
        u8 flbas, nlbaf;
@@ -1579,7 +1601,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        struct nvme_command c;
 
        /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
-       nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
+       nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;
@@ -1611,7 +1633,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        c.format.nsid = cpu_to_le32(ns->ns_id);
        c.format.cdw10 = cpu_to_le32(cdw10);
 
-       nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
+       nvme_sc = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL, 0);
        res = nvme_trans_status_code(hdr, nvme_sc);
 
        kfree(id_ns);
@@ -1704,7 +1726,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                        nvme_sc = NVME_SC_LBA_RANGE;
                        break;
                }
-               nvme_sc = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
+               nvme_sc = nvme_submit_user_cmd(ns->queue, &c,
                                next_mapping_addr, unit_len, NULL, 0);
                if (nvme_sc)
                        break;
@@ -2040,7 +2062,6 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        u32 alloc_len;
        u32 resp_size;
        u32 xfer_len;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_id_ns *id_ns;
        u8 *response;
 
@@ -2052,7 +2073,7 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                resp_size = READ_CAP_10_RESP_SIZE;
        }
 
-       nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
+       nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
                return res;     
@@ -2080,7 +2101,6 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        int nvme_sc;
        u32 alloc_len, xfer_len, resp_size;
        u8 *response;
-       struct nvme_dev *dev = ns->dev;
        struct nvme_id_ctrl *id_ctrl;
        u32 ll_length, lun_id;
        u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
@@ -2094,7 +2114,7 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        case ALL_LUNS_RETURNED:
        case ALL_WELL_KNOWN_LUNS_RETURNED:
        case RESTRICTED_LUNS_RETURNED:
-               nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
+               nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
                res = nvme_trans_status_code(hdr, nvme_sc);
                if (res)
                        return res;
@@ -2295,9 +2315,7 @@ static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr,
                                        u8 *cmd)
 {
-       struct nvme_dev *dev = ns->dev;
-
-       if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
+       if (!nvme_ctrl_ready(ns->ctrl))
                return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
                                            NOT_READY, SCSI_ASC_LUN_NOT_READY,
                                            SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
index e77d150..5a2899f 100644 (file)
@@ -615,9 +615,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
        }
 
        bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
-       if (!bip) {
+       if (IS_ERR(bip)) {
                pr_err("Unable to allocate bio_integrity_payload\n");
-               return -ENOMEM;
+               return PTR_ERR(bip);
        }
 
        bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
index 744b997..1640493 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef _AER_H_
 #define _AER_H_
 
+#include <linux/errno.h>
 #include <linux/types.h>
 
 #define AER_NONFATAL                   0
index b9b6e04..5349e68 100644 (file)
@@ -318,16 +318,6 @@ enum bip_flags {
        BIP_IP_CHECKSUM         = 1 << 4, /* IP checksum */
 };
 
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
-{
-       if (bio->bi_rw & REQ_INTEGRITY)
-               return bio->bi_integrity;
-
-       return NULL;
-}
-
 /*
  * bio integrity payload
  */
@@ -349,6 +339,16 @@ struct bio_integrity_payload {
        struct bio_vec          bip_inline_vecs[0];/* embedded bvec array */
 };
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+       if (bio->bi_rw & REQ_INTEGRITY)
+               return bio->bi_integrity;
+
+       return NULL;
+}
+
 static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
 {
        struct bio_integrity_payload *bip = bio_integrity(bio);
@@ -795,6 +795,18 @@ static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
        return false;
 }
 
+static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
+                                                               unsigned int nr)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
+                                       unsigned int len, unsigned int offset)
+{
+       return 0;
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 #endif /* CONFIG_BLOCK */
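
Both the iblock caller and the !CONFIG_BLK_DEV_INTEGRITY stub above rely on the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention: a negative errno is carried inside the pointer value itself, so callers learn why an allocation failed instead of seeing a bare NULL. A userspace re-implementation of the trick (MAX_ERRNO mirrors the kernel's 4095):

	#include <errno.h>
	#include <stdio.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *alloc_thing(int fail)
	{
		static int thing;

		if (fail)
			return ERR_PTR(-ENOMEM);	/* why it failed, not just NULL */
		return &thing;
	}

	int main(void)
	{
		void *p = alloc_thing(1);

		if (IS_ERR(p))
			printf("alloc failed: %ld\n", PTR_ERR(p));
		return 0;
	}
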
index 0fb6584..86a38ea 100644 (file)
@@ -188,7 +188,6 @@ enum rq_flag_bits {
        __REQ_PM,               /* runtime pm request */
        __REQ_HASHED,           /* on IO scheduler merge hash */
        __REQ_MQ_INFLIGHT,      /* track inflight for MQ */
-       __REQ_NO_TIMEOUT,       /* requests may never expire */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -242,7 +241,6 @@ enum rq_flag_bits {
 #define REQ_PM                 (1ULL << __REQ_PM)
 #define REQ_HASHED             (1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT                (1ULL << __REQ_MQ_INFLIGHT)
-#define REQ_NO_TIMEOUT         (1ULL << __REQ_NO_TIMEOUT)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE  -1U
index d372ea8..29189ae 100644 (file)
@@ -409,6 +409,7 @@ struct request_queue {
 
        unsigned int            rq_timeout;
        struct timer_list       timeout;
+       struct work_struct      timeout_work;
        struct list_head        timeout_list;
 
        struct list_head        icq_list;
index 3af5f45..a55986f 100644 (file)
 
 #include <linux/types.h>
 
-struct nvme_bar {
-       __u64                   cap;    /* Controller Capabilities */
-       __u32                   vs;     /* Version */
-       __u32                   intms;  /* Interrupt Mask Set */
-       __u32                   intmc;  /* Interrupt Mask Clear */
-       __u32                   cc;     /* Controller Configuration */
-       __u32                   rsvd1;  /* Reserved */
-       __u32                   csts;   /* Controller Status */
-       __u32                   nssr;   /* Subsystem Reset */
-       __u32                   aqa;    /* Admin Queue Attributes */
-       __u64                   asq;    /* Admin SQ Base Address */
-       __u64                   acq;    /* Admin CQ Base Address */
-       __u32                   cmbloc; /* Controller Memory Buffer Location */
-       __u32                   cmbsz;  /* Controller Memory Buffer Size */
+enum {
+       NVME_REG_CAP    = 0x0000,       /* Controller Capabilities */
+       NVME_REG_VS     = 0x0008,       /* Version */
+       NVME_REG_INTMS  = 0x000c,       /* Interrupt Mask Set */
+       NVME_REG_INTMC  = 0x0010,       /* Interrupt Mask Clear */
+       NVME_REG_CC     = 0x0014,       /* Controller Configuration */
+       NVME_REG_CSTS   = 0x001c,       /* Controller Status */
+       NVME_REG_NSSR   = 0x0020,       /* NVM Subsystem Reset */
+       NVME_REG_AQA    = 0x0024,       /* Admin Queue Attributes */
+       NVME_REG_ASQ    = 0x0028,       /* Admin SQ Base Address */
+       NVME_REG_ACQ    = 0x0030,       /* Admin CQ Base Address */
+       NVME_REG_CMBLOC = 0x0038,       /* Controller Memory Buffer Location */
+       NVME_REG_CMBSZ  = 0x003c,       /* Controller Memory Buffer Size */
 };
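
With struct nvme_bar gone, every register access becomes an explicit offset from the mapped BAR, and 64-bit registers such as CAP are read as two 32-bit halves where the hardware requires it. A userspace sketch of offset-based access against a stand-in BAR buffer (read32()/lo_hi_read64() approximate readl()/lo_hi_readq()):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define NVME_REG_CAP	0x0000
	#define NVME_REG_VS	0x0008

	static uint32_t read32(const uint8_t *bar, uint32_t off)
	{
		uint32_t v;

		memcpy(&v, bar + off, sizeof(v));	/* stands in for readl() */
		return v;
	}

	/* Like lo_hi_readq(): low dword first, then the high dword. */
	static uint64_t lo_hi_read64(const uint8_t *bar, uint32_t off)
	{
		uint64_t lo = read32(bar, off);
		uint64_t hi = read32(bar, off + 4);

		return lo | (hi << 32);
	}

	int main(void)
	{
		uint8_t bar[0x40] = { 0 };
		uint32_t vs = (1 << 16) | (2 << 8);	/* NVME_VS(1, 2) */

		memcpy(bar + NVME_REG_VS, &vs, sizeof(vs));
		printf("VS=0x%x CAP=0x%llx\n",
		       (unsigned)read32(bar, NVME_REG_VS),
		       (unsigned long long)lo_hi_read64(bar, NVME_REG_CAP));
		return 0;
	}
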
 
 #define NVME_CAP_MQES(cap)     ((cap) & 0xffff)
index c2e5d6c..ebd10e6 100644 (file)
@@ -307,7 +307,7 @@ header-y += nfs_mount.h
 header-y += nl80211.h
 header-y += n_r3964.h
 header-y += nubus.h
-header-y += nvme.h
+header-y += nvme_ioctl.h
 header-y += nvram.h
 header-y += omap3isp.h
 header-y += omapfb.h