Merge branch 'for-4.5/nvme' of git://git.kernel.dk/linux-block
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 044253d..4fb5bb7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
 
+enum {
+       /*
+        * Driver internal status code for commands that were cancelled due
+        * to timeouts or controller shutdown.  The value is negative so
+        * that it a) doesn't overlap with the unsigned hardware error codes,
+        * and b) can easily be tested for.
+        */
+       NVME_SC_CANCELLED               = -EINTR,
+};
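
Because NVME_SC_CANCELLED is negative, it can never collide with the controller's unsigned status codes, so a completion path can simply compare against it. A minimal sketch (the helper name is hypothetical, not part of this patch):

	static bool nvme_cmd_was_cancelled(int status)
	{
		/* driver-internal, negative value: never reported by the hardware */
		return status == NVME_SC_CANCELLED;
	}
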
+
 extern unsigned char nvme_io_timeout;
 #define NVME_IO_TIMEOUT        (nvme_io_timeout * HZ)
 
+extern unsigned char admin_timeout;
+#define ADMIN_TIMEOUT  (admin_timeout * HZ)
+
+extern unsigned char shutdown_timeout;
+#define SHUTDOWN_TIMEOUT       (shutdown_timeout * HZ)
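
All three module parameters are given in seconds; the macros convert them to jiffies. A transport would typically stamp them onto requests roughly like this (is_admin is a stand-in condition, not from this header):

	/* admin commands get the longer ADMIN_TIMEOUT, normal I/O gets NVME_IO_TIMEOUT */
	req->timeout = is_admin ? ADMIN_TIMEOUT : NVME_IO_TIMEOUT;
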
+
 enum {
        NVME_NS_LBA             = 0,
        NVME_NS_LIGHTNVM        = 1,
 };
 
 /*
- * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ * List of workarounds for devices that require behavior not specified in
+ * the standard.
  */
-struct nvme_dev {
-       struct list_head node;
-       struct nvme_queue **queues;
+enum nvme_quirks {
+       /*
+        * Prefers I/O aligned to a stripe size specified in a
+        * vendor-specific Identify field.
+        */
+       NVME_QUIRK_STRIPE_SIZE                  = (1 << 0),
+
+       /*
+        * The controller doesn't handle Identify values other than 0 or 1
+        * correctly.
+        */
+       NVME_QUIRK_IDENTIFY_CNS                 = (1 << 1),
+};
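
The quirks end up as a bitmask in struct nvme_ctrl (the quirks field further down), so checking one is a plain bit test; a sketch:

	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) {
		/* honour the vendor-specific stripe size when sizing I/O */
	}
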
+
+struct nvme_ctrl {
+       const struct nvme_ctrl_ops *ops;
        struct request_queue *admin_q;
-       struct blk_mq_tag_set tagset;
-       struct blk_mq_tag_set admin_tagset;
-       u32 __iomem *dbs;
        struct device *dev;
-       struct dma_pool *prp_page_pool;
-       struct dma_pool *prp_small_pool;
+       struct kref kref;
        int instance;
-       unsigned queue_count;
-       unsigned online_queues;
-       unsigned max_qid;
-       int q_depth;
-       u32 db_stride;
-       u32 ctrl_config;
-       struct msix_entry *entry;
-       struct nvme_bar __iomem *bar;
+       struct blk_mq_tag_set *tagset;
        struct list_head namespaces;
-       struct kref kref;
-       struct device *device;
-       struct work_struct reset_work;
-       struct work_struct probe_work;
-       struct work_struct scan_work;
+       struct mutex namespaces_mutex;
+       struct device *device;  /* char device */
+       struct list_head node;
+
        char name[12];
        char serial[20];
        char model[40];
        char firmware_rev[8];
-       bool subsystem;
+
+       u32 ctrl_config;
+
+       u32 page_size;
        u32 max_hw_sectors;
        u32 stripe_size;
-       u32 page_size;
-       void __iomem *cmb;
-       dma_addr_t cmb_dma_addr;
-       u64 cmb_size;
-       u32 cmbsz;
        u16 oncs;
-       u16 abort_limit;
+       atomic_t abort_limit;
        u8 event_limit;
        u8 vwc;
+       u32 vs;
+       bool subsystem;
+       unsigned long quirks;
 };
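
struct nvme_ctrl is intended to be embedded in a transport-private structure, which the transport recovers with container_of. A minimal sketch with illustrative names (struct my_nvme_dev and to_my_dev are not part of this patch):

	struct my_nvme_dev {
		struct nvme_ctrl ctrl;
		void __iomem *bar;
		/* ... further transport-private state ... */
	};

	static inline struct my_nvme_dev *to_my_dev(struct nvme_ctrl *ctrl)
	{
		return container_of(ctrl, struct my_nvme_dev, ctrl);
	}
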
 
 /*
@@ -79,11 +98,14 @@ struct nvme_dev {
 struct nvme_ns {
        struct list_head list;
 
-       struct nvme_dev *dev;
+       struct nvme_ctrl *ctrl;
        struct request_queue *queue;
        struct gendisk *disk;
        struct kref kref;
 
+       u8 eui[8];
+       u8 uuid[16];
+
        unsigned ns_id;
        int lba_shift;
        u16 ms;
@@ -94,41 +116,156 @@ struct nvme_ns {
        u32 mode_select_block_len;
 };
 
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries.  You can't see it in this data structure because C doesn't let
- * me express that.  Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
-       unsigned long private;  /* For the use of the submitter of the I/O */
-       int npages;             /* In the PRP list. 0 means small pool in use */
-       int offset;             /* Of PRP list */
-       int nents;              /* Used in scatterlist */
-       int length;             /* Of data, in bytes */
-       dma_addr_t first_dma;
-       struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
-       struct scatterlist sg[0];
+struct nvme_ctrl_ops {
+       int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+       int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
+       int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+       bool (*io_incapable)(struct nvme_ctrl *ctrl);
+       int (*reset_ctrl)(struct nvme_ctrl *ctrl);
+       void (*free_ctrl)(struct nvme_ctrl *ctrl);
 };
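
For an MMIO transport the register ops boil down to readl/writel on the mapped BAR; a sketch reusing the hypothetical my_nvme_dev/to_my_dev from above:

	static int my_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
	{
		*val = readl(to_my_dev(ctrl)->bar + off);
		return 0;
	}

	static const struct nvme_ctrl_ops my_ctrl_ops = {
		.reg_read32	= my_reg_read32,
		/* .reg_write32, .reg_read64, .io_incapable, .reset_ctrl and
		 * .free_ctrl would be wired up the same way */
	};
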
 
+static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
+{
+       u32 val = 0;
+
+       if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
+               return false;
+       return val & NVME_CSTS_RDY;
+}
+
+static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
+{
+       u32 val = 0;
+
+       if (ctrl->ops->io_incapable(ctrl))
+               return true;
+       if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
+               return false;
+       return val & NVME_CSTS_CFS;
+}
+
+static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
+{
+       if (!ctrl->subsystem)
+               return -ENOTTY;
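+       /* 0x4E564D65 is ASCII "NVMe", the value NSSR requires to trigger a subsystem reset */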
+       return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+}
+
 static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
 {
        return (sector >> (ns->lba_shift - 9));
 }
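
nvme_block_nr() converts a 512-byte block-layer sector into a namespace LBA; for example, with 4KiB logical blocks (lba_shift = 12), sector 8 becomes 8 >> (12 - 9) = 1.
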
 
+static inline void nvme_setup_flush(struct nvme_ns *ns,
+               struct nvme_command *cmnd)
+{
+       memset(cmnd, 0, sizeof(*cmnd));
+       cmnd->common.opcode = nvme_cmd_flush;
+       cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+}
+
+static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
+               struct nvme_command *cmnd)
+{
+       u16 control = 0;
+       u32 dsmgmt = 0;
+
+       if (req->cmd_flags & REQ_FUA)
+               control |= NVME_RW_FUA;
+       if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+               control |= NVME_RW_LR;
+
+       if (req->cmd_flags & REQ_RAHEAD)
+               dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+       memset(cmnd, 0, sizeof(*cmnd));
+       cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+       cmnd->rw.command_id = req->tag;
+       cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
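+       /* the NLB field is a 0's based block count, hence the "- 1" below */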
+       cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+       if (ns->ms) {
+               switch (ns->pi_type) {
+               case NVME_NS_DPS_PI_TYPE3:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD;
+                       break;
+               case NVME_NS_DPS_PI_TYPE1:
+               case NVME_NS_DPS_PI_TYPE2:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD |
+                                       NVME_RW_PRINFO_PRCHK_REF;
+                       cmnd->rw.reftag = cpu_to_le32(
+                                       nvme_block_nr(ns, blk_rq_pos(req)));
+                       break;
+               }
+               if (!blk_integrity_rq(req))
+                       control |= NVME_RW_PRINFO_PRACT;
+       }
+
+       cmnd->rw.control = cpu_to_le16(control);
+       cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+}
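
A transport's ->queue_rq path would typically dispatch between these helpers based on the request flags, roughly as follows (command allocation and submission elided; REQ_FLUSH is the 4.5-era flush flag):

	if (req->cmd_flags & REQ_FLUSH)
		nvme_setup_flush(ns, cmnd);
	else
		nvme_setup_rw(ns, req, cmnd);
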
+
+static inline int nvme_error_status(u16 status)
+{
+       switch (status & 0x7ff) {
+       case NVME_SC_SUCCESS:
+               return 0;
+       case NVME_SC_CAP_EXCEEDED:
+               return -ENOSPC;
+       default:
+               return -EIO;
+       }
+}
+
+static inline bool nvme_req_needs_retry(struct request *req, u16 status)
+{
+       return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
+               (jiffies - req->start_time) < req->timeout;
+}
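
Put together, a completion handler might combine these two helpers with nvme_requeue_req() (declared further down) roughly like this; a sketch only, with status extraction and locking elided:

	if (nvme_req_needs_retry(req, status)) {
		nvme_requeue_req(req);
		return;
	}
	blk_mq_end_request(req, nvme_error_status(status));
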
+
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+               const struct nvme_ctrl_ops *ops, unsigned long quirks);
+void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
+void nvme_put_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_identify(struct nvme_ctrl *ctrl);
+
+void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
+void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+
+void nvme_stop_queues(struct nvme_ctrl *ctrl);
+void nvme_start_queues(struct nvme_ctrl *ctrl);
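
Taken together, these entry points give a transport a probe/remove skeleton along the following lines; a sketch only, reusing the illustrative my_nvme_dev and my_ctrl_ops from the earlier sketches, with controller bring-up details omitted:

	static int my_probe(struct my_nvme_dev *dev, struct device *parent)
	{
		int ret;

		ret = nvme_init_ctrl(&dev->ctrl, parent, &my_ctrl_ops, 0 /* quirks */);
		if (ret)
			return ret;

		/* transport-specific controller bring-up would happen here, then: */
		ret = nvme_init_identify(&dev->ctrl);
		if (ret)
			goto out_uninit;

		nvme_scan_namespaces(&dev->ctrl);
		return 0;

	out_uninit:
		nvme_uninit_ctrl(&dev->ctrl);
		nvme_put_ctrl(&dev->ctrl);
		return ret;
	}

	static void my_remove(struct my_nvme_dev *dev)
	{
		nvme_remove_namespaces(&dev->ctrl);
		nvme_uninit_ctrl(&dev->ctrl);
		nvme_put_ctrl(&dev->ctrl);
	}
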
+
+struct request *nvme_alloc_request(struct request_queue *q,
+               struct nvme_command *cmd, unsigned int flags);
+void nvme_requeue_req(struct request *req);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-               void *buffer, void __user *ubuffer, unsigned bufflen,
+               void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
+int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+               void __user *ubuffer, unsigned bufflen, u32 *result,
+               unsigned timeout);
+int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+               void __user *ubuffer, unsigned bufflen,
+               void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
                u32 *result, unsigned timeout);
-int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
-int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
+int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
+int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
                struct nvme_id_ns **id);
-int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
-int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
+int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
                        dma_addr_t dma_addr, u32 *result);
-int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                        dma_addr_t dma_addr, u32 *result);
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+
+extern spinlock_t dev_list_lock;
 
 struct sg_io_hdr;
 
@@ -154,4 +291,7 @@ static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *i
 }
 #endif /* CONFIG_NVM */
 
+int __init nvme_core_init(void);
+void nvme_core_exit(void);
+
 #endif /* _NVME_H */