Merge branch 'akpm' (patches from Andrew)
[cascardo/linux.git] / drivers / nvme / host / pci.c
1 /*
2  * NVM Express device driver
3  * Copyright (c) 2011-2014, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14
15 #include <linux/aer.h>
16 #include <linux/bitops.h>
17 #include <linux/blkdev.h>
18 #include <linux/blk-mq.h>
19 #include <linux/blk-mq-pci.h>
20 #include <linux/cpu.h>
21 #include <linux/delay.h>
22 #include <linux/errno.h>
23 #include <linux/fs.h>
24 #include <linux/genhd.h>
25 #include <linux/hdreg.h>
26 #include <linux/idr.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/kdev_t.h>
31 #include <linux/kernel.h>
32 #include <linux/mm.h>
33 #include <linux/module.h>
34 #include <linux/moduleparam.h>
35 #include <linux/mutex.h>
36 #include <linux/pci.h>
37 #include <linux/poison.h>
38 #include <linux/ptrace.h>
39 #include <linux/sched.h>
40 #include <linux/slab.h>
41 #include <linux/t10-pi.h>
42 #include <linux/timer.h>
43 #include <linux/types.h>
44 #include <linux/io-64-nonatomic-lo-hi.h>
45 #include <asm/unaligned.h>
46
47 #include "nvme.h"
48
49 #define NVME_Q_DEPTH            1024
50 #define NVME_AQ_DEPTH           256
51 #define SQ_SIZE(depth)          (depth * sizeof(struct nvme_command))
52 #define CQ_SIZE(depth)          (depth * sizeof(struct nvme_completion))
53
54 /*
55  * We handle AEN commands ourselves and don't even let the
56  * block layer know about them.
57  */
58 #define NVME_AQ_BLKMQ_DEPTH     (NVME_AQ_DEPTH - NVME_NR_AERS)
59
60 static int use_threaded_interrupts;
61 module_param(use_threaded_interrupts, int, 0);
62
63 static bool use_cmb_sqes = true;
64 module_param(use_cmb_sqes, bool, 0644);
65 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
66
67 static struct workqueue_struct *nvme_workq;
68
69 struct nvme_dev;
70 struct nvme_queue;
71
72 static int nvme_reset(struct nvme_dev *dev);
73 static void nvme_process_cq(struct nvme_queue *nvmeq);
74 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
75
76 /*
77  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
78  */
79 struct nvme_dev {
80         struct nvme_queue **queues;
81         struct blk_mq_tag_set tagset;
82         struct blk_mq_tag_set admin_tagset;
83         u32 __iomem *dbs;
84         struct device *dev;
85         struct dma_pool *prp_page_pool;
86         struct dma_pool *prp_small_pool;
87         unsigned queue_count;
88         unsigned online_queues;
89         unsigned max_qid;
90         int q_depth;
91         u32 db_stride;
92         void __iomem *bar;
93         struct work_struct reset_work;
94         struct work_struct remove_work;
95         struct timer_list watchdog_timer;
96         struct mutex shutdown_lock;
97         bool subsystem;
98         void __iomem *cmb;
99         dma_addr_t cmb_dma_addr;
100         u64 cmb_size;
101         u32 cmbsz;
102         struct nvme_ctrl ctrl;
103         struct completion ioq_wait;
104 };
105
106 static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
107 {
108         return container_of(ctrl, struct nvme_dev, ctrl);
109 }
110
111 /*
112  * An NVM Express queue.  Each device has at least two (one for admin
113  * commands and one for I/O commands).
114  */
115 struct nvme_queue {
116         struct device *q_dmadev;
117         struct nvme_dev *dev;
118         char irqname[24];       /* nvme4294967295q65535\0 */
119         spinlock_t q_lock;
120         struct nvme_command *sq_cmds;
121         struct nvme_command __iomem *sq_cmds_io;
122         volatile struct nvme_completion *cqes;
123         struct blk_mq_tags **tags;
124         dma_addr_t sq_dma_addr;
125         dma_addr_t cq_dma_addr;
126         u32 __iomem *q_db;
127         u16 q_depth;
128         s16 cq_vector;
129         u16 sq_tail;
130         u16 cq_head;
131         u16 qid;
132         u8 cq_phase;
133         u8 cqe_seen;
134 };
135
136 /*
137  * The nvme_iod describes the data in an I/O, including the list of PRP
138  * entries.  You can't see it in this data structure because C doesn't let
139  * me express that.  Use nvme_init_iod to ensure there's enough space
140  * allocated to store the PRP list.
141  */
142 struct nvme_iod {
143         struct nvme_queue *nvmeq;
144         int aborted;
145         int npages;             /* In the PRP list. 0 means small pool in use */
146         int nents;              /* Used in scatterlist */
147         int length;             /* Of data, in bytes */
148         dma_addr_t first_dma;
149         struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
150         struct scatterlist *sg;
151         struct scatterlist inline_sg[0];
152 };
153
154 /*
155  * Check we didn't inadvertently grow the command struct
156  */
157 static inline void _nvme_check_size(void)
158 {
159         BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
160         BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
161         BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
162         BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
163         BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
164         BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
165         BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
166         BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
167         BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
168         BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
169         BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
170         BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
171 }
172
173 /*
174  * Max size of iod being embedded in the request payload
175  */
176 #define NVME_INT_PAGES          2
177 #define NVME_INT_BYTES(dev)     (NVME_INT_PAGES * (dev)->ctrl.page_size)
178
179 /*
180  * Will slightly overestimate the number of pages needed.  This is OK
181  * as it only leads to a small amount of wasted memory for the lifetime of
182  * the I/O.
183  */
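/*
 * Each PRP entry is 8 bytes, and the last entry of every PRP-list page is
 * reserved as a pointer to the next list page, so one page holds
 * PAGE_SIZE / 8 - 1 usable entries.  For example, with 4 KiB pages a
 * 128 KiB transfer needs at most 33 entries and therefore a single list
 * page.
 */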
184 static int nvme_npages(unsigned size, struct nvme_dev *dev)
185 {
186         unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
187                                       dev->ctrl.page_size);
188         return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
189 }
190
191 static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
192                 unsigned int size, unsigned int nseg)
193 {
194         return sizeof(__le64 *) * nvme_npages(size, dev) +
195                         sizeof(struct scatterlist) * nseg;
196 }
197
198 static unsigned int nvme_cmd_size(struct nvme_dev *dev)
199 {
200         return sizeof(struct nvme_iod) +
201                 nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
202 }
203
204 static int nvmeq_irq(struct nvme_queue *nvmeq)
205 {
206         return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector);
207 }
208
209 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
210                                 unsigned int hctx_idx)
211 {
212         struct nvme_dev *dev = data;
213         struct nvme_queue *nvmeq = dev->queues[0];
214
215         WARN_ON(hctx_idx != 0);
216         WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
217         WARN_ON(nvmeq->tags);
218
219         hctx->driver_data = nvmeq;
220         nvmeq->tags = &dev->admin_tagset.tags[0];
221         return 0;
222 }
223
224 static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
225 {
226         struct nvme_queue *nvmeq = hctx->driver_data;
227
228         nvmeq->tags = NULL;
229 }
230
231 static int nvme_admin_init_request(void *data, struct request *req,
232                                 unsigned int hctx_idx, unsigned int rq_idx,
233                                 unsigned int numa_node)
234 {
235         struct nvme_dev *dev = data;
236         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
237         struct nvme_queue *nvmeq = dev->queues[0];
238
239         BUG_ON(!nvmeq);
240         iod->nvmeq = nvmeq;
241         return 0;
242 }
243
244 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
245                           unsigned int hctx_idx)
246 {
247         struct nvme_dev *dev = data;
248         struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
249
250         if (!nvmeq->tags)
251                 nvmeq->tags = &dev->tagset.tags[hctx_idx];
252
253         WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
254         hctx->driver_data = nvmeq;
255         return 0;
256 }
257
258 static int nvme_init_request(void *data, struct request *req,
259                                 unsigned int hctx_idx, unsigned int rq_idx,
260                                 unsigned int numa_node)
261 {
262         struct nvme_dev *dev = data;
263         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
264         struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
265
266         BUG_ON(!nvmeq);
267         iod->nvmeq = nvmeq;
268         return 0;
269 }
270
271 static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
272 {
273         struct nvme_dev *dev = set->driver_data;
274
275         return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
276 }
277
278 /**
279  * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
280  * @nvmeq: The queue to use
281  * @cmd: The command to send
282  *
283  * Safe to use from interrupt context
284  */
285 static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
286                                                 struct nvme_command *cmd)
287 {
288         u16 tail = nvmeq->sq_tail;
289
290         if (nvmeq->sq_cmds_io)
291                 memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
292         else
293                 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
294
295         if (++tail == nvmeq->q_depth)
296                 tail = 0;
297         writel(tail, nvmeq->q_db);
298         nvmeq->sq_tail = tail;
299 }
300
301 static __le64 **iod_list(struct request *req)
302 {
303         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
304         return (__le64 **)(iod->sg + req->nr_phys_segments);
305 }
306
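/*
 * Set up the per-request iod.  Requests small enough to be covered by
 * NVME_INT_PAGES pages use the scatterlist embedded in the request PDU;
 * larger requests allocate a scatterlist plus PRP-list pointer array with
 * GFP_ATOMIC, returning BLK_MQ_RQ_QUEUE_BUSY (so the block layer retries
 * later) if that allocation fails.
 */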
307 static int nvme_init_iod(struct request *rq, unsigned size,
308                 struct nvme_dev *dev)
309 {
310         struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
311         int nseg = rq->nr_phys_segments;
312
313         if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
314                 iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
315                 if (!iod->sg)
316                         return BLK_MQ_RQ_QUEUE_BUSY;
317         } else {
318                 iod->sg = iod->inline_sg;
319         }
320
321         iod->aborted = 0;
322         iod->npages = -1;
323         iod->nents = 0;
324         iod->length = size;
325
326         if (!(rq->cmd_flags & REQ_DONTPREP)) {
327                 rq->retries = 0;
328                 rq->cmd_flags |= REQ_DONTPREP;
329         }
330         return 0;
331 }
332
333 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
334 {
335         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
336         const int last_prp = dev->ctrl.page_size / 8 - 1;
337         int i;
338         __le64 **list = iod_list(req);
339         dma_addr_t prp_dma = iod->first_dma;
340
341         nvme_cleanup_cmd(req);
342
343         if (iod->npages == 0)
344                 dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
345         for (i = 0; i < iod->npages; i++) {
346                 __le64 *prp_list = list[i];
347                 dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
348                 dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
349                 prp_dma = next_prp_dma;
350         }
351
352         if (iod->sg != iod->inline_sg)
353                 kfree(iod->sg);
354 }
355
356 #ifdef CONFIG_BLK_DEV_INTEGRITY
357 static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
358 {
359         if (be32_to_cpu(pi->ref_tag) == v)
360                 pi->ref_tag = cpu_to_be32(p);
361 }
362
363 static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
364 {
365         if (be32_to_cpu(pi->ref_tag) == p)
366                 pi->ref_tag = cpu_to_be32(v);
367 }
368
369 /**
370  * nvme_dif_remap - remaps ref tags to bip seed and physical lba
371  *
372  * The virtual start sector is the one that was originally submitted by the
373  * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical
374  * start sector may be different. Remap protection information to match the
375  * physical LBA on writes, and back to the original seed on reads.
376  *
377  * Type 0 and 3 do not have a ref tag, so no remapping required.
378  */
379 static void nvme_dif_remap(struct request *req,
380                         void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
381 {
382         struct nvme_ns *ns = req->rq_disk->private_data;
383         struct bio_integrity_payload *bip;
384         struct t10_pi_tuple *pi;
385         void *p, *pmap;
386         u32 i, nlb, ts, phys, virt;
387
388         if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
389                 return;
390
391         bip = bio_integrity(req->bio);
392         if (!bip)
393                 return;
394
395         pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
396
397         p = pmap;
398         virt = bip_get_seed(bip);
399         phys = nvme_block_nr(ns, blk_rq_pos(req));
400         nlb = (blk_rq_bytes(req) >> ns->lba_shift);
401         ts = ns->disk->queue->integrity.tuple_size;
402
403         for (i = 0; i < nlb; i++, virt++, phys++) {
404                 pi = (struct t10_pi_tuple *)p;
405                 dif_swap(phys, virt, pi);
406                 p += ts;
407         }
408         kunmap_atomic(pmap);
409 }
410 #else /* CONFIG_BLK_DEV_INTEGRITY */
411 static void nvme_dif_remap(struct request *req,
412                         void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
413 {
414 }
415 static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
416 {
417 }
418 static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
419 {
420 }
421 #endif
422
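/*
 * Build the PRP entries for a request.  PRP1 (set by the caller from the
 * first scatterlist entry) covers the first, possibly unaligned, page.  If
 * at most one more page is needed, iod->first_dma is used directly as PRP2;
 * otherwise a PRP list is allocated (from the small pool for up to 32
 * entries, else the page-sized pool) and PRP2 points at it, with the last
 * entry of each full list page chaining to the next list page.
 */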
423 static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
424                 int total_len)
425 {
426         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
427         struct dma_pool *pool;
428         int length = total_len;
429         struct scatterlist *sg = iod->sg;
430         int dma_len = sg_dma_len(sg);
431         u64 dma_addr = sg_dma_address(sg);
432         u32 page_size = dev->ctrl.page_size;
433         int offset = dma_addr & (page_size - 1);
434         __le64 *prp_list;
435         __le64 **list = iod_list(req);
436         dma_addr_t prp_dma;
437         int nprps, i;
438
439         length -= (page_size - offset);
440         if (length <= 0)
441                 return true;
442
443         dma_len -= (page_size - offset);
444         if (dma_len) {
445                 dma_addr += (page_size - offset);
446         } else {
447                 sg = sg_next(sg);
448                 dma_addr = sg_dma_address(sg);
449                 dma_len = sg_dma_len(sg);
450         }
451
452         if (length <= page_size) {
453                 iod->first_dma = dma_addr;
454                 return true;
455         }
456
457         nprps = DIV_ROUND_UP(length, page_size);
458         if (nprps <= (256 / 8)) {
459                 pool = dev->prp_small_pool;
460                 iod->npages = 0;
461         } else {
462                 pool = dev->prp_page_pool;
463                 iod->npages = 1;
464         }
465
466         prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
467         if (!prp_list) {
468                 iod->first_dma = dma_addr;
469                 iod->npages = -1;
470                 return false;
471         }
472         list[0] = prp_list;
473         iod->first_dma = prp_dma;
474         i = 0;
475         for (;;) {
476                 if (i == page_size >> 3) {
477                         __le64 *old_prp_list = prp_list;
478                         prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
479                         if (!prp_list)
480                                 return false;
481                         list[iod->npages++] = prp_list;
482                         prp_list[0] = old_prp_list[i - 1];
483                         old_prp_list[i - 1] = cpu_to_le64(prp_dma);
484                         i = 1;
485                 }
486                 prp_list[i++] = cpu_to_le64(dma_addr);
487                 dma_len -= page_size;
488                 dma_addr += page_size;
489                 length -= page_size;
490                 if (length <= 0)
491                         break;
492                 if (dma_len > 0)
493                         continue;
494                 BUG_ON(dma_len < 0);
495                 sg = sg_next(sg);
496                 dma_addr = sg_dma_address(sg);
497                 dma_len = sg_dma_len(sg);
498         }
499
500         return true;
501 }
502
503 static int nvme_map_data(struct nvme_dev *dev, struct request *req,
504                 unsigned size, struct nvme_command *cmnd)
505 {
506         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
507         struct request_queue *q = req->q;
508         enum dma_data_direction dma_dir = rq_data_dir(req) ?
509                         DMA_TO_DEVICE : DMA_FROM_DEVICE;
510         int ret = BLK_MQ_RQ_QUEUE_ERROR;
511
512         sg_init_table(iod->sg, req->nr_phys_segments);
513         iod->nents = blk_rq_map_sg(q, req, iod->sg);
514         if (!iod->nents)
515                 goto out;
516
517         ret = BLK_MQ_RQ_QUEUE_BUSY;
518         if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
519                                 DMA_ATTR_NO_WARN))
520                 goto out;
521
522         if (!nvme_setup_prps(dev, req, size))
523                 goto out_unmap;
524
525         ret = BLK_MQ_RQ_QUEUE_ERROR;
526         if (blk_integrity_rq(req)) {
527                 if (blk_rq_count_integrity_sg(q, req->bio) != 1)
528                         goto out_unmap;
529
530                 sg_init_table(&iod->meta_sg, 1);
531                 if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
532                         goto out_unmap;
533
534                 if (rq_data_dir(req))
535                         nvme_dif_remap(req, nvme_dif_prep);
536
537                 if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
538                         goto out_unmap;
539         }
540
541         cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
542         cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
543         if (blk_integrity_rq(req))
544                 cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
545         return BLK_MQ_RQ_QUEUE_OK;
546
547 out_unmap:
548         dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
549 out:
550         return ret;
551 }
552
553 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
554 {
555         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
556         enum dma_data_direction dma_dir = rq_data_dir(req) ?
557                         DMA_TO_DEVICE : DMA_FROM_DEVICE;
558
559         if (iod->nents) {
560                 dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
561                 if (blk_integrity_rq(req)) {
562                         if (!rq_data_dir(req))
563                                 nvme_dif_remap(req, nvme_dif_complete);
564                         dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
565                 }
566         }
567
568         nvme_free_iod(dev, req);
569 }
570
571 /*
572  * NOTE: ns is NULL when called on the admin queue.
573  */
574 static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
575                          const struct blk_mq_queue_data *bd)
576 {
577         struct nvme_ns *ns = hctx->queue->queuedata;
578         struct nvme_queue *nvmeq = hctx->driver_data;
579         struct nvme_dev *dev = nvmeq->dev;
580         struct request *req = bd->rq;
581         struct nvme_command cmnd;
582         unsigned map_len;
583         int ret = BLK_MQ_RQ_QUEUE_OK;
584
585         /*
586          * If formatted with metadata, require that the block layer provide a buffer
587          * unless this namespace is formatted such that the metadata can be
588          * stripped/generated by the controller with PRACT=1.
589          */
590         if (ns && ns->ms && !blk_integrity_rq(req)) {
591                 if (!(ns->pi_type && ns->ms == 8) &&
592                                         req->cmd_type != REQ_TYPE_DRV_PRIV) {
593                         blk_mq_end_request(req, -EFAULT);
594                         return BLK_MQ_RQ_QUEUE_OK;
595                 }
596         }
597
598         map_len = nvme_map_len(req);
599         ret = nvme_init_iod(req, map_len, dev);
600         if (ret)
601                 return ret;
602
603         ret = nvme_setup_cmd(ns, req, &cmnd);
604         if (ret)
605                 goto out;
606
607         if (req->nr_phys_segments)
608                 ret = nvme_map_data(dev, req, map_len, &cmnd);
609
610         if (ret)
611                 goto out;
612
613         cmnd.common.command_id = req->tag;
614         blk_mq_start_request(req);
615
616         spin_lock_irq(&nvmeq->q_lock);
617         if (unlikely(nvmeq->cq_vector < 0)) {
618                 if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
619                         ret = BLK_MQ_RQ_QUEUE_BUSY;
620                 else
621                         ret = BLK_MQ_RQ_QUEUE_ERROR;
622                 spin_unlock_irq(&nvmeq->q_lock);
623                 goto out;
624         }
625         __nvme_submit_cmd(nvmeq, &cmnd);
626         nvme_process_cq(nvmeq);
627         spin_unlock_irq(&nvmeq->q_lock);
628         return BLK_MQ_RQ_QUEUE_OK;
629 out:
630         nvme_free_iod(dev, req);
631         return ret;
632 }
633
634 static void nvme_complete_rq(struct request *req)
635 {
636         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
637         struct nvme_dev *dev = iod->nvmeq->dev;
638         int error = 0;
639
640         nvme_unmap_data(dev, req);
641
642         if (unlikely(req->errors)) {
643                 if (nvme_req_needs_retry(req, req->errors)) {
644                         req->retries++;
645                         nvme_requeue_req(req);
646                         return;
647                 }
648
649                 if (req->cmd_type == REQ_TYPE_DRV_PRIV)
650                         error = req->errors;
651                 else
652                         error = nvme_error_status(req->errors);
653         }
654
655         if (unlikely(iod->aborted)) {
656                 dev_warn(dev->ctrl.device,
657                         "completing aborted command with status: %04x\n",
658                         req->errors);
659         }
660
661         blk_mq_end_request(req, error);
662 }
663
664 /* We read the CQE phase first to check if the rest of the entry is valid */
665 static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
666                 u16 phase)
667 {
668         return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
669 }
670
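/*
 * Reap completion entries.  The controller inverts the phase bit each time
 * it wraps the completion queue, so an entry is new only while its phase
 * bit matches nvmeq->cq_phase; the driver flips cq_phase whenever the head
 * wraps.  If @tag is non-NULL it is set to -1 when a matching completion
 * is seen, which is how the poll path detects its command.
 */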
671 static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
672 {
673         u16 head, phase;
674
675         head = nvmeq->cq_head;
676         phase = nvmeq->cq_phase;
677
678         while (nvme_cqe_valid(nvmeq, head, phase)) {
679                 struct nvme_completion cqe = nvmeq->cqes[head];
680                 struct request *req;
681
682                 if (++head == nvmeq->q_depth) {
683                         head = 0;
684                         phase = !phase;
685                 }
686
687                 if (tag && *tag == cqe.command_id)
688                         *tag = -1;
689
690                 if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
691                         dev_warn(nvmeq->dev->ctrl.device,
692                                 "invalid id %d completed on queue %d\n",
693                                 cqe.command_id, le16_to_cpu(cqe.sq_id));
694                         continue;
695                 }
696
697                 /*
698                  * AEN requests are special as they don't time out and can
699                  * survive any kind of queue freeze and often don't respond to
700                  * aborts.  We don't even bother to allocate a struct request
701                  * for them but rather special case them here.
702                  */
703                 if (unlikely(nvmeq->qid == 0 &&
704                                 cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
705                         nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
706                         continue;
707                 }
708
709                 req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
710                 if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
711                         memcpy(req->special, &cqe, sizeof(cqe));
712                 blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
713
714         }
715
716         /* If the controller ignores the cq head doorbell and continuously
717          * writes to the queue, it is theoretically possible to wrap around
718          * the queue twice and mistakenly return IRQ_NONE.  Linux only
719          * requires that 0.1% of your interrupts are handled, so this isn't
720          * a big problem.
721          */
722         if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
723                 return;
724
725         if (likely(nvmeq->cq_vector >= 0))
726                 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
727         nvmeq->cq_head = head;
728         nvmeq->cq_phase = phase;
729
730         nvmeq->cqe_seen = 1;
731 }
732
733 static void nvme_process_cq(struct nvme_queue *nvmeq)
734 {
735         __nvme_process_cq(nvmeq, NULL);
736 }
737
738 static irqreturn_t nvme_irq(int irq, void *data)
739 {
740         irqreturn_t result;
741         struct nvme_queue *nvmeq = data;
742         spin_lock(&nvmeq->q_lock);
743         nvme_process_cq(nvmeq);
744         result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
745         nvmeq->cqe_seen = 0;
746         spin_unlock(&nvmeq->q_lock);
747         return result;
748 }
749
750 static irqreturn_t nvme_irq_check(int irq, void *data)
751 {
752         struct nvme_queue *nvmeq = data;
753         if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
754                 return IRQ_WAKE_THREAD;
755         return IRQ_NONE;
756 }
757
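/* blk-mq poll callback: reap the CQ and report whether @tag has completed. */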
758 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
759 {
760         struct nvme_queue *nvmeq = hctx->driver_data;
761
762         if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
763                 spin_lock_irq(&nvmeq->q_lock);
764                 __nvme_process_cq(nvmeq, &tag);
765                 spin_unlock_irq(&nvmeq->q_lock);
766
767                 if (tag == -1)
768                         return 1;
769         }
770
771         return 0;
772 }
773
774 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
775 {
776         struct nvme_dev *dev = to_nvme_dev(ctrl);
777         struct nvme_queue *nvmeq = dev->queues[0];
778         struct nvme_command c;
779
780         memset(&c, 0, sizeof(c));
781         c.common.opcode = nvme_admin_async_event;
782         c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;
783
784         spin_lock_irq(&nvmeq->q_lock);
785         __nvme_submit_cmd(nvmeq, &c);
786         spin_unlock_irq(&nvmeq->q_lock);
787 }
788
789 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
790 {
791         struct nvme_command c;
792
793         memset(&c, 0, sizeof(c));
794         c.delete_queue.opcode = opcode;
795         c.delete_queue.qid = cpu_to_le16(id);
796
797         return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
798 }
799
800 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
801                                                 struct nvme_queue *nvmeq)
802 {
803         struct nvme_command c;
804         int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
805
806         /*
807          * Note: we (ab)use the fact that the prp fields survive if no data
808          * is attached to the request.
809          */
810         memset(&c, 0, sizeof(c));
811         c.create_cq.opcode = nvme_admin_create_cq;
812         c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
813         c.create_cq.cqid = cpu_to_le16(qid);
814         c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
815         c.create_cq.cq_flags = cpu_to_le16(flags);
816         c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
817
818         return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
819 }
820
821 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
822                                                 struct nvme_queue *nvmeq)
823 {
824         struct nvme_command c;
825         int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
826
827         /*
828          * Note: we (ab)use the fact that the prp fields survive if no data
829          * is attached to the request.
830          */
831         memset(&c, 0, sizeof(c));
832         c.create_sq.opcode = nvme_admin_create_sq;
833         c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
834         c.create_sq.sqid = cpu_to_le16(qid);
835         c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
836         c.create_sq.sq_flags = cpu_to_le16(flags);
837         c.create_sq.cqid = cpu_to_le16(qid);
838
839         return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
840 }
841
842 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
843 {
844         return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
845 }
846
847 static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
848 {
849         return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
850 }
851
852 static void abort_endio(struct request *req, int error)
853 {
854         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
855         struct nvme_queue *nvmeq = iod->nvmeq;
856         u16 status = req->errors;
857
858         dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
859         atomic_inc(&nvmeq->dev->ctrl.abort_limit);
860         blk_mq_free_request(req);
861 }
862
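/*
 * Timeout handler, escalating in three steps: if the controller is still
 * resetting, disable it and complete the request; if the command is on the
 * admin queue or was already aborted once, disable the controller and
 * schedule a reset; otherwise send an Abort command (subject to the
 * controller's abort limit) and restart the timer.
 */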
863 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
864 {
865         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
866         struct nvme_queue *nvmeq = iod->nvmeq;
867         struct nvme_dev *dev = nvmeq->dev;
868         struct request *abort_req;
869         struct nvme_command cmd;
870
871         /*
872          * Shutdown immediately if controller times out while starting. The
873          * reset work will see the pci device disabled when it gets the forced
874          * cancellation error. All outstanding requests are completed on
875          * shutdown, so we return BLK_EH_HANDLED.
876          */
877         if (dev->ctrl.state == NVME_CTRL_RESETTING) {
878                 dev_warn(dev->ctrl.device,
879                          "I/O %d QID %d timeout, disable controller\n",
880                          req->tag, nvmeq->qid);
881                 nvme_dev_disable(dev, false);
882                 req->errors = NVME_SC_CANCELLED;
883                 return BLK_EH_HANDLED;
884         }
885
886         /*
887          * Shutdown the controller immediately and schedule a reset if the
888          * command was already aborted once before and still hasn't been
889          * returned to the driver, or if this is the admin queue.
890          */
891         if (!nvmeq->qid || iod->aborted) {
892                 dev_warn(dev->ctrl.device,
893                          "I/O %d QID %d timeout, reset controller\n",
894                          req->tag, nvmeq->qid);
895                 nvme_dev_disable(dev, false);
896                 queue_work(nvme_workq, &dev->reset_work);
897
898                 /*
899                  * Mark the request as handled, since the inline shutdown
900                  * forces all outstanding requests to complete.
901                  */
902                 req->errors = NVME_SC_CANCELLED;
903                 return BLK_EH_HANDLED;
904         }
905
906         iod->aborted = 1;
907
908         if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
909                 atomic_inc(&dev->ctrl.abort_limit);
910                 return BLK_EH_RESET_TIMER;
911         }
912
913         memset(&cmd, 0, sizeof(cmd));
914         cmd.abort.opcode = nvme_admin_abort_cmd;
915         cmd.abort.cid = req->tag;
916         cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
917
918         dev_warn(nvmeq->dev->ctrl.device,
919                 "I/O %d QID %d timeout, aborting\n",
920                  req->tag, nvmeq->qid);
921
922         abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
923                         BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
924         if (IS_ERR(abort_req)) {
925                 atomic_inc(&dev->ctrl.abort_limit);
926                 return BLK_EH_RESET_TIMER;
927         }
928
929         abort_req->timeout = ADMIN_TIMEOUT;
930         abort_req->end_io_data = NULL;
931         blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
932
933         /*
934          * The aborted request will be completed when the abort command completes.
935          * Re-arm the timer; if the request times out a second time, the check
936          * above forces a controller reset, as the device is then in a faulty state.
937          */
938         return BLK_EH_RESET_TIMER;
939 }
940
941 static void nvme_free_queue(struct nvme_queue *nvmeq)
942 {
943         dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
944                                 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
945         if (nvmeq->sq_cmds)
946                 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
947                                         nvmeq->sq_cmds, nvmeq->sq_dma_addr);
948         kfree(nvmeq);
949 }
950
951 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
952 {
953         int i;
954
955         for (i = dev->queue_count - 1; i >= lowest; i--) {
956                 struct nvme_queue *nvmeq = dev->queues[i];
957                 dev->queue_count--;
958                 dev->queues[i] = NULL;
959                 nvme_free_queue(nvmeq);
960         }
961 }
962
963 /**
964  * nvme_suspend_queue - put queue into suspended state
965  * @nvmeq: queue to suspend
966  */
967 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
968 {
969         int vector;
970
971         spin_lock_irq(&nvmeq->q_lock);
972         if (nvmeq->cq_vector == -1) {
973                 spin_unlock_irq(&nvmeq->q_lock);
974                 return 1;
975         }
976         vector = nvmeq_irq(nvmeq);
977         nvmeq->dev->online_queues--;
978         nvmeq->cq_vector = -1;
979         spin_unlock_irq(&nvmeq->q_lock);
980
981         if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
982                 blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
983
984         free_irq(vector, nvmeq);
985
986         return 0;
987 }
988
989 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
990 {
991         struct nvme_queue *nvmeq = dev->queues[0];
992
993         if (!nvmeq)
994                 return;
995         if (nvme_suspend_queue(nvmeq))
996                 return;
997
998         if (shutdown)
999                 nvme_shutdown_ctrl(&dev->ctrl);
1000         else
1001                 nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
1002                                                 dev->bar + NVME_REG_CAP));
1003
1004         spin_lock_irq(&nvmeq->q_lock);
1005         nvme_process_cq(nvmeq);
1006         spin_unlock_irq(&nvmeq->q_lock);
1007 }
1008
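/*
 * When submission queues live in the controller memory buffer, shrink the
 * queue depth so that all I/O SQs fit in the CMB.  Returning -ENOMEM for
 * depths below 64 makes the caller fall back to host memory at the
 * original depth instead.
 */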
1009 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
1010                                 int entry_size)
1011 {
1012         int q_depth = dev->q_depth;
1013         unsigned q_size_aligned = roundup(q_depth * entry_size,
1014                                           dev->ctrl.page_size);
1015
1016         if (q_size_aligned * nr_io_queues > dev->cmb_size) {
1017                 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
1018                 mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
1019                 q_depth = div_u64(mem_per_q, entry_size);
1020
1021                 /*
1022                  * Ensure the reduced q_depth is above some threshold where it
1023                  * would be better to map queues in system memory with the
1024                  * original depth
1025                  */
1026                 if (q_depth < 64)
1027                         return -ENOMEM;
1028         }
1029
1030         return q_depth;
1031 }
1032
1033 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1034                                 int qid, int depth)
1035 {
1036         if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
1037                 unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
1038                                                       dev->ctrl.page_size);
1039                 nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
1040                 nvmeq->sq_cmds_io = dev->cmb + offset;
1041         } else {
1042                 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
1043                                         &nvmeq->sq_dma_addr, GFP_KERNEL);
1044                 if (!nvmeq->sq_cmds)
1045                         return -ENOMEM;
1046         }
1047
1048         return 0;
1049 }
1050
1051 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1052                                                         int depth)
1053 {
1054         struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
1055         if (!nvmeq)
1056                 return NULL;
1057
1058         nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
1059                                           &nvmeq->cq_dma_addr, GFP_KERNEL);
1060         if (!nvmeq->cqes)
1061                 goto free_nvmeq;
1062
1063         if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
1064                 goto free_cqdma;
1065
1066         nvmeq->q_dmadev = dev->dev;
1067         nvmeq->dev = dev;
1068         snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
1069                         dev->ctrl.instance, qid);
1070         spin_lock_init(&nvmeq->q_lock);
1071         nvmeq->cq_head = 0;
1072         nvmeq->cq_phase = 1;
1073         nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1074         nvmeq->q_depth = depth;
1075         nvmeq->qid = qid;
1076         nvmeq->cq_vector = -1;
1077         dev->queues[qid] = nvmeq;
1078         dev->queue_count++;
1079
1080         return nvmeq;
1081
1082  free_cqdma:
1083         dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
1084                                                         nvmeq->cq_dma_addr);
1085  free_nvmeq:
1086         kfree(nvmeq);
1087         return NULL;
1088 }
1089
1090 static int queue_request_irq(struct nvme_queue *nvmeq)
1091 {
1092         if (use_threaded_interrupts)
1093                 return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check,
1094                                 nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq);
1095         else
1096                 return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED,
1097                                 nvmeq->irqname, nvmeq);
1098 }
1099
1100 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1101 {
1102         struct nvme_dev *dev = nvmeq->dev;
1103
1104         spin_lock_irq(&nvmeq->q_lock);
1105         nvmeq->sq_tail = 0;
1106         nvmeq->cq_head = 0;
1107         nvmeq->cq_phase = 1;
1108         nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1109         memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1110         dev->online_queues++;
1111         spin_unlock_irq(&nvmeq->q_lock);
1112 }
1113
1114 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1115 {
1116         struct nvme_dev *dev = nvmeq->dev;
1117         int result;
1118
1119         nvmeq->cq_vector = qid - 1;
1120         result = adapter_alloc_cq(dev, qid, nvmeq);
1121         if (result < 0)
1122                 return result;
1123
1124         result = adapter_alloc_sq(dev, qid, nvmeq);
1125         if (result < 0)
1126                 goto release_cq;
1127
1128         result = queue_request_irq(nvmeq);
1129         if (result < 0)
1130                 goto release_sq;
1131
1132         nvme_init_queue(nvmeq, qid);
1133         return result;
1134
1135  release_sq:
1136         adapter_delete_sq(dev, qid);
1137  release_cq:
1138         adapter_delete_cq(dev, qid);
1139         return result;
1140 }
1141
1142 static struct blk_mq_ops nvme_mq_admin_ops = {
1143         .queue_rq       = nvme_queue_rq,
1144         .complete       = nvme_complete_rq,
1145         .init_hctx      = nvme_admin_init_hctx,
1146         .exit_hctx      = nvme_admin_exit_hctx,
1147         .init_request   = nvme_admin_init_request,
1148         .timeout        = nvme_timeout,
1149 };
1150
1151 static struct blk_mq_ops nvme_mq_ops = {
1152         .queue_rq       = nvme_queue_rq,
1153         .complete       = nvme_complete_rq,
1154         .init_hctx      = nvme_init_hctx,
1155         .init_request   = nvme_init_request,
1156         .map_queues     = nvme_pci_map_queues,
1157         .timeout        = nvme_timeout,
1158         .poll           = nvme_poll,
1159 };
1160
1161 static void nvme_dev_remove_admin(struct nvme_dev *dev)
1162 {
1163         if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1164                 /*
1165                  * If the controller was reset during removal, it's possible
1166                  * user requests may be waiting on a stopped queue. Start the
1167                  * queue to flush these to completion.
1168                  */
1169                 blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
1170                 blk_cleanup_queue(dev->ctrl.admin_q);
1171                 blk_mq_free_tag_set(&dev->admin_tagset);
1172         }
1173 }
1174
1175 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1176 {
1177         if (!dev->ctrl.admin_q) {
1178                 dev->admin_tagset.ops = &nvme_mq_admin_ops;
1179                 dev->admin_tagset.nr_hw_queues = 1;
1180
1181                 /*
1182                  * Subtract one to leave an empty queue entry for 'Full Queue'
1183                  * condition. See NVM-Express 1.2 specification, section 4.1.2.
1184                  */
1185                 dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
1186                 dev->admin_tagset.timeout = ADMIN_TIMEOUT;
1187                 dev->admin_tagset.numa_node = dev_to_node(dev->dev);
1188                 dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
1189                 dev->admin_tagset.driver_data = dev;
1190
1191                 if (blk_mq_alloc_tag_set(&dev->admin_tagset))
1192                         return -ENOMEM;
1193
1194                 dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
1195                 if (IS_ERR(dev->ctrl.admin_q)) {
1196                         blk_mq_free_tag_set(&dev->admin_tagset);
1197                         return -ENOMEM;
1198                 }
1199                 if (!blk_get_queue(dev->ctrl.admin_q)) {
1200                         nvme_dev_remove_admin(dev);
1201                         dev->ctrl.admin_q = NULL;
1202                         return -ENODEV;
1203                 }
1204         } else
1205                 blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
1206
1207         return 0;
1208 }
1209
1210 static int nvme_configure_admin_queue(struct nvme_dev *dev)
1211 {
1212         int result;
1213         u32 aqa;
1214         u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
1215         struct nvme_queue *nvmeq;
1216
1217         dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
1218                                                 NVME_CAP_NSSRC(cap) : 0;
1219
1220         if (dev->subsystem &&
1221             (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
1222                 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
1223
1224         result = nvme_disable_ctrl(&dev->ctrl, cap);
1225         if (result < 0)
1226                 return result;
1227
1228         nvmeq = dev->queues[0];
1229         if (!nvmeq) {
1230                 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
1231                 if (!nvmeq)
1232                         return -ENOMEM;
1233         }
1234
1235         aqa = nvmeq->q_depth - 1;
1236         aqa |= aqa << 16;
1237
1238         writel(aqa, dev->bar + NVME_REG_AQA);
1239         lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
1240         lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
1241
1242         result = nvme_enable_ctrl(&dev->ctrl, cap);
1243         if (result)
1244                 goto free_nvmeq;
1245
1246         nvmeq->cq_vector = 0;
1247         result = queue_request_irq(nvmeq);
1248         if (result) {
1249                 nvmeq->cq_vector = -1;
1250                 goto free_nvmeq;
1251         }
1252
1253         return result;
1254
1255  free_nvmeq:
1256         nvme_free_queues(dev, 0);
1257         return result;
1258 }
1259
1260 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1261 {
1262
1263         /* If true, indicates loss of adapter communication, possibly by an
1264          * NVMe Subsystem reset.
1265          */
1266         bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1267
1268         /* If there is a reset ongoing, we shouldn't reset again. */
1269         if (work_busy(&dev->reset_work))
1270                 return false;
1271
1272         /* We shouldn't reset unless the controller is on fatal error state
1273          * _or_ if we lost the communication with it.
1274          */
1275         if (!(csts & NVME_CSTS_CFS) && !nssro)
1276                 return false;
1277
1278         /* If PCI error recovery process is happening, we cannot reset or
1279          * the recovery mechanism will surely fail.
1280          */
1281         if (pci_channel_offline(to_pci_dev(dev->dev)))
1282                 return false;
1283
1284         return true;
1285 }
1286
1287 static void nvme_watchdog_timer(unsigned long data)
1288 {
1289         struct nvme_dev *dev = (struct nvme_dev *)data;
1290         u32 csts = readl(dev->bar + NVME_REG_CSTS);
1291
1292         /* Skip controllers under certain specific conditions. */
1293         if (nvme_should_reset(dev, csts)) {
1294                 if (queue_work(nvme_workq, &dev->reset_work))
1295                         dev_warn(dev->dev,
1296                                 "Failed status: 0x%x, reset controller.\n",
1297                                 csts);
1298                 return;
1299         }
1300
1301         mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
1302 }
1303
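/*
 * Allocate any not-yet-allocated I/O queues up to max_qid, then create
 * (CQ, SQ and IRQ) every allocated queue that is not yet online.
 */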
1304 static int nvme_create_io_queues(struct nvme_dev *dev)
1305 {
1306         unsigned i, max;
1307         int ret = 0;
1308
1309         for (i = dev->queue_count; i <= dev->max_qid; i++) {
1310                 if (!nvme_alloc_queue(dev, i, dev->q_depth)) {
1311                         ret = -ENOMEM;
1312                         break;
1313                 }
1314         }
1315
1316         max = min(dev->max_qid, dev->queue_count - 1);
1317         for (i = dev->online_queues; i <= max; i++) {
1318                 ret = nvme_create_queue(dev->queues[i], i);
1319                 if (ret) {
1320                         nvme_free_queues(dev, i);
1321                         break;
1322                 }
1323         }
1324
1325         /*
1326          * Ignore failing Create SQ/CQ commands; we can continue with fewer
1327          * than the desired number of queues, and even a controller without
1328          * I/O queues can still be used to issue admin commands.  This might
1329          * be useful to upgrade buggy firmware, for example.
1330          */
1331         return ret >= 0 ? 0 : ret;
1332 }
1333
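/*
 * Map the controller memory buffer, if one is advertised via the
 * CMBSZ/CMBLOC registers, so that I/O submission queues can be placed in
 * it.  The mapping is clamped to the size of the BAR it lives in.
 */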
1334 static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1335 {
1336         u64 szu, size, offset;
1337         u32 cmbloc;
1338         resource_size_t bar_size;
1339         struct pci_dev *pdev = to_pci_dev(dev->dev);
1340         void __iomem *cmb;
1341         dma_addr_t dma_addr;
1342
1343         if (!use_cmb_sqes)
1344                 return NULL;
1345
1346         dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1347         if (!(NVME_CMB_SZ(dev->cmbsz)))
1348                 return NULL;
1349
1350         cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
1351
1352         szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
1353         size = szu * NVME_CMB_SZ(dev->cmbsz);
1354         offset = szu * NVME_CMB_OFST(cmbloc);
1355         bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
1356
1357         if (offset > bar_size)
1358                 return NULL;
1359
1360         /*
1361          * Controllers may support a CMB size larger than their BAR,
1362          * for example, due to being behind a bridge. Reduce the CMB to
1363          * the reported size of the BAR.
1364          */
1365         if (size > bar_size - offset)
1366                 size = bar_size - offset;
1367
1368         dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
1369         cmb = ioremap_wc(dma_addr, size);
1370         if (!cmb)
1371                 return NULL;
1372
1373         dev->cmb_dma_addr = dma_addr;
1374         dev->cmb_size = size;
1375         return cmb;
1376 }
1377
1378 static inline void nvme_release_cmb(struct nvme_dev *dev)
1379 {
1380         if (dev->cmb) {
1381                 iounmap(dev->cmb);
1382                 dev->cmb = NULL;
1383         }
1384 }
1385
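/*
 * Size of BAR 0 needed for the 4 KiB register block plus one pair of
 * submission/completion doorbells (4 bytes each, spaced by the doorbell
 * stride) for the admin queue and every I/O queue.
 */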
1386 static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1387 {
1388         return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
1389 }
1390
1391 static int nvme_setup_io_queues(struct nvme_dev *dev)
1392 {
1393         struct nvme_queue *adminq = dev->queues[0];
1394         struct pci_dev *pdev = to_pci_dev(dev->dev);
1395         int result, nr_io_queues, size;
1396
1397         nr_io_queues = num_online_cpus();
1398         result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
1399         if (result < 0)
1400                 return result;
1401
1402         if (nr_io_queues == 0)
1403                 return 0;
1404
1405         if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
1406                 result = nvme_cmb_qdepth(dev, nr_io_queues,
1407                                 sizeof(struct nvme_command));
1408                 if (result > 0)
1409                         dev->q_depth = result;
1410                 else
1411                         nvme_release_cmb(dev);
1412         }
1413
1414         size = db_bar_size(dev, nr_io_queues);
1415         if (size > 8192) {
1416                 iounmap(dev->bar);
1417                 do {
1418                         dev->bar = ioremap(pci_resource_start(pdev, 0), size);
1419                         if (dev->bar)
1420                                 break;
1421                         if (!--nr_io_queues)
1422                                 return -ENOMEM;
1423                         size = db_bar_size(dev, nr_io_queues);
1424                 } while (1);
1425                 dev->dbs = dev->bar + 4096;
1426                 adminq->q_db = dev->dbs;
1427         }
1428
1429         /* Deregister the admin queue's interrupt */
1430         free_irq(pci_irq_vector(pdev, 0), adminq);
1431
1432         /*
1433          * If we enabled MSI-X early because the device does not support INTx,
1434          * disable it again before setting up the full range we need.
1435          */
1436         pci_free_irq_vectors(pdev);
1437         nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
1438                         PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
1439         if (nr_io_queues <= 0)
1440                 return -EIO;
1441         dev->max_qid = nr_io_queues;
1442
1443         /*
1444          * Should investigate if there's a performance win from allocating
1445          * more queues than interrupt vectors; it might allow the submission
1446          * path to scale better, even if the receive path is limited by the
1447          * number of interrupts.
1448          */
1449
1450         result = queue_request_irq(adminq);
1451         if (result) {
1452                 adminq->cq_vector = -1;
1453                 goto free_queues;
1454         }
1455         return nvme_create_io_queues(dev);
1456
1457  free_queues:
1458         nvme_free_queues(dev, 1);
1459         return result;
1460 }
1461
1462 static void nvme_del_queue_end(struct request *req, int error)
1463 {
1464         struct nvme_queue *nvmeq = req->end_io_data;
1465
1466         blk_mq_free_request(req);
1467         complete(&nvmeq->dev->ioq_wait);
1468 }
1469
1470 static void nvme_del_cq_end(struct request *req, int error)
1471 {
1472         struct nvme_queue *nvmeq = req->end_io_data;
1473
1474         if (!error) {
1475                 unsigned long flags;
1476
1477                 /*
1478                  * We might be called with the AQ q_lock held
1479                  * and the I/O queue q_lock should always
1480                  * nest inside the AQ one.
1481                  */
1482                 spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
1483                                         SINGLE_DEPTH_NESTING);
1484                 nvme_process_cq(nvmeq);
1485                 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
1486         }
1487
1488         nvme_del_queue_end(req, error);
1489 }
1490
1491 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
1492 {
1493         struct request_queue *q = nvmeq->dev->ctrl.admin_q;
1494         struct request *req;
1495         struct nvme_command cmd;
1496
1497         memset(&cmd, 0, sizeof(cmd));
1498         cmd.delete_queue.opcode = opcode;
1499         cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
1500
1501         req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
1502         if (IS_ERR(req))
1503                 return PTR_ERR(req);
1504
1505         req->timeout = ADMIN_TIMEOUT;
1506         req->end_io_data = nvmeq;
1507
1508         blk_execute_rq_nowait(q, NULL, req, false,
1509                         opcode == nvme_admin_delete_cq ?
1510                                 nvme_del_cq_end : nvme_del_queue_end);
1511         return 0;
1512 }
1513
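/*
 * Delete all I/O queues in two passes: first every submission queue, then
 * every completion queue.  The delete commands are issued asynchronously
 * and each completion is waited for on ioq_wait; a wait that times out
 * abandons the teardown.
 */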
1514 static void nvme_disable_io_queues(struct nvme_dev *dev)
1515 {
1516         int pass, queues = dev->online_queues - 1;
1517         unsigned long timeout;
1518         u8 opcode = nvme_admin_delete_sq;
1519
1520         for (pass = 0; pass < 2; pass++) {
1521                 int sent = 0, i = queues;
1522
1523                 reinit_completion(&dev->ioq_wait);
1524  retry:
1525                 timeout = ADMIN_TIMEOUT;
1526                 for (; i > 0; i--, sent++)
1527                         if (nvme_delete_queue(dev->queues[i], opcode))
1528                                 break;
1529
1530                 while (sent--) {
1531                         timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
1532                         if (timeout == 0)
1533                                 return;
1534                         if (i)
1535                                 goto retry;
1536                 }
1537                 opcode = nvme_admin_delete_cq;
1538         }
1539 }
1540
1541 /*
1542  * Return: error value if an error occurred setting up the queues or calling
1543  * Identify Device.  0 if these succeeded, even if adding some of the
1544  * namespaces failed.  At the moment, these failures are silent.  TBD which
1545  * failures should be reported.
1546  */
1547 static int nvme_dev_add(struct nvme_dev *dev)
1548 {
1549         if (!dev->ctrl.tagset) {
1550                 dev->tagset.ops = &nvme_mq_ops;
1551                 dev->tagset.nr_hw_queues = dev->online_queues - 1;
1552                 dev->tagset.timeout = NVME_IO_TIMEOUT;
1553                 dev->tagset.numa_node = dev_to_node(dev->dev);
1554                 dev->tagset.queue_depth =
1555                                 min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
1556                 dev->tagset.cmd_size = nvme_cmd_size(dev);
1557                 dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
1558                 dev->tagset.driver_data = dev;
1559
1560                 if (blk_mq_alloc_tag_set(&dev->tagset))
1561                         return 0;
1562                 dev->ctrl.tagset = &dev->tagset;
1563         } else {
1564                 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
1565
1566                 /* Free previously allocated queues that are no longer usable */
1567                 nvme_free_queues(dev, dev->online_queues);
1568         }
1569
1570         return 0;
1571 }
1572
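/*
 * Bring up the PCI side of the controller: enable memory resources and bus
 * mastering, set the DMA mask, allocate a single interrupt vector for the
 * initial setup, derive the queue depth and doorbell stride from the CAP
 * register, and map the controller memory buffer on NVMe 1.2+ devices.
 */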
1573 static int nvme_pci_enable(struct nvme_dev *dev)
1574 {
1575         u64 cap;
1576         int result = -ENOMEM;
1577         struct pci_dev *pdev = to_pci_dev(dev->dev);
1578
1579         if (pci_enable_device_mem(pdev))
1580                 return result;
1581
1582         pci_set_master(pdev);
1583
1584         if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
1585             dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
1586                 goto disable;
1587
1588         if (readl(dev->bar + NVME_REG_CSTS) == -1) {
1589                 result = -ENODEV;
1590                 goto disable;
1591         }
1592
1593         /*
1594          * Some devices and/or platforms don't advertise or work with INTx
1595          * interrupts. Pre-enable a single MSI-X or MSI vector for setup. We'll
1596          * adjust this later.
1597          */
1598         result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1599         if (result < 0)
1600                 return result;
1601
1602         cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
1603
1604         dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
1605         dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
1606         dev->dbs = dev->bar + 4096;
1607
1608         /*
1609          * Temporary fix for the Apple controller found in the MacBook8,1 and
1610          * some MacBook7,1 to avoid controller resets and data loss.
1611          */
1612         if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
1613                 dev->q_depth = 2;
1614                 dev_warn(dev->dev, "detected Apple NVMe controller, set "
1615                         "queue depth=%u to work around controller resets\n",
1616                         dev->q_depth);
1617         }
1618
1619         if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
1620                 dev->cmb = nvme_map_cmb(dev);
1621
1622         pci_enable_pcie_error_reporting(pdev);
1623         pci_save_state(pdev);
1624         return 0;
1625
1626  disable:
1627         pci_disable_device(pdev);
1628         return result;
1629 }
1630
1631 static void nvme_dev_unmap(struct nvme_dev *dev)
1632 {
1633         if (dev->bar)
1634                 iounmap(dev->bar);
1635         pci_release_mem_regions(to_pci_dev(dev->dev));
1636 }
1637
1638 static void nvme_pci_disable(struct nvme_dev *dev)
1639 {
1640         struct pci_dev *pdev = to_pci_dev(dev->dev);
1641
1642         pci_free_irq_vectors(pdev);
1643
1644         if (pci_is_enabled(pdev)) {
1645                 pci_disable_pcie_error_reporting(pdev);
1646                 pci_disable_device(pdev);
1647         }
1648 }
1649
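/*
 * Quiesce and shut down the controller: stop the watchdog and the namespace
 * request queues, suspend the I/O queues, then either delete the queues and
 * disable the admin queue cleanly or, if the controller is already dead
 * (CFS set or RDY clear), just suspend the admin queue.  Finally release the
 * PCI resources and cancel all outstanding requests.  "shutdown" selects an
 * orderly shutdown notification instead of a plain controller disable.
 */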
1650 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1651 {
1652         int i;
1653         u32 csts = -1;
1654
1655         del_timer_sync(&dev->watchdog_timer);
1656
1657         mutex_lock(&dev->shutdown_lock);
1658         if (pci_is_enabled(to_pci_dev(dev->dev))) {
1659                 nvme_stop_queues(&dev->ctrl);
1660                 csts = readl(dev->bar + NVME_REG_CSTS);
1661         }
1662
1663         for (i = dev->queue_count - 1; i > 0; i--)
1664                 nvme_suspend_queue(dev->queues[i]);
1665
1666         if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
1667                 /* A device might become IO incapable very soon during
1668                  * probe, before the admin queue is configured. Thus,
1669                  * queue_count can be 0 here.
1670                  */
1671                 if (dev->queue_count)
1672                         nvme_suspend_queue(dev->queues[0]);
1673         } else {
1674                 nvme_disable_io_queues(dev);
1675                 nvme_disable_admin_queue(dev, shutdown);
1676         }
1677         nvme_pci_disable(dev);
1678
1679         blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
1680         blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
1681         mutex_unlock(&dev->shutdown_lock);
1682 }
1683
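/*
 * Create the two DMA pools used for PRP lists: a page-sized pool for large
 * transfers and a 256-byte pool for requests that only need a short list.
 */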
1684 static int nvme_setup_prp_pools(struct nvme_dev *dev)
1685 {
1686         dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
1687                                                 PAGE_SIZE, PAGE_SIZE, 0);
1688         if (!dev->prp_page_pool)
1689                 return -ENOMEM;
1690
1691         /* Optimisation for I/Os between 4k and 128k */
1692         dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
1693                                                 256, 256, 0);
1694         if (!dev->prp_small_pool) {
1695                 dma_pool_destroy(dev->prp_page_pool);
1696                 return -ENOMEM;
1697         }
1698         return 0;
1699 }
1700
1701 static void nvme_release_prp_pools(struct nvme_dev *dev)
1702 {
1703         dma_pool_destroy(dev->prp_page_pool);
1704         dma_pool_destroy(dev->prp_small_pool);
1705 }
1706
1707 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
1708 {
1709         struct nvme_dev *dev = to_nvme_dev(ctrl);
1710
1711         put_device(dev->dev);
1712         if (dev->tagset.tags)
1713                 blk_mq_free_tag_set(&dev->tagset);
1714         if (dev->ctrl.admin_q)
1715                 blk_put_queue(dev->ctrl.admin_q);
1716         kfree(dev->queues);
1717         kfree(dev);
1718 }
1719
1720 static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
1721 {
1722         dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
1723
1724         kref_get(&dev->ctrl.kref);
1725         nvme_dev_disable(dev, false);
1726         if (!schedule_work(&dev->remove_work))
1727                 nvme_put_ctrl(&dev->ctrl);
1728 }
1729
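/*
 * Controller initialization and reset path, run from the nvme workqueue.
 * Shuts down a live controller, re-enables the PCI resources, sets up the
 * admin queue and tags, identifies the controller and creates the I/O
 * queues.  With working I/O queues the tag set is (re)registered and a
 * namespace scan is kicked off; without them the namespaces are removed but
 * the controller is kept around.  Any hard failure hands the device to
 * nvme_remove_dead_ctrl().
 */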
1730 static void nvme_reset_work(struct work_struct *work)
1731 {
1732         struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
1733         int result = -ENODEV;
1734
1735         if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
1736                 goto out;
1737
1738         /*
1739          * If we're called to reset a live controller, first shut it down
1740          * before moving on.
1741          */
1742         if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
1743                 nvme_dev_disable(dev, false);
1744
1745         if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
1746                 goto out;
1747
1748         result = nvme_pci_enable(dev);
1749         if (result)
1750                 goto out;
1751
1752         result = nvme_configure_admin_queue(dev);
1753         if (result)
1754                 goto out;
1755
1756         nvme_init_queue(dev->queues[0], 0);
1757         result = nvme_alloc_admin_tags(dev);
1758         if (result)
1759                 goto out;
1760
1761         result = nvme_init_identify(&dev->ctrl);
1762         if (result)
1763                 goto out;
1764
1765         result = nvme_setup_io_queues(dev);
1766         if (result)
1767                 goto out;
1768
1769         /*
1770          * A controller that cannot execute IO typically requires user
1771          * intervention to correct. For such degraded controllers, the driver
1772          * should not submit commands the user did not request, so skip
1773          * registering for asynchronous event notification on this condition.
1774          */
1775         if (dev->online_queues > 1)
1776                 nvme_queue_async_events(&dev->ctrl);
1777
1778         mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
1779
1780         /*
1781          * Keep the controller around but remove all namespaces if we don't have
1782          * any working I/O queue.
1783          */
1784         if (dev->online_queues < 2) {
1785                 dev_warn(dev->ctrl.device, "IO queues not created\n");
1786                 nvme_kill_queues(&dev->ctrl);
1787                 nvme_remove_namespaces(&dev->ctrl);
1788         } else {
1789                 nvme_start_queues(&dev->ctrl);
1790                 nvme_dev_add(dev);
1791         }
1792
1793         if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
1794                 dev_warn(dev->ctrl.device, "failed to mark controller live\n");
1795                 goto out;
1796         }
1797
1798         if (dev->online_queues > 1)
1799                 nvme_queue_scan(&dev->ctrl);
1800         return;
1801
1802  out:
1803         nvme_remove_dead_ctrl(dev, result);
1804 }
1805
1806 static void nvme_remove_dead_ctrl_work(struct work_struct *work)
1807 {
1808         struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
1809         struct pci_dev *pdev = to_pci_dev(dev->dev);
1810
1811         nvme_kill_queues(&dev->ctrl);
1812         if (pci_get_drvdata(pdev))
1813                 device_release_driver(&pdev->dev);
1814         nvme_put_ctrl(&dev->ctrl);
1815 }
1816
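/*
 * Kick off a controller reset and wait for it to finish.  Fails with -ENODEV
 * if the admin queue is gone and with -EBUSY if a reset is already pending.
 */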
1817 static int nvme_reset(struct nvme_dev *dev)
1818 {
1819         if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
1820                 return -ENODEV;
1821
1822         if (!queue_work(nvme_workq, &dev->reset_work))
1823                 return -EBUSY;
1824
1825         flush_work(&dev->reset_work);
1826         return 0;
1827 }
1828
1829 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
1830 {
1831         *val = readl(to_nvme_dev(ctrl)->bar + off);
1832         return 0;
1833 }
1834
1835 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
1836 {
1837         writel(val, to_nvme_dev(ctrl)->bar + off);
1838         return 0;
1839 }
1840
1841 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
1842 {
1843         *val = readq(to_nvme_dev(ctrl)->bar + off);
1844         return 0;
1845 }
1846
1847 static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
1848 {
1849         return nvme_reset(to_nvme_dev(ctrl));
1850 }
1851
1852 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
1853         .name                   = "pcie",
1854         .module                 = THIS_MODULE,
1855         .reg_read32             = nvme_pci_reg_read32,
1856         .reg_write32            = nvme_pci_reg_write32,
1857         .reg_read64             = nvme_pci_reg_read64,
1858         .reset_ctrl             = nvme_pci_reset_ctrl,
1859         .free_ctrl              = nvme_pci_free_ctrl,
1860         .submit_async_event     = nvme_pci_submit_async_event,
1861 };
1862
1863 static int nvme_dev_map(struct nvme_dev *dev)
1864 {
1865         struct pci_dev *pdev = to_pci_dev(dev->dev);
1866
1867         if (pci_request_mem_regions(pdev, "nvme"))
1868                 return -ENODEV;
1869
1870         dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
1871         if (!dev->bar)
1872                 goto release;
1873
1874         return 0;
1875  release:
1876         pci_release_mem_regions(pdev);
1877         return -ENODEV;
1878 }
1879
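/*
 * PCI probe: allocate the nvme_dev and its queue array on the device's NUMA
 * node, map the register BAR, set up work items, the watchdog timer and the
 * PRP pools, and register the controller with the NVMe core.  The actual
 * controller bring-up is deferred to nvme_reset_work().
 */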
1880 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1881 {
1882         int node, result = -ENOMEM;
1883         struct nvme_dev *dev;
1884
1885         node = dev_to_node(&pdev->dev);
1886         if (node == NUMA_NO_NODE)
1887                 set_dev_node(&pdev->dev, first_memory_node);
1888
1889         dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
1890         if (!dev)
1891                 return -ENOMEM;
1892         dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
1893                                                         GFP_KERNEL, node);
1894         if (!dev->queues)
1895                 goto free;
1896
1897         dev->dev = get_device(&pdev->dev);
1898         pci_set_drvdata(pdev, dev);
1899
1900         result = nvme_dev_map(dev);
1901         if (result)
1902                 goto free;
1903
1904         INIT_WORK(&dev->reset_work, nvme_reset_work);
1905         INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
1906         setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
1907                 (unsigned long)dev);
1908         mutex_init(&dev->shutdown_lock);
1909         init_completion(&dev->ioq_wait);
1910
1911         result = nvme_setup_prp_pools(dev);
1912         if (result)
1913                 goto put_pci;
1914
1915         result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
1916                         id->driver_data);
1917         if (result)
1918                 goto release_pools;
1919
1920         dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
1921
1922         queue_work(nvme_workq, &dev->reset_work);
1923         return 0;
1924
1925  release_pools:
1926         nvme_release_prp_pools(dev);
1927  put_pci:
1928         put_device(dev->dev);
1929         nvme_dev_unmap(dev);
1930  free:
1931         kfree(dev->queues);
1932         kfree(dev);
1933         return result;
1934 }
1935
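/*
 * Called by the PCI core around a function reset: quiesce the controller
 * before the reset and schedule a re-initialization once it is done.
 */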
1936 static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
1937 {
1938         struct nvme_dev *dev = pci_get_drvdata(pdev);
1939
1940         if (prepare)
1941                 nvme_dev_disable(dev, false);
1942         else
1943                 queue_work(nvme_workq, &dev->reset_work);
1944 }
1945
1946 static void nvme_shutdown(struct pci_dev *pdev)
1947 {
1948         struct nvme_dev *dev = pci_get_drvdata(pdev);
1949         nvme_dev_disable(dev, true);
1950 }
1951
1952 /*
1953  * The driver's remove may be called on a device in a partially initialized
1954  * state. This function must not have any dependencies on the device state in
1955  * order to proceed.
1956  */
1957 static void nvme_remove(struct pci_dev *pdev)
1958 {
1959         struct nvme_dev *dev = pci_get_drvdata(pdev);
1960
1961         nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1962
1963         pci_set_drvdata(pdev, NULL);
1964
1965         if (!pci_device_is_present(pdev))
1966                 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
1967
1968         flush_work(&dev->reset_work);
1969         nvme_uninit_ctrl(&dev->ctrl);
1970         nvme_dev_disable(dev, true);
1971         nvme_dev_remove_admin(dev);
1972         nvme_free_queues(dev, 0);
1973         nvme_release_cmb(dev);
1974         nvme_release_prp_pools(dev);
1975         nvme_dev_unmap(dev);
1976         nvme_put_ctrl(&dev->ctrl);
1977 }
1978
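/*
 * sriov_configure hook: numvfs == 0 disables SR-IOV (refused while any VF is
 * still assigned), any other value enables that many virtual functions.
 */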
1979 static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
1980 {
1981         int ret = 0;
1982
1983         if (numvfs == 0) {
1984                 if (pci_vfs_assigned(pdev)) {
1985                         dev_warn(&pdev->dev,
1986                                 "Cannot disable SR-IOV VFs while assigned\n");
1987                         return -EPERM;
1988                 }
1989                 pci_disable_sriov(pdev);
1990                 return 0;
1991         }
1992
1993         ret = pci_enable_sriov(pdev, numvfs);
1994         return ret ? ret : numvfs;
1995 }
1996
1997 #ifdef CONFIG_PM_SLEEP
1998 static int nvme_suspend(struct device *dev)
1999 {
2000         struct pci_dev *pdev = to_pci_dev(dev);
2001         struct nvme_dev *ndev = pci_get_drvdata(pdev);
2002
2003         nvme_dev_disable(ndev, true);
2004         return 0;
2005 }
2006
2007 static int nvme_resume(struct device *dev)
2008 {
2009         struct pci_dev *pdev = to_pci_dev(dev);
2010         struct nvme_dev *ndev = pci_get_drvdata(pdev);
2011
2012         queue_work(nvme_workq, &ndev->reset_work);
2013         return 0;
2014 }
2015 #endif
2016
2017 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
2018
2019 static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
2020                                                 pci_channel_state_t state)
2021 {
2022         struct nvme_dev *dev = pci_get_drvdata(pdev);
2023
2024         /*
2025          * A frozen channel requires a reset. When detected, this method will
2026          * shut down the controller to quiesce it. The controller will be
2027          * restarted after the slot reset through the driver's slot_reset callback.
2028          */
2029         switch (state) {
2030         case pci_channel_io_normal:
2031                 return PCI_ERS_RESULT_CAN_RECOVER;
2032         case pci_channel_io_frozen:
2033                 dev_warn(dev->ctrl.device,
2034                         "frozen state error detected, reset controller\n");
2035                 nvme_dev_disable(dev, false);
2036                 return PCI_ERS_RESULT_NEED_RESET;
2037         case pci_channel_io_perm_failure:
2038                 dev_warn(dev->ctrl.device,
2039                         "failure state error detected, request disconnect\n");
2040                 return PCI_ERS_RESULT_DISCONNECT;
2041         }
2042         return PCI_ERS_RESULT_NEED_RESET;
2043 }
2044
2045 static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
2046 {
2047         struct nvme_dev *dev = pci_get_drvdata(pdev);
2048
2049         dev_info(dev->ctrl.device, "restart after slot reset\n");
2050         pci_restore_state(pdev);
2051         queue_work(nvme_workq, &dev->reset_work);
2052         return PCI_ERS_RESULT_RECOVERED;
2053 }
2054
2055 static void nvme_error_resume(struct pci_dev *pdev)
2056 {
2057         pci_cleanup_aer_uncorrect_error_status(pdev);
2058 }
2059
2060 static const struct pci_error_handlers nvme_err_handler = {
2061         .error_detected = nvme_error_detected,
2062         .slot_reset     = nvme_slot_reset,
2063         .resume         = nvme_error_resume,
2064         .reset_notify   = nvme_reset_notify,
2065 };
2066
2067 /* Move to pci_ids.h later */
2068 #define PCI_CLASS_STORAGE_EXPRESS       0x010802
2069
2070 static const struct pci_device_id nvme_id_table[] = {
2071         { PCI_VDEVICE(INTEL, 0x0953),
2072                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2073                                 NVME_QUIRK_DISCARD_ZEROES, },
2074         { PCI_VDEVICE(INTEL, 0x0a53),
2075                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2076                                 NVME_QUIRK_DISCARD_ZEROES, },
2077         { PCI_VDEVICE(INTEL, 0x0a54),
2078                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2079                                 NVME_QUIRK_DISCARD_ZEROES, },
2080         { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
2081                 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2082         { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
2083                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2084         { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
2085                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2086         { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2087         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
2088         { 0, }
2089 };
2090 MODULE_DEVICE_TABLE(pci, nvme_id_table);
2091
2092 static struct pci_driver nvme_driver = {
2093         .name           = "nvme",
2094         .id_table       = nvme_id_table,
2095         .probe          = nvme_probe,
2096         .remove         = nvme_remove,
2097         .shutdown       = nvme_shutdown,
2098         .driver         = {
2099                 .pm     = &nvme_dev_pm_ops,
2100         },
2101         .sriov_configure = nvme_pci_sriov_configure,
2102         .err_handler    = &nvme_err_handler,
2103 };
2104
2105 static int __init nvme_init(void)
2106 {
2107         int result;
2108
2109         nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2110         if (!nvme_workq)
2111                 return -ENOMEM;
2112
2113         result = pci_register_driver(&nvme_driver);
2114         if (result)
2115                 destroy_workqueue(nvme_workq);
2116         return result;
2117 }
2118
2119 static void __exit nvme_exit(void)
2120 {
2121         pci_unregister_driver(&nvme_driver);
2122         destroy_workqueue(nvme_workq);
2123         _nvme_check_size();
2124 }
2125
2126 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
2127 MODULE_LICENSE("GPL");
2128 MODULE_VERSION("1.0");
2129 module_init(nvme_init);
2130 module_exit(nvme_exit);