Merge branch 'for-4.6/drivers' of git://git.kernel.dk/linux-block
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 19 Mar 2016 00:13:31 +0000 (17:13 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 19 Mar 2016 00:13:31 +0000 (17:13 -0700)
Pull block driver updates from Jens Axboe:
 "This is the block driver pull request for this merge window.  It sits
  on top of for-4.6/core, which was just sent out.

  This contains:

   - A set of fixes for lightnvm.  One from Alan, fixing an overflow,
     and the rest from the usual suspects, Javier and Matias.

   - A set of fixes for nbd from Markus and Dan, and a fixup from Arnd
     for correct usage of the signed 64-bit divider.

   - A set of bug fixes for the Micron mtip32xx, from Asai.

   - A fix for the brd discard handling from Bart.

   - Update the maintainers entry for cciss, since ownership of that
     hardware has been transferred.

   - Three bug fixes for bcache from Eric Wheeler.

   - A set of fixes for xen-blk{back,front} from Jan and Konrad.

   - Removal of the cpqarray driver.  It has been disabled in Kconfig
     since 2013, and we were initially scheduled to remove it in 3.15.

   - Various updates and fixes for NVMe, with the most important being:

        - Removal of the per-device NVMe thread, replacing it with a
          watchdog timer (see the sketch after this summary). From
          Christoph.

        - Exposing the namespace WWID through sysfs, from Keith.

        - Set of cleanups from Ming Lin.

        - Logging the controller device name instead of the underlying
          PCI device name, from Sagi.

        - And a bunch of fixes and optimizations from the usual suspects
          in this area"

* 'for-4.6/drivers' of git://git.kernel.dk/linux-block: (49 commits)
  NVMe: Expose ns wwid through single sysfs entry
  drivers:block: cpqarray clean up
  brd: Fix discard request processing
  cpqarray: remove it from the kernel
  cciss: update MAINTAINERS
  NVMe: Remove unused sq_head read in completion path
  bcache: fix cache_set_flush() NULL pointer dereference on OOM
  bcache: cleaned up error handling around register_cache()
  bcache: fix race of writeback thread starting before complete initialization
  NVMe: Create discard zero quirk white list
  nbd: use correct div_s64 helper
  mtip32xx: remove unneeded variable in mtip_cmd_timeout()
  lightnvm: generalize rrpc ppa calculations
  lightnvm: remove struct nvm_dev->total_blocks
  lightnvm: rename ->nr_pages to ->nr_sects
  lightnvm: update closed list outside of intr context
  xen/blback: Fit the important information of the thread in 17 characters
  lightnvm: fold get bb tbl when using dual/quad plane mode
  lightnvm: fix up nonsensical configure overrun checking
  xen-blkback: advertise indirect segment support earlier
  ...

MAINTAINERS
drivers/block/xen-blkfront.c
drivers/lightnvm/core.c
drivers/lightnvm/rrpc.c
drivers/lightnvm/rrpc.h
drivers/nvme/host/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/lightnvm.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
include/linux/lightnvm.h

diff --cc MAINTAINERS
@@@ -5016,16 -4988,10 +5016,10 @@@ T:   git git://linuxtv.org/anttip/media_t
  S:    Maintained
  F:    drivers/media/dvb-frontends/hd29l2*
  
- HEWLETT-PACKARD SMART2 RAID DRIVER
- L:    iss_storagedev@hp.com
- S:    Orphan
- F:    Documentation/blockdev/cpqarray.txt
- F:    drivers/block/cpqarray.*
  HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
 -M:    Don Brace <don.brace@pmcs.com>
 +M:    Don Brace <don.brace@microsemi.com>
  L:    iss_storagedev@hp.com
 -L:    storagedev@pmcs.com
 +L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    Documentation/scsi/hpsa.txt
Simple merge
Simple merge
@@@ -300,16 -310,14 +310,16 @@@ static int rrpc_move_valid_pages(struc
        }
  
        page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
 -      if (!page)
 +      if (!page) {
 +              bio_put(bio);
                return -ENOMEM;
 +      }
  
        while ((slot = find_first_zero_bit(rblk->invalid_pages,
-                                           nr_pgs_per_blk)) < nr_pgs_per_blk) {
+                                           nr_sec_per_blk)) < nr_sec_per_blk) {
  
                /* Lock laddr */
-               phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+               phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
  
  try:
                spin_lock(&rrpc->rev_lock);
Simple merge
Simple merge
@@@ -561,13 -604,9 +609,13 @@@ static int nvme_revalidate_disk(struct 
        u16 old_ms;
        unsigned short bs;
  
 +      if (test_bit(NVME_NS_DEAD, &ns->flags)) {
 +              set_capacity(disk, 0);
 +              return -ENODEV;
 +      }
        if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
-               dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
-                               __func__, ns->ctrl->instance, ns->ns_id);
+               dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
+                               __func__);
                return -ENODEV;
        }
        if (id->ncap == 0) {
@@@ -839,24 -880,8 +889,25 @@@ int nvme_shutdown_ctrl(struct nvme_ctr
  
        return ret;
  }
+ EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
  
 +static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 +              struct request_queue *q)
 +{
 +      if (ctrl->max_hw_sectors) {
 +              u32 max_segments =
 +                      (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 +
 +              blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
 +              blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 +      }
 +      if (ctrl->stripe_size)
 +              blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
 +      if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 +              blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 +      blk_queue_virt_boundary(q, ctrl->page_size - 1);
 +}
 +
  /*
   * Initialize the cached copies of the Identify data and various controller
   * register in our nvme_ctrl structure.  This should be called as soon as
@@@ -1313,9 -1392,12 +1404,10 @@@ void nvme_remove_namespaces(struct nvme
  {
        struct nvme_ns *ns, *next;
  
 -      mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
 -      mutex_unlock(&ctrl->namespaces_mutex);
  }
+ EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
  
  static DEFINE_IDA(nvme_instance_ida);
  
@@@ -1401,8 -1484,6 +1495,7 @@@ int nvme_init_ctrl(struct nvme_ctrl *ct
                goto out_release_instance;
        }
        get_device(ctrl->device);
-       dev_set_drvdata(ctrl->device, ctrl);
 +      ida_init(&ctrl->ns_ida);
  
        spin_lock(&dev_list_lock);
        list_add_tail(&ctrl->node, &nvme_ctrl_list);
@@@ -1414,39 -1495,8 +1507,41 @@@ out_release_instance
  out:
        return ret;
  }
+ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
  
 +/**
 + * nvme_kill_queues(): Ends all namespace queues
 + * @ctrl: the dead controller that needs to end
 + *
 + * Call this function when the driver determines it is unable to get the
 + * controller in a state capable of servicing IO.
 + */
 +void nvme_kill_queues(struct nvme_ctrl *ctrl)
 +{
 +      struct nvme_ns *ns;
 +
 +      mutex_lock(&ctrl->namespaces_mutex);
 +      list_for_each_entry(ns, &ctrl->namespaces, list) {
 +              if (!kref_get_unless_zero(&ns->kref))
 +                      continue;
 +
 +              /*
 +               * Revalidating a dead namespace sets capacity to 0. This will
 +               * end buffered writers dirtying pages that can't be synced.
 +               */
 +              if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
 +                      revalidate_disk(ns->disk);
 +
 +              blk_set_queue_dying(ns->queue);
 +              blk_mq_abort_requeue_list(ns->queue);
 +              blk_mq_start_stopped_hw_queues(ns->queue, true);
 +
 +              nvme_put_ns(ns);
 +      }
 +      mutex_unlock(&ctrl->namespaces_mutex);
 +}
++EXPORT_SYMBOL_GPL(nvme_kill_queues);
 +
  void nvme_stop_queues(struct nvme_ctrl *ctrl)
  {
        struct nvme_ns *ns;
Simple merge
Simple merge
@@@ -310,10 -287,10 +298,10 @@@ static void nvme_complete_async_event(s
  
        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
-               dev_info(dev->dev, "rescanning\n");
+               dev_info(dev->ctrl.device, "rescanning\n");
 -              queue_work(nvme_workq, &dev->scan_work);
 +              nvme_queue_scan(dev);
        default:
-               dev_warn(dev->dev, "async event result %08x\n", result);
+               dev_warn(dev->ctrl.device, "async event result %08x\n", result);
        }
  }
  
@@@ -1018,7 -990,7 +1009,7 @@@ static void nvme_cancel_queue_ios(struc
        if (!blk_mq_request_started(req))
                return;
  
-       dev_dbg_ratelimited(nvmeq->q_dmadev,
 -      dev_warn(nvmeq->dev->ctrl.device,
++      dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
                 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
  
        status = NVME_SC_ABORT_REQ;
@@@ -1709,8 -1649,14 +1674,14 @@@ static int nvme_dev_add(struct nvme_de
                if (blk_mq_alloc_tag_set(&dev->tagset))
                        return 0;
                dev->ctrl.tagset = &dev->tagset;
+       } else {
+               blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+               /* Free previously allocated queues that are no longer usable */
+               nvme_free_queues(dev, dev->online_queues);
        }
 -      queue_work(nvme_workq, &dev->scan_work);
 +      nvme_queue_scan(dev);
        return 0;
  }
  
@@@ -1845,10 -1761,10 +1772,10 @@@ static void nvme_dev_disable(struct nvm
        int i;
        u32 csts = -1;
  
-       nvme_dev_list_remove(dev);
+       del_timer_sync(&dev->watchdog_timer);
  
        mutex_lock(&dev->shutdown_lock);
 -      if (dev->bar) {
 +      if (pci_is_enabled(to_pci_dev(dev->dev))) {
                nvme_stop_queues(&dev->ctrl);
                csts = readl(dev->bar + NVME_REG_CSTS);
        }
@@@ -1905,16 -1821,6 +1832,16 @@@ static void nvme_pci_free_ctrl(struct n
        kfree(dev);
  }
  
-       dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
 +static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 +{
++      dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
 +
 +      kref_get(&dev->ctrl.kref);
 +      nvme_dev_disable(dev, false);
 +      if (!schedule_work(&dev->remove_work))
 +              nvme_put_ctrl(&dev->ctrl);
 +}
 +
  static void nvme_reset_work(struct work_struct *work)
  {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
  
        result = nvme_setup_io_queues(dev);
        if (result)
 -              goto free_tags;
 +              goto out;
  
        dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
+       queue_work(nvme_workq, &dev->async_work);
  
-       result = nvme_dev_list_add(dev);
-       if (result)
-               goto out;
+       mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
  
        /*
         * Keep the controller around but remove all namespaces if we don't have
@@@ -2085,11 -1986,6 +2012,10 @@@ static int nvme_probe(struct pci_dev *p
        dev->dev = get_device(&pdev->dev);
        pci_set_drvdata(pdev, dev);
  
-       INIT_LIST_HEAD(&dev->node);
 +      result = nvme_dev_map(dev);
 +      if (result)
 +              goto free;
 +
        INIT_WORK(&dev->scan_work, nvme_dev_scan);
        INIT_WORK(&dev->reset_work, nvme_reset_work);
        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
@@@ -2145,8 -2040,11 +2076,11 @@@ static void nvme_remove(struct pci_dev 
  {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
  
+       del_timer_sync(&dev->watchdog_timer);
 +      set_bit(NVME_CTRL_REMOVING, &dev->flags);
        pci_set_drvdata(pdev, NULL);
 -      flush_work(&dev->reset_work);
+       flush_work(&dev->async_work);
        flush_work(&dev->scan_work);
        nvme_remove_namespaces(&dev->ctrl);
        nvme_uninit_ctrl(&dev->ctrl);
Simple merge