Merge branch 'for-4.9/libnvdimm' into libnvdimm-for-next
author Dan Williams <dan.j.williams@intel.com>
Fri, 7 Oct 2016 23:46:24 +0000 (16:46 -0700)
committer Dan Williams <dan.j.williams@intel.com>
Fri, 7 Oct 2016 23:46:24 +0000 (16:46 -0700)
drivers/acpi/nfit/core.c
drivers/acpi/nfit/mce.c
drivers/nvdimm/bus.c
drivers/nvdimm/core.c
drivers/nvdimm/nd.h
drivers/nvdimm/region_devs.c
tools/testing/nvdimm/test/nfit.c

diff --combined drivers/acpi/nfit/core.c
@@@ -94,50 -94,54 +94,50 @@@ static struct acpi_device *to_acpi_dev(
        return to_acpi_device(acpi_desc->dev);
  }
  
 -static int xlat_status(void *buf, unsigned int cmd)
 +static int xlat_status(void *buf, unsigned int cmd, u32 status)
  {
        struct nd_cmd_clear_error *clear_err;
        struct nd_cmd_ars_status *ars_status;
 -      struct nd_cmd_ars_start *ars_start;
 -      struct nd_cmd_ars_cap *ars_cap;
        u16 flags;
  
        switch (cmd) {
        case ND_CMD_ARS_CAP:
 -              ars_cap = buf;
 -              if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
 +              if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
                        return -ENOTTY;
  
                /* Command failed */
 -              if (ars_cap->status & 0xffff)
 +              if (status & 0xffff)
                        return -EIO;
  
                /* No supported scan types for this range */
                flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
 -              if ((ars_cap->status >> 16 & flags) == 0)
 +              if ((status >> 16 & flags) == 0)
                        return -ENOTTY;
                break;
        case ND_CMD_ARS_START:
 -              ars_start = buf;
                /* ARS is in progress */
 -              if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
 +              if ((status & 0xffff) == NFIT_ARS_START_BUSY)
                        return -EBUSY;
  
                /* Command failed */
 -              if (ars_start->status & 0xffff)
 +              if (status & 0xffff)
                        return -EIO;
                break;
        case ND_CMD_ARS_STATUS:
                ars_status = buf;
                /* Command failed */
 -              if (ars_status->status & 0xffff)
 +              if (status & 0xffff)
                        return -EIO;
                /* Check extended status (Upper two bytes) */
 -              if (ars_status->status == NFIT_ARS_STATUS_DONE)
 +              if (status == NFIT_ARS_STATUS_DONE)
                        return 0;
  
                /* ARS is in progress */
 -              if (ars_status->status == NFIT_ARS_STATUS_BUSY)
 +              if (status == NFIT_ARS_STATUS_BUSY)
                        return -EBUSY;
  
                /* No ARS performed for the current boot */
 -              if (ars_status->status == NFIT_ARS_STATUS_NONE)
 +              if (status == NFIT_ARS_STATUS_NONE)
                        return -EAGAIN;
  
                /*
                 * ARS interrupted, either we overflowed or some other
                 * agent wants the scan to stop.  If we didn't overflow
                 * then just continue with the returned results.
                 */
 -              if (ars_status->status == NFIT_ARS_STATUS_INTR) {
 +              if (status == NFIT_ARS_STATUS_INTR) {
                        if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
                                return -ENOSPC;
                        return 0;
                }
  
                /* Unknown status */
 -              if (ars_status->status >> 16)
 +              if (status >> 16)
                        return -EIO;
                break;
        case ND_CMD_CLEAR_ERROR:
                clear_err = buf;
 -              if (clear_err->status & 0xffff)
 +              if (status & 0xffff)
                        return -EIO;
                if (!clear_err->cleared)
                        return -EIO;
                break;
        }
  
 +      /* all other non-zero status results in an error */
 +      if (status)
 +              return -EIO;
        return 0;
  }
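
A standalone, illustrative sketch (not kernel code) of the status-word layout that the reworked xlat_status() above relies on: the low 16 bits of the firmware status carry the command result and the upper 16 bits carry extended status/flags. The constant values below are placeholders chosen for illustration, and decode_ars_cap_status() is a made-up helper mirroring only the ND_CMD_ARS_CAP branch.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* placeholder values, for illustration only */
    #define NFIT_ARS_CAP_NONE   1
    #define ND_ARS_VOLATILE     1
    #define ND_ARS_PERSISTENT   2

    static int decode_ars_cap_status(uint32_t status)
    {
        uint16_t cmd_status = status & 0xffff;   /* low 16 bits: command result */
        uint16_t ext_status = status >> 16;      /* high 16 bits: extended status */

        if (cmd_status == NFIT_ARS_CAP_NONE)
            return -ENOTTY;                      /* ARS not supported for this range */
        if (cmd_status)
            return -EIO;                         /* command failed outright */
        if (!(ext_status & (ND_ARS_PERSISTENT | ND_ARS_VOLATILE)))
            return -ENOTTY;                      /* no supported scan types */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", decode_ars_cap_status(ND_ARS_PERSISTENT << 16)); /* 0 */
        printf("%d\n", decode_ars_cap_status(NFIT_ARS_CAP_NONE));       /* -ENOTTY */
        printf("%d\n", decode_ars_cap_status(2));                       /* -EIO */
        return 0;
    }
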
  
@@@ -185,10 -186,10 +185,10 @@@ static int acpi_nfit_ctl(struct nvdimm_
        struct nd_cmd_pkg *call_pkg = NULL;
        const char *cmd_name, *dimm_name;
        unsigned long cmd_mask, dsm_mask;
 +      u32 offset, fw_status = 0;
        acpi_handle handle;
        unsigned int func;
        const u8 *uuid;
 -      u32 offset;
        int rc, i;
  
        func = cmd;
                                out_obj->buffer.pointer + offset, out_size);
                offset += out_size;
        }
 +
 +      /*
 +       * Set fw_status for all the commands with a known format to be
 +       * later interpreted by xlat_status().
 +       */
 +      if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
 +                      || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
 +              fw_status = *(u32 *) out_obj->buffer.pointer;
 +
        if (offset + in_buf.buffer.length < buf_len) {
                if (i >= 1) {
                        /*
                         */
                        rc = buf_len - offset - in_buf.buffer.length;
                        if (cmd_rc)
 -                              *cmd_rc = xlat_status(buf, cmd);
 +                              *cmd_rc = xlat_status(buf, cmd, fw_status);
                } else {
                        dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
                                        __func__, dimm_name, cmd_name, buf_len,
        } else {
                rc = 0;
                if (cmd_rc)
 -                      *cmd_rc = xlat_status(buf, cmd);
 +                      *cmd_rc = xlat_status(buf, cmd, fw_status);
        }
  
   out:
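
As an aside, a minimal userspace-style sketch of the fw_status capture added above: for command numbers whose payload format is known, the first 32-bit word of the _DSM output buffer is read out as the firmware status and later handed to xlat_status(). The command numbers below are placeholders, not the real ND_CMD_* values.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* placeholder command numbers, for illustration only */
    enum { CMD_ARS_CAP = 1, CMD_CLEAR_ERROR = 4, CMD_SMART = 5, CMD_VENDOR = 9 };

    static uint32_t capture_fw_status(unsigned int cmd, const void *out, size_t out_len)
    {
        uint32_t status = 0;

        if (out_len < sizeof(status))
            return 0;
        if ((cmd >= CMD_ARS_CAP && cmd <= CMD_CLEAR_ERROR) ||
            (cmd >= CMD_SMART && cmd <= CMD_VENDOR))
            memcpy(&status, out, sizeof(status));  /* leading u32 is the fw status */
        return status;
    }

    int main(void)
    {
        unsigned char payload[8] = { 0x06, 0x00, 0x01, 0x00 }; /* 0x00010006 little-endian */

        printf("%#x\n", capture_fw_status(CMD_ARS_CAP, payload, sizeof(payload)));
        return 0;
    }
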
@@@ -886,6 -878,58 +886,58 @@@ static ssize_t revision_show(struct dev
  }
  static DEVICE_ATTR_RO(revision);
  
+ static ssize_t hw_error_scrub_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+ {
+       struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+       struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+       struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+       return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
+ }
+ /*
+  * The 'hw_error_scrub' attribute can have the following values written to it:
+  * '0': Switch to the default mode where an exception will only insert
+  *      the address of the memory error into the poison and badblocks lists.
+  * '1': Enable a full scrub to happen if an exception for a memory error is
+  *      received.
+  */
+ static ssize_t hw_error_scrub_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+ {
+       struct nvdimm_bus_descriptor *nd_desc;
+       ssize_t rc;
+       long val;
+       rc = kstrtol(buf, 0, &val);
+       if (rc)
+               return rc;
+       device_lock(dev);
+       nd_desc = dev_get_drvdata(dev);
+       if (nd_desc) {
+               struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+               switch (val) {
+               case HW_ERROR_SCRUB_ON:
+                       acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
+                       break;
+               case HW_ERROR_SCRUB_OFF:
+                       acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
+                       break;
+               default:
+                       rc = -EINVAL;
+                       break;
+               }
+       }
+       device_unlock(dev);
+       if (rc)
+               return rc;
+       return size;
+ }
+ static DEVICE_ATTR_RW(hw_error_scrub);
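
A hedged usage sketch (not part of the patch): from userspace the new attribute can be toggled by writing '0' or '1' to it. The sysfs path is an assumption about a typical system; the attribute sits in the bus's "nfit" attribute group, e.g. under /sys/bus/nd/devices/ndbus0/nfit/.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* assumed path; the ndbusN name varies per system */
        const char *path = "/sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* '1' selects the full-scrub-on-memory-error behavior described above */
        if (write(fd, "1", 1) != 1)
            perror("write");
        close(fd);
        return 0;
    }
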
  /*
   * This shows the number of full Address Range Scrubs that have been
   * completed since driver load time. Userspace can wait on this using
@@@ -958,6 -1002,7 +1010,7 @@@ static umode_t nfit_visible(struct kobj
  static struct attribute *acpi_nfit_attributes[] = {
        &dev_attr_revision.attr,
        &dev_attr_scrub.attr,
+       &dev_attr_hw_error_scrub.attr,
        NULL,
  };
  
@@@ -1256,6 -1301,44 +1309,44 @@@ static struct nvdimm *acpi_nfit_dimm_by
        return NULL;
  }
  
+ void __acpi_nvdimm_notify(struct device *dev, u32 event)
+ {
+       struct nfit_mem *nfit_mem;
+       struct acpi_nfit_desc *acpi_desc;
+       dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
+                       event);
+       if (event != NFIT_NOTIFY_DIMM_HEALTH) {
+               dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
+                               event);
+               return;
+       }
+       acpi_desc = dev_get_drvdata(dev->parent);
+       if (!acpi_desc)
+               return;
+       /*
+        * If we successfully retrieved acpi_desc, then we know nfit_mem data
+        * is still valid.
+        */
+       nfit_mem = dev_get_drvdata(dev);
+       if (nfit_mem && nfit_mem->flags_attr)
+               sysfs_notify_dirent(nfit_mem->flags_attr);
+ }
+ EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
+ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
+ {
+       struct acpi_device *adev = data;
+       struct device *dev = &adev->dev;
+       device_lock(dev->parent);
+       __acpi_nvdimm_notify(dev, event);
+       device_unlock(dev->parent);
+ }
  static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                struct nfit_mem *nfit_mem, u32 device_handle)
  {
                return force_enable_dimms ? 0 : -ENODEV;
        }
  
+       if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
+               ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
+               dev_err(dev, "%s: notification registration failed\n",
+                               dev_name(&adev_dimm->dev));
+               return -ENXIO;
+       }
        /*
         * Until standardization materializes we need to consider 4
         * different command sets.  Note, that checking for function0 (bit0)
        return 0;
  }
  
+ static void shutdown_dimm_notify(void *data)
+ {
+       struct acpi_nfit_desc *acpi_desc = data;
+       struct nfit_mem *nfit_mem;
+       mutex_lock(&acpi_desc->init_mutex);
+       /*
+        * Clear out the nfit_mem->flags_attr and shut down dimm event
+        * notifications.
+        */
+       list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+               struct acpi_device *adev_dimm = nfit_mem->adev;
+               if (nfit_mem->flags_attr) {
+                       sysfs_put(nfit_mem->flags_attr);
+                       nfit_mem->flags_attr = NULL;
+               }
+               if (adev_dimm)
+                       acpi_remove_notify_handler(adev_dimm->handle,
+                                       ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+       }
+       mutex_unlock(&acpi_desc->init_mutex);
+ }
  static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
  {
        struct nfit_mem *nfit_mem;
-       int dimm_count = 0;
+       int dimm_count = 0, rc;
+       struct nvdimm *nvdimm;
  
        list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
                struct acpi_nfit_flush_address *flush;
                unsigned long flags = 0, cmd_mask;
-               struct nvdimm *nvdimm;
                u32 device_handle;
                u16 mem_flags;
-               int rc;
  
                device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
                nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
  
        }
  
-       return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
+       rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
+       if (rc)
+               return rc;
+       /*
+        * Now that dimms are successfully registered, and async registration
+        * is flushed, attempt to enable event notification.
+        */
+       list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+               struct kernfs_node *nfit_kernfs;
+               nvdimm = nfit_mem->nvdimm;
+               nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
+               if (nfit_kernfs)
+                       nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
+                                       "flags");
+               sysfs_put(nfit_kernfs);
+               if (!nfit_mem->flags_attr)
+                       dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
+                                       nvdimm_name(nvdimm));
+       }
+       return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
+                       acpi_desc);
  }
  
  static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
@@@ -1491,9 -1627,9 +1635,9 @@@ static int acpi_nfit_init_interleave_se
        if (!info)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
-               struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
+               struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nfit_set_info_map *map = &info->mapping[i];
-               struct nvdimm *nvdimm = nd_mapping->nvdimm;
+               struct nvdimm *nvdimm = mapping->nvdimm;
                struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
                struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
                                spa->range_index, i);
@@@ -1917,7 -2053,7 +2061,7 @@@ static int acpi_nfit_insert_resource(st
  }
  
  static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
-               struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
+               struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
                struct acpi_nfit_memory_map *memdev,
                struct nfit_spa *nfit_spa)
  {
                return -ENODEV;
        }
  
-       nd_mapping->nvdimm = nvdimm;
+       mapping->nvdimm = nvdimm;
        switch (nfit_spa_type(spa)) {
        case NFIT_SPA_PM:
        case NFIT_SPA_VOLATILE:
-               nd_mapping->start = memdev->address;
-               nd_mapping->size = memdev->region_size;
+               mapping->start = memdev->address;
+               mapping->size = memdev->region_size;
                break;
        case NFIT_SPA_DCR:
                nfit_mem = nvdimm_provider_data(nvdimm);
                        dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
                                        spa->range_index, nvdimm_name(nvdimm));
                } else {
-                       nd_mapping->size = nfit_mem->bdw->capacity;
-                       nd_mapping->start = nfit_mem->bdw->start_address;
+                       mapping->size = nfit_mem->bdw->capacity;
+                       mapping->start = nfit_mem->bdw->start_address;
                        ndr_desc->num_lanes = nfit_mem->bdw->windows;
                        blk_valid = 1;
                }
  
-               ndr_desc->nd_mapping = nd_mapping;
+               ndr_desc->mapping = mapping;
                ndr_desc->num_mappings = blk_valid;
                ndbr_desc = to_blk_region_desc(ndr_desc);
                ndbr_desc->enable = acpi_nfit_blk_region_enable;
@@@ -1979,7 -2115,7 +2123,7 @@@ static bool nfit_spa_is_virtual(struct 
  static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
                struct nfit_spa *nfit_spa)
  {
-       static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
+       static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
        struct nd_blk_region_desc ndbr_desc;
        struct nd_region_desc *ndr_desc;
        }
  
        memset(&res, 0, sizeof(res));
-       memset(&nd_mappings, 0, sizeof(nd_mappings));
+       memset(&mappings, 0, sizeof(mappings));
        memset(&ndbr_desc, 0, sizeof(ndbr_desc));
        res.start = spa->address;
        res.end = res.start + spa->length - 1;
  
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
-               struct nd_mapping *nd_mapping;
+               struct nd_mapping_desc *mapping;
  
                if (memdev->range_index != spa->range_index)
                        continue;
                                        spa->range_index, ND_MAX_MAPPINGS);
                        return -ENXIO;
                }
-               nd_mapping = &nd_mappings[count++];
-               rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
+               mapping = &mappings[count++];
+               rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
                                memdev, nfit_spa);
                if (rc)
                        goto out;
        }
  
-       ndr_desc->nd_mapping = nd_mappings;
+       ndr_desc->mapping = mappings;
        ndr_desc->num_mappings = count;
        rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
        if (rc)
@@@ -2678,29 -2814,30 +2822,30 @@@ static int acpi_nfit_remove(struct acpi
        return 0;
  }
  
- static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
+ void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
  {
-       struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
+       struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-       struct device *dev = &adev->dev;
        union acpi_object *obj;
        acpi_status status;
        int ret;
  
        dev_dbg(dev, "%s: event: %d\n", __func__, event);
  
-       device_lock(dev);
+       if (event != NFIT_NOTIFY_UPDATE)
+               return;
        if (!dev->driver) {
                /* dev->driver may be null if we're being removed */
                dev_dbg(dev, "%s: no driver found for dev\n", __func__);
-               goto out_unlock;
+               return;
        }
  
        if (!acpi_desc) {
                acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
                if (!acpi_desc)
-                       goto out_unlock;
-               acpi_nfit_desc_init(acpi_desc, &adev->dev);
+                       return;
+               acpi_nfit_desc_init(acpi_desc, dev);
        } else {
                /*
                 * Finish previous registration before considering new
        }
  
        /* Evaluate _FIT */
-       status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
+       status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "failed to evaluate _FIT\n");
-               goto out_unlock;
+               return;
        }
  
        obj = buf.pointer;
        } else
                dev_err(dev, "Invalid _FIT\n");
        kfree(buf.pointer);
+ }
+ EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
  
-  out_unlock:
-       device_unlock(dev);
+ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
+ {
+       device_lock(&adev->dev);
+       __acpi_nfit_notify(&adev->dev, adev->handle, event);
+       device_unlock(&adev->dev);
  }
  
  static const struct acpi_device_id acpi_nfit_ids[] = {
diff --combined drivers/acpi/nfit/mce.c
@@@ -14,6 -14,7 +14,7 @@@
   */
  #include <linux/notifier.h>
  #include <linux/acpi.h>
+ #include <linux/nd.h>
  #include <asm/mce.h>
  #include "nfit.h"
  
@@@ -42,7 -43,7 +43,7 @@@ static int nfit_handle_mce(struct notif
                list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                        struct acpi_nfit_system_address *spa = nfit_spa->spa;
  
 -                      if (nfit_spa_type(spa) == NFIT_SPA_PM)
 +                      if (nfit_spa_type(spa) != NFIT_SPA_PM)
                                continue;
                        /* find the spa that covers the mce addr */
                        if (spa->address > mce->addr)
                }
                mutex_unlock(&acpi_desc->init_mutex);
  
-               /*
-                * We can ignore an -EBUSY here because if an ARS is already
-                * in progress, just let that be the last authoritative one
-                */
-               if (found_match)
+               if (!found_match)
+                       continue;
+               /* If this fails due to an -ENOMEM, there is little we can do */
+               nvdimm_bus_add_poison(acpi_desc->nvdimm_bus,
+                               ALIGN(mce->addr, L1_CACHE_BYTES),
+                               L1_CACHE_BYTES);
+               nvdimm_region_notify(nfit_spa->nd_region,
+                               NVDIMM_REVALIDATE_POISON);
+               if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) {
+                       /*
+                        * We can ignore an -EBUSY here because if an ARS is
+                        * already in progress, just let that be the last
+                        * authoritative one
+                        */
                        acpi_nfit_ars_rescan(acpi_desc);
+               }
+               break;
        }
  
        mutex_unlock(&acpi_desc_lock);
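
For reference, an illustrative standalone sketch (not kernel code) of the containment test the handler above performs before adding poison: an MCE address belongs to a SPA range when spa->address <= addr < spa->address + spa->length. The struct and addresses below are made-up stand-ins; on a real hit the kernel code adds a cacheline-sized poison entry and notifies the region.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct spa_range {
        uint64_t address;
        uint64_t length;
    };

    static bool spa_covers(const struct spa_range *spa, uint64_t addr)
    {
        return addr >= spa->address && addr < spa->address + spa->length;
    }

    int main(void)
    {
        struct spa_range pm = { .address = 0x100000000ULL, .length = 0x40000000ULL };

        printf("%d\n", spa_covers(&pm, 0x100000123ULL)); /* 1: inside the range */
        printf("%d\n", spa_covers(&pm, 0x200000000ULL)); /* 0: outside the range */
        return 0;
    }
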
diff --combined drivers/nvdimm/bus.c
@@@ -185,12 -185,8 +185,12 @@@ long nvdimm_clear_poison(struct device 
                return -ENXIO;
  
        nd_desc = nvdimm_bus->nd_desc;
 +      /*
 +       * if ndctl does not exist, it's PMEM_LEGACY and
 +       * we want to just pretend everything is handled.
 +       */
        if (!nd_desc->ndctl)
 -              return -ENXIO;
 +              return len;
  
        memset(&ars_cap, 0, sizeof(ars_cap));
        ars_cap.address = phys;
                return rc;
        if (cmd_rc < 0)
                return cmd_rc;
+       nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
        return clear_err.cleared;
  }
  EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
diff --combined drivers/nvdimm/core.c
@@@ -99,11 -99,8 +99,11 @@@ static struct nvdimm_map *alloc_nvdimm_
        nvdimm_map->size = size;
        kref_init(&nvdimm_map->kref);
  
 -      if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
 +      if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
 +              dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
 +                              &offset, size, dev_name(dev));
                goto err_request_region;
 +      }
  
        if (flags)
                nvdimm_map->mem = memremap(offset, size, flags);
@@@ -174,9 -171,6 +174,9 @@@ void *devm_nvdimm_memremap(struct devic
                kref_get(&nvdimm_map->kref);
        nvdimm_bus_unlock(dev);
  
 +      if (!nvdimm_map)
 +              return NULL;
 +
        if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
                return NULL;
  
@@@ -547,11 -541,12 +547,12 @@@ void nvdimm_badblocks_populate(struct n
  }
  EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
  
- static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+ static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
+                       gfp_t flags)
  {
        struct nd_poison *pl;
  
-       pl = kzalloc(sizeof(*pl), GFP_KERNEL);
+       pl = kzalloc(sizeof(*pl), flags);
        if (!pl)
                return -ENOMEM;
  
@@@ -567,7 -562,7 +568,7 @@@ static int bus_add_poison(struct nvdimm
        struct nd_poison *pl;
  
        if (list_empty(&nvdimm_bus->poison_list))
-               return add_poison(nvdimm_bus, addr, length);
+               return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
  
        /*
         * There is a chance this is a duplicate, check for those first.
         * as any overlapping ranges will get resolved when the list is consumed
         * and converted to badblocks
         */
-       return add_poison(nvdimm_bus, addr, length);
+       return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
  }
  
  int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
  }
  EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
  
+ void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+               phys_addr_t start, unsigned int len)
+ {
+       struct list_head *poison_list = &nvdimm_bus->poison_list;
+       u64 clr_end = start + len - 1;
+       struct nd_poison *pl, *next;
+       nvdimm_bus_lock(&nvdimm_bus->dev);
+       WARN_ON_ONCE(list_empty(poison_list));
+       /*
+        * [start, clr_end] is the poison interval being cleared.
+        * [pl->start, pl_end] is the poison_list entry we're comparing
+        * the above interval against. The poison list entry may need
+        * to be modified (update either start or length), deleted, or
+        * split into two based on the overlap characteristics
+        */
+       list_for_each_entry_safe(pl, next, poison_list, list) {
+               u64 pl_end = pl->start + pl->length - 1;
+               /* Skip intervals with no intersection */
+               if (pl_end < start)
+                       continue;
+               if (pl->start >  clr_end)
+                       continue;
+               /* Delete completely overlapped poison entries */
+               if ((pl->start >= start) && (pl_end <= clr_end)) {
+                       list_del(&pl->list);
+                       kfree(pl);
+                       continue;
+               }
+               /* Adjust start point of partially cleared entries */
+               if ((start <= pl->start) && (clr_end > pl->start)) {
+                       pl->length -= clr_end - pl->start + 1;
+                       pl->start = clr_end + 1;
+                       continue;
+               }
+               /* Adjust pl->length for partial clearing at the tail end */
+               if ((pl->start < start) && (pl_end <= clr_end)) {
+                       /* pl->start remains the same */
+                       pl->length = start - pl->start;
+                       continue;
+               }
+               /*
+                * If clearing in the middle of an entry, we split it into
+                * two by modifying the current entry to represent one half of
+                * the split, and adding a new entry for the second half.
+                */
+               if ((pl->start < start) && (pl_end > clr_end)) {
+                       u64 new_start = clr_end + 1;
+                       u64 new_len = pl_end - new_start + 1;
+                       /* Add new entry covering the right half */
+                       add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
+                       /* Adjust this entry to cover the left half */
+                       pl->length = start - pl->start;
+                       continue;
+               }
+       }
+       nvdimm_bus_unlock(&nvdimm_bus->dev);
+ }
+ EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);
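
The interval arithmetic in nvdimm_clear_from_poison_list() is easiest to see in isolation. Below is an illustrative standalone sketch (plain C, not kernel API) that clips a cleared range out of a single poison entry and exercises the four overlap cases the comment above describes: full containment, head trim, tail trim, and split.

    #include <stdint.h>
    #include <stdio.h>

    struct span { uint64_t start, len; };

    /* clip [clr_start, clr_start + clr_len) out of span s; writes the surviving
     * pieces to out[] and returns how many there are (0, 1 or 2) */
    static int clip_span(struct span s, uint64_t clr_start, uint64_t clr_len,
            struct span out[2])
    {
        uint64_t s_end = s.start + s.len - 1;
        uint64_t clr_end = clr_start + clr_len - 1;
        int n = 0;

        if (s_end < clr_start || s.start > clr_end) {  /* no intersection */
            out[n++] = s;
            return n;
        }
        if (s.start < clr_start)  /* keep the left remainder (clearing hit the tail) */
            out[n++] = (struct span){ s.start, clr_start - s.start };
        if (s_end > clr_end)      /* keep the right remainder (clearing hit the head) */
            out[n++] = (struct span){ clr_end + 1, s_end - clr_end };
        return n;                 /* n == 0: entry completely cleared */
    }

    int main(void)
    {
        struct span cases[] = {
            { 0x1000, 0x100 },    /* fully inside the cleared range: deleted */
            { 0x0f80, 0x100 },    /* overlaps at the tail: length shrinks */
            { 0x10c0, 0x100 },    /* overlaps at the head: start moves up */
            { 0x0f00, 0x400 },    /* covers the cleared range: split in two */
        };
        unsigned int i;

        for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
            struct span out[2];
            int j, n = clip_span(cases[i], 0x1000, 0x100, out);

            printf("case %u -> %d span(s)", i, n);
            for (j = 0; j < n; j++)
                printf(" [%#llx +%#llx]", (unsigned long long)out[j].start,
                        (unsigned long long)out[j].len);
            printf("\n");
        }
        return 0;
    }
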
  #ifdef CONFIG_BLK_DEV_INTEGRITY
  int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
  {
diff --combined drivers/nvdimm/nd.h
@@@ -52,28 -52,10 +52,28 @@@ struct nvdimm_drvdata 
  struct nd_region_data {
        int ns_count;
        int ns_active;
 -      unsigned int flush_mask;
 -      void __iomem *flush_wpq[0][0];
 +      unsigned int hints_shift;
 +      void __iomem *flush_wpq[0];
  };
  
 +static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
 +              int dimm, int hint)
 +{
 +      unsigned int num = 1 << ndrd->hints_shift;
 +      unsigned int mask = num - 1;
 +
 +      return ndrd->flush_wpq[dimm * num + (hint & mask)];
 +}
 +
 +static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
 +              int hint, void __iomem *flush)
 +{
 +      unsigned int num = 1 << ndrd->hints_shift;
 +      unsigned int mask = num - 1;
 +
 +      ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
 +}
 +
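
A quick illustrative sketch of the flush hint table layout the two helpers above encode: the old flush_wpq[dimm][hint] two-dimensional array becomes a flat array, indexed as dimm * (1 << hints_shift) + (hint & mask). The sizes below are made up.

    #include <stdio.h>

    #define HINTS_SHIFT  2                 /* 1 << 2 == 4 hint slots per dimm */
    #define NUM_DIMMS    3

    static void *flush_wpq[NUM_DIMMS << HINTS_SHIFT];

    static unsigned int wpq_index(unsigned int dimm, unsigned int hint)
    {
        unsigned int num = 1u << HINTS_SHIFT;
        unsigned int mask = num - 1;

        return dimm * num + (hint & mask); /* hint wraps within a dimm's slots */
    }

    int main(void)
    {
        /* hint 5 wraps to slot 1 of dimm 2: index 2 * 4 + 1 == 9 */
        printf("index(2, 5) = %u\n", wpq_index(2, 5));
        printf("table entries = %zu\n", sizeof(flush_wpq) / sizeof(flush_wpq[0]));
        return 0;
    }
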
  static inline struct nd_namespace_index *to_namespace_index(
                struct nvdimm_drvdata *ndd, int i)
  {
@@@ -101,9 -83,6 +101,6 @@@ static inline struct nd_namespace_inde
                (unsigned long long) (res ? resource_size(res) : 0), \
                (unsigned long long) (res ? res->start : 0), ##arg)
  
- #define for_each_label(l, label, labels) \
-       for (l = 0; (label = labels ? labels[l] : NULL); l++)
  #define for_each_dpa_resource(ndd, res) \
        for (res = (ndd)->dpa.child; res; res = res->sibling)
  
@@@ -116,6 -95,31 +113,31 @@@ struct nd_percpu_lane 
        spinlock_t lock;
  };
  
+ struct nd_label_ent {
+       struct list_head list;
+       struct nd_namespace_label *label;
+ };
+ enum nd_mapping_lock_class {
+       ND_MAPPING_CLASS0,
+       ND_MAPPING_UUID_SCAN,
+ };
+ struct nd_mapping {
+       struct nvdimm *nvdimm;
+       u64 start;
+       u64 size;
+       struct list_head labels;
+       struct mutex lock;
+       /*
+        * @ndd is for private use at region enable / disable time for
+        * get_ndd() + put_ndd(), all other nd_mapping to ndd
+        * conversions use to_ndd() which respects enabled state of the
+        * nvdimm.
+        */
+       struct nvdimm_drvdata *ndd;
+ };
  struct nd_region {
        struct device dev;
        struct ida ns_ida;
@@@ -209,6 -213,7 +231,7 @@@ void nvdimm_exit(void)
  void nd_region_exit(void);
  struct nvdimm;
  struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
+ int nvdimm_check_config_data(struct device *dev);
  int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
  int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
  int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
diff --combined drivers/nvdimm/region_devs.c
@@@ -38,7 -38,7 +38,7 @@@ static int nvdimm_map_flush(struct devi
  
        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
 -      for (i = 0; i < nvdimm->num_flush; i++) {
 +      for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;
  
                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
 -                                      ndrd->flush_wpq[dimm][j] & PAGE_MASK);
 +                                      ndrd_get_flush_wpq(ndrd, dimm, j)
 +                                      & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
 -                                      PHYS_PFN(pfn), PAGE_SIZE);
 +                                      PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
 -              ndrd->flush_wpq[dimm][i] = flush_page
 -                      + (res->start & ~PAGE_MASK);
 +              ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
 +                              + (res->start & ~PAGE_MASK));
        }
  
        return 0;
@@@ -70,7 -69,7 +70,7 @@@
  
  int nd_region_activate(struct nd_region *nd_region)
  {
 -      int i, num_flush = 0;
 +      int i, j, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);
  
 -      ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
 +      if (!num_flush)
 +              return 0;
 +
 +      ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                        return rc;
        }
  
 +      /*
 +       * Clear out entries that are duplicates. This should prevent the
 +       * extra flushings.
 +       */
 +      for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
 +              /* ignore if NULL already */
 +              if (!ndrd_get_flush_wpq(ndrd, i, 0))
 +                      continue;
 +
 +              for (j = i + 1; j < nd_region->ndr_mappings; j++)
 +                      if (ndrd_get_flush_wpq(ndrd, i, 0) ==
 +                          ndrd_get_flush_wpq(ndrd, j, 0))
 +                              ndrd_set_flush_wpq(ndrd, j, 0, NULL);
 +      }
 +
        return 0;
  }
  
@@@ -313,9 -294,8 +313,8 @@@ resource_size_t nd_region_available_dpa
                                blk_max_overlap = overlap;
                                goto retry;
                        }
-               } else if (is_nd_blk(&nd_region->dev)) {
-                       available += nd_blk_available_dpa(nd_mapping);
-               }
+               } else if (is_nd_blk(&nd_region->dev))
+                       available += nd_blk_available_dpa(nd_region);
        }
  
        return available;
@@@ -506,6 -486,17 +505,17 @@@ u64 nd_region_interleave_set_cookie(str
        return 0;
  }
  
+ void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
+ {
+       struct nd_label_ent *label_ent, *e;
+       WARN_ON(!mutex_is_locked(&nd_mapping->lock));
+       list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+               list_del(&label_ent->list);
+               kfree(label_ent);
+       }
+ }
  /*
   * Upon successful probe/remove, take/release a reference on the
   * associated interleave set (if present), and plant new btt + namespace
@@@ -526,8 -517,10 +536,10 @@@ static void nd_region_notify_driver_act
                        struct nvdimm_drvdata *ndd = nd_mapping->ndd;
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;
  
-                       kfree(nd_mapping->labels);
-                       nd_mapping->labels = NULL;
+                       mutex_lock(&nd_mapping->lock);
+                       nd_mapping_free_labels(nd_mapping);
+                       mutex_unlock(&nd_mapping->lock);
                        put_ndd(ndd);
                        nd_mapping->ndd = NULL;
                        if (ndd)
                if (is_nd_pmem(dev))
                        return;
        }
-       if (dev->parent && is_nd_blk(dev->parent) && probe) {
+       if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
+                       && probe) {
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->ns_seed == dev)
-                       nd_region_create_blk_seed(nd_region);
+                       nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_btt(dev) && probe) {
                nvdimm_bus_lock(dev);
                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
-               if (nd_region->ns_seed == &nd_btt->ndns->dev &&
-                               is_nd_blk(dev->parent))
-                       nd_region_create_blk_seed(nd_region);
+               if (nd_region->ns_seed == &nd_btt->ndns->dev)
+                       nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_pfn(dev) && probe) {
+               struct nd_pfn *nd_pfn = to_nd_pfn(dev);
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->pfn_seed == dev)
                        nd_region_create_pfn_seed(nd_region);
+               if (nd_region->ns_seed == &nd_pfn->ndns->dev)
+                       nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_dax(dev) && probe) {
+               struct nd_dax *nd_dax = to_nd_dax(dev);
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->dax_seed == dev)
                        nd_region_create_dax_seed(nd_region);
+               if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
+                       nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
  }
@@@ -774,10 -775,10 +794,10 @@@ static struct nd_region *nd_region_crea
        int ro = 0;
  
        for (i = 0; i < ndr_desc->num_mappings; i++) {
-               struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
-               struct nvdimm *nvdimm = nd_mapping->nvdimm;
+               struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
+               struct nvdimm *nvdimm = mapping->nvdimm;
  
-               if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
+               if ((mapping->start | mapping->size) % SZ_4K) {
                        dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
                                        caller, dev_name(&nvdimm->dev), i);
  
                ndl->count = 0;
        }
  
-       memcpy(nd_region->mapping, ndr_desc->nd_mapping,
-                       sizeof(struct nd_mapping) * ndr_desc->num_mappings);
        for (i = 0; i < ndr_desc->num_mappings; i++) {
-               struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
-               struct nvdimm *nvdimm = nd_mapping->nvdimm;
+               struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
+               struct nvdimm *nvdimm = mapping->nvdimm;
+               nd_region->mapping[i].nvdimm = nvdimm;
+               nd_region->mapping[i].start = mapping->start;
+               nd_region->mapping[i].size = mapping->size;
+               INIT_LIST_HEAD(&nd_region->mapping[i].labels);
+               mutex_init(&nd_region->mapping[i].lock);
  
                get_device(&nvdimm->dev);
        }
@@@ -919,8 -924,8 +943,8 @@@ void nvdimm_flush(struct nd_region *nd_
         */
        wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
 -              if (ndrd->flush_wpq[i][0])
 -                      writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
 +              if (ndrd_get_flush_wpq(ndrd, i, 0))
 +                      writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();
  }
  EXPORT_SYMBOL_GPL(nvdimm_flush);
@@@ -944,7 -949,7 +968,7 @@@ int nvdimm_has_flush(struct nd_region *
  
        for (i = 0; i < nd_region->ndr_mappings; i++)
                /* flush hints present, flushing required */
 -              if (ndrd->flush_wpq[i][0])
 +              if (ndrd_get_flush_wpq(ndrd, i, 0))
                        return 1;
  
        /*
diff --combined tools/testing/nvdimm/test/nfit.c
@@@ -132,6 -132,8 +132,8 @@@ static u32 handle[NUM_DCR] = 
        [4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
  };
  
+ static unsigned long dimm_fail_cmd_flags[NUM_DCR];
  struct nfit_test {
        struct acpi_nfit_desc acpi_desc;
        struct platform_device pdev;
        int (*alloc)(struct nfit_test *t);
        void (*setup)(struct nfit_test *t);
        int setup_hotplug;
+       union acpi_object **_fit;
+       dma_addr_t _fit_dma;
        struct ars_state {
                struct nd_cmd_ars_status *ars_status;
                unsigned long deadline;
                spinlock_t lock;
        } ars_state;
+       struct device *dimm_dev[NUM_DCR];
  };
  
  static struct nfit_test *to_nfit_test(struct device *dev)
@@@ -411,6 -416,9 +416,9 @@@ static int nfit_test_ctl(struct nvdimm_
                if (i >= ARRAY_SIZE(handle))
                        return -ENXIO;
  
+               if ((1 << func) & dimm_fail_cmd_flags[i])
+                       return -EIO;
                switch (func) {
                case ND_CMD_GET_CONFIG_SIZE:
                        rc = nfit_test_cmd_get_config_size(buf, buf_len);
                        break;
                case ND_CMD_SMART_THRESHOLD:
                        rc = nfit_test_cmd_smart_threshold(buf, buf_len);
+                       device_lock(&t->pdev.dev);
+                       __acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
+                       device_unlock(&t->pdev.dev);
                        break;
                default:
                        return -ENOTTY;
@@@ -467,14 -478,12 +478,12 @@@ static struct nfit_test *instances[NUM_
  static void release_nfit_res(void *data)
  {
        struct nfit_test_resource *nfit_res = data;
-       struct resource *res = nfit_res->res;
  
        spin_lock(&nfit_test_lock);
        list_del(&nfit_res->list);
        spin_unlock(&nfit_test_lock);
  
        vfree(nfit_res->buf);
-       kfree(res);
        kfree(nfit_res);
  }
  
@@@ -482,12 -491,11 +491,11 @@@ static void *__test_alloc(struct nfit_t
                void *buf)
  {
        struct device *dev = &t->pdev.dev;
-       struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
        struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
                        GFP_KERNEL);
        int rc;
  
-       if (!res || !buf || !nfit_res)
+       if (!buf || !nfit_res)
                goto err;
        rc = devm_add_action(dev, release_nfit_res, nfit_res);
        if (rc)
        memset(buf, 0, size);
        nfit_res->dev = dev;
        nfit_res->buf = buf;
-       nfit_res->res = res;
-       res->start = *dma;
-       res->end = *dma + size - 1;
-       res->name = "NFIT";
+       nfit_res->res.start = *dma;
+       nfit_res->res.end = *dma + size - 1;
+       nfit_res->res.name = "NFIT";
+       spin_lock_init(&nfit_res->lock);
+       INIT_LIST_HEAD(&nfit_res->requests);
        spin_lock(&nfit_test_lock);
        list_add(&nfit_res->list, &t->resources);
        spin_unlock(&nfit_test_lock);
   err:
        if (buf)
                vfree(buf);
-       kfree(res);
        kfree(nfit_res);
        return NULL;
  }
@@@ -533,13 -541,13 +541,13 @@@ static struct nfit_test_resource *nfit_
                        continue;
                spin_lock(&nfit_test_lock);
                list_for_each_entry(n, &t->resources, list) {
-                       if (addr >= n->res->start && (addr < n->res->start
-                                               + resource_size(n->res))) {
+                       if (addr >= n->res.start && (addr < n->res.start
+                                               + resource_size(&n->res))) {
                                nfit_res = n;
                                break;
                        } else if (addr >= (unsigned long) n->buf
                                        && (addr < (unsigned long) n->buf
-                                               + resource_size(n->res))) {
+                                               + resource_size(&n->res))) {
                                nfit_res = n;
                                break;
                        }
@@@ -564,6 -572,86 +572,86 @@@ static int ars_state_init(struct devic
        return 0;
  }
  
+ static void put_dimms(void *data)
+ {
+       struct device **dimm_dev = data;
+       int i;
+       for (i = 0; i < NUM_DCR; i++)
+               if (dimm_dev[i])
+                       device_unregister(dimm_dev[i]);
+ }
+ static struct class *nfit_test_dimm;
+ static int dimm_name_to_id(struct device *dev)
+ {
+       int dimm;
+       if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1
+                       || dimm >= NUM_DCR || dimm < 0)
+               return -ENXIO;
+       return dimm;
+ }
+ static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+ {
+       int dimm = dimm_name_to_id(dev);
+       if (dimm < 0)
+               return dimm;
+       return sprintf(buf, "%#x", handle[dimm]);
+ }
+ DEVICE_ATTR_RO(handle);
+ static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+ {
+       int dimm = dimm_name_to_id(dev);
+       if (dimm < 0)
+               return dimm;
+       return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
+ }
+ static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t size)
+ {
+       int dimm = dimm_name_to_id(dev);
+       unsigned long val;
+       ssize_t rc;
+       if (dimm < 0)
+               return dimm;
+       rc = kstrtol(buf, 0, &val);
+       if (rc)
+               return rc;
+       dimm_fail_cmd_flags[dimm] = val;
+       return size;
+ }
+ static DEVICE_ATTR_RW(fail_cmd);
+ static struct attribute *nfit_test_dimm_attributes[] = {
+       &dev_attr_fail_cmd.attr,
+       &dev_attr_handle.attr,
+       NULL,
+ };
+ static struct attribute_group nfit_test_dimm_attribute_group = {
+       .attrs = nfit_test_dimm_attributes,
+ };
+ static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
+       &nfit_test_dimm_attribute_group,
+       NULL,
+ };
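
A hedged usage sketch (not part of the patch) for the new fail_cmd attribute: each set bit makes nfit_test_ctl() return -EIO for that function number, per the (1 << func) check added earlier in this file. The sysfs path follows from the "nfit_test_dimm" class and "test_dimm%d" device names but is an assumption about a typical setup, and the function number used below is hypothetical.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* assumed path derived from the class/device names above */
        const char *path = "/sys/class/nfit_test_dimm/test_dimm0/fail_cmd";
        char buf[32];
        int func = 5;  /* hypothetical DSM function number to fail */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        snprintf(buf, sizeof(buf), "%#lx", 1UL << func);
        if (write(fd, buf, strlen(buf)) < 0)
            perror("write");
        close(fd);
        return 0;
    }
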
  static int nfit_test0_alloc(struct nfit_test *t)
  {
        size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
                        return -ENOMEM;
                sprintf(t->label[i], "label%d", i);
  
 -              t->flush[i] = test_alloc(t, sizeof(u64) * NUM_HINTS,
 +              t->flush[i] = test_alloc(t, max(PAGE_SIZE,
 +                                      sizeof(u64) * NUM_HINTS),
                                &t->flush_dma[i]);
                if (!t->flush[i])
                        return -ENOMEM;
                        return -ENOMEM;
        }
  
+       t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
+       if (!t->_fit)
+               return -ENOMEM;
+       if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev))
+               return -ENOMEM;
+       for (i = 0; i < NUM_DCR; i++) {
+               t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
+                               &t->pdev.dev, 0, NULL,
+                               nfit_test_dimm_attribute_groups,
+                               "test_dimm%d", i);
+               if (!t->dimm_dev[i])
+                       return -ENOMEM;
+       }
        return ars_state_init(&t->pdev.dev, &t->ars_state);
  }
  
@@@ -1409,6 -1511,8 +1512,8 @@@ static int nfit_test_probe(struct platf
        struct acpi_nfit_desc *acpi_desc;
        struct device *dev = &pdev->dev;
        struct nfit_test *nfit_test;
+       struct nfit_mem *nfit_mem;
+       union acpi_object *obj;
        int rc;
  
        nfit_test = to_nfit_test(&pdev->dev);
        if (nfit_test->setup != nfit_test0_setup)
                return 0;
  
-       flush_work(&acpi_desc->work);
        nfit_test->setup_hotplug = 1;
        nfit_test->setup(nfit_test);
  
-       rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
-                       nfit_test->nfit_size);
-       if (rc)
-               return rc;
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (!obj)
+               return -ENOMEM;
+       obj->type = ACPI_TYPE_BUFFER;
+       obj->buffer.length = nfit_test->nfit_size;
+       obj->buffer.pointer = nfit_test->nfit_buf;
+       *(nfit_test->_fit) = obj;
+       __acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);
+       /* associate dimm devices with nfit_mem data for notification testing */
+       mutex_lock(&acpi_desc->init_mutex);
+       list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+               u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
+               int i;
+               for (i = 0; i < NUM_DCR; i++)
+                       if (nfit_handle == handle[i])
+                               dev_set_drvdata(nfit_test->dimm_dev[i],
+                                               nfit_mem);
+       }
+       mutex_unlock(&acpi_desc->init_mutex);
  
        return 0;
  }
@@@ -1518,6 -1638,10 +1639,10 @@@ static __init int nfit_test_init(void
  {
        int rc, i;
  
+       nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
+       if (IS_ERR(nfit_test_dimm))
+               return PTR_ERR(nfit_test_dimm);
        nfit_test_setup(nfit_test_lookup);
  
        for (i = 0; i < NUM_NFITS; i++) {
@@@ -1584,6 -1708,7 +1709,7 @@@ static __exit void nfit_test_exit(void
        for (i = 0; i < NUM_NFITS; i++)
                platform_device_unregister(&instances[i]->pdev);
        nfit_test_teardown();
+       class_destroy(nfit_test_dimm);
  }
  
  module_init(nfit_test_init);