Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Feb 2015 18:28:45 +0000 (10:28 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Feb 2015 18:28:45 +0000 (10:28 -0800)
Pull first round of SCSI updates from James Bottomley:
 "This is the usual grab bag of driver updates (hpsa, storvsc, mp2sas,
  megaraid_sas, ses) plus an assortment of minor updates.

  There's also an update to ufs which adds new phy drivers and finally a
  new logging infrastructure for SCSI"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (114 commits)
  scsi_logging: return void for dev_printk() functions
  scsi: print single-character strings with seq_putc
  scsi: merge consecutive seq_puts calls
  scsi: replace seq_printf with seq_puts
  aha152x: replace seq_printf with seq_puts
  advansys: replace seq_printf with seq_puts
  scsi: remove SPRINTF macro
  sg: remove an unused variable
  hpsa: Use local workqueues instead of system workqueues
  hpsa: add in P840ar controller model name
  hpsa: add in gen9 controller model names
  hpsa: detect and report failures changing controller transport modes
  hpsa: shorten the wait for the CISS doorbell mode change ack
  hpsa: refactor duplicated scan completion code into a new routine
  hpsa: move SG descriptor set-up out of hpsa_scatter_gather()
  hpsa: do not use function pointers in fast path command submission
  hpsa: print CDBs instead of kernel virtual addresses for uncommon errors
  hpsa: do not use a void pointer for scsi_cmd field of struct CommandList
  hpsa: return failed from device reset/abort handlers
  hpsa: check for ctlr lockup after command allocation in main io path
  ...

92 files changed:
drivers/ata/libata-eh.c
drivers/misc/enclosure.c
drivers/phy/Kconfig
drivers/phy/Makefile
drivers/phy/phy-qcom-ufs-i.h [new file with mode: 0644]
drivers/phy/phy-qcom-ufs-qmp-14nm.c [new file with mode: 0644]
drivers/phy/phy-qcom-ufs-qmp-14nm.h [new file with mode: 0644]
drivers/phy/phy-qcom-ufs-qmp-20nm.c [new file with mode: 0644]
drivers/phy/phy-qcom-ufs-qmp-20nm.h [new file with mode: 0644]
drivers/phy/phy-qcom-ufs.c [new file with mode: 0644]
drivers/scsi/3w-9xxx.c
drivers/scsi/BusLogic.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/NCR5380.c
drivers/scsi/advansys.c
drivers/scsi/aha152x.c
drivers/scsi/aic7xxx/aic79xx_proc.c
drivers/scsi/aic7xxx/aic7xxx_proc.c
drivers/scsi/arm/fas216.c
drivers/scsi/atari_NCR5380.c
drivers/scsi/atp870u.c
drivers/scsi/ch.c
drivers/scsi/constants.c
drivers/scsi/dc395x.c
drivers/scsi/dpt_i2o.c
drivers/scsi/eata_pio.c
drivers/scsi/esas2r/esas2r_main.c
drivers/scsi/esp_scsi.c
drivers/scsi/gdth_proc.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/hpsa_cmd.h
drivers/scsi/in2000.c
drivers/scsi/ips.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/megaraid.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fp.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/megaraid/megaraid_sas_fusion.h
drivers/scsi/mpt2sas/mpi/mpi2.h
drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
drivers/scsi/mpt2sas/mpi/mpi2_tool.h
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt2sas/mpt2sas_config.c
drivers/scsi/mpt2sas/mpt2sas_ctl.c
drivers/scsi/mpt2sas/mpt2sas_ctl.h
drivers/scsi/mpt2sas/mpt2sas_debug.h
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_config.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_ctl.h
drivers/scsi/mpt3sas/mpt3sas_debug.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
drivers/scsi/nsp32.c
drivers/scsi/pcmcia/nsp_cs.c
drivers/scsi/qla2xxx/qla_dfs.c
drivers/scsi/scsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_logging.c [new file with mode: 0644]
drivers/scsi/scsi_proc.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_trace.c
drivers/scsi/sd.c
drivers/scsi/ses.c
drivers/scsi/sg.c
drivers/scsi/sr_ioctl.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/ufs-qcom.c [new file with mode: 0644]
drivers/scsi/ufs/ufs-qcom.h [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c
drivers/scsi/wd33c93.c
drivers/scsi/wd7000.c
drivers/xen/xen-scsiback.c
include/linux/enclosure.h
include/linux/phy/phy-qcom-ufs.h [new file with mode: 0644]
include/scsi/scsi.h
include/scsi/scsi_dbg.h
include/scsi/scsi_device.h

diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a9f5aed..d2029a4 100644
@@ -2481,7 +2481,6 @@ static void ata_eh_link_report(struct ata_link *link)
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
                struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
-               const u8 *cdb = qc->cdb;
                char data_buf[20] = "";
                char cdb_buf[70] = "";
 
@@ -2509,16 +2508,15 @@ static void ata_eh_link_report(struct ata_link *link)
                }
 
                if (ata_is_atapi(qc->tf.protocol)) {
-                       if (qc->scsicmd)
-                               scsi_print_command(qc->scsicmd);
-                       else
-                               snprintf(cdb_buf, sizeof(cdb_buf),
-                                "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
-                                "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
-                                cdb[0], cdb[1], cdb[2], cdb[3],
-                                cdb[4], cdb[5], cdb[6], cdb[7],
-                                cdb[8], cdb[9], cdb[10], cdb[11],
-                                cdb[12], cdb[13], cdb[14], cdb[15]);
+                       const u8 *cdb = qc->cdb;
+                       size_t cdb_len = qc->dev->cdb_len;
+
+                       if (qc->scsicmd) {
+                               cdb = qc->scsicmd->cmnd;
+                               cdb_len = qc->scsicmd->cmd_len;
+                       }
+                       __scsi_format_command(cdb_buf, sizeof(cdb_buf),
+                                             cdb, cdb_len);
                } else {
                        const char *descr = ata_get_cmd_descript(cmd->command);
                        if (descr)
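
The hunk above drops the open-coded snprintf() hex dump in favour of __scsi_format_command(), which formats a CDB into a caller-supplied buffer, and it prefers the attached SCSI command's CDB when one exists. As a quick, hedged illustration of the call pattern, assuming only the signature visible in the hunk (destination buffer, buffer size, CDB bytes, CDB length); the helper name and the READ(10) CDB bytes below are made up for illustration:

	#include <linux/libata.h>	/* ata_link_err() */
	#include <scsi/scsi_dbg.h>	/* __scsi_format_command() */

	/* Illustration only: format a READ(10) CDB into a buffer for later logging. */
	static void example_format_cdb(struct ata_link *link)
	{
		static const u8 cdb[10] = { 0x28, 0, 0, 0, 0, 0x10, 0, 0, 0x08, 0 };
		char cdb_buf[70] = "";

		__scsi_format_command(cdb_buf, sizeof(cdb_buf), cdb, sizeof(cdb));
		ata_link_err(link, "cdb %s\n", cdb_buf);
	}
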
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 180a544..38552a3 100644
@@ -145,8 +145,11 @@ enclosure_register(struct device *dev, const char *name, int components,
        if (err)
                goto err;
 
-       for (i = 0; i < components; i++)
+       for (i = 0; i < components; i++) {
                edev->component[i].number = -1;
+               edev->component[i].slot = -1;
+               edev->component[i].power_status = 1;
+       }
 
        mutex_lock(&container_list_lock);
        list_add_tail(&edev->node, &container_list);
@@ -273,27 +276,26 @@ enclosure_component_find_by_name(struct enclosure_device *edev,
 static const struct attribute_group *enclosure_component_groups[];
 
 /**
- * enclosure_component_register - add a particular component to an enclosure
+ * enclosure_component_alloc - prepare a new enclosure component
  * @edev:      the enclosure to add the component
  * @num:       the device number
  * @type:      the type of component being added
  * @name:      an optional name to appear in sysfs (leave NULL if none)
  *
- * Registers the component.  The name is optional for enclosures that
- * give their components a unique name.  If not, leave the field NULL
- * and a name will be assigned.
+ * The name is optional for enclosures that give their components a unique
+ * name.  If not, leave the field NULL and a name will be assigned.
  *
  * Returns a pointer to the enclosure component or an error.
  */
 struct enclosure_component *
-enclosure_component_register(struct enclosure_device *edev,
-                            unsigned int number,
-                            enum enclosure_component_type type,
-                            const char *name)
+enclosure_component_alloc(struct enclosure_device *edev,
+                         unsigned int number,
+                         enum enclosure_component_type type,
+                         const char *name)
 {
        struct enclosure_component *ecomp;
        struct device *cdev;
-       int err, i;
+       int i;
        char newname[COMPONENT_NAME_SIZE];
 
        if (number >= edev->components)
@@ -327,14 +329,30 @@ enclosure_component_register(struct enclosure_device *edev,
        cdev->release = enclosure_component_release;
        cdev->groups = enclosure_component_groups;
 
+       return ecomp;
+}
+EXPORT_SYMBOL_GPL(enclosure_component_alloc);
+
+/**
+ * enclosure_component_register - publishes an initialized enclosure component
+ * @ecomp:     component to add
+ *
+ * Returns 0 on successful registration, releases the component otherwise
+ */
+int enclosure_component_register(struct enclosure_component *ecomp)
+{
+       struct device *cdev;
+       int err;
+
+       cdev = &ecomp->cdev;
        err = device_register(cdev);
        if (err) {
                ecomp->number = -1;
                put_device(cdev);
-               return ERR_PTR(err);
+               return err;
        }
 
-       return ecomp;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(enclosure_component_register);
 
@@ -417,8 +435,21 @@ static ssize_t components_show(struct device *cdev,
 }
 static DEVICE_ATTR_RO(components);
 
+static ssize_t id_show(struct device *cdev,
+                                struct device_attribute *attr,
+                                char *buf)
+{
+       struct enclosure_device *edev = to_enclosure_device(cdev);
+
+       if (edev->cb->show_id)
+               return edev->cb->show_id(edev, buf);
+       return -EINVAL;
+}
+static DEVICE_ATTR_RO(id);
+
 static struct attribute *enclosure_class_attrs[] = {
        &dev_attr_components.attr,
+       &dev_attr_id.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(enclosure_class);
@@ -553,6 +584,40 @@ static ssize_t set_component_locate(struct device *cdev,
        return count;
 }
 
+static ssize_t get_component_power_status(struct device *cdev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+       struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+       if (edev->cb->get_power_status)
+               edev->cb->get_power_status(edev, ecomp);
+       return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
+}
+
+static ssize_t set_component_power_status(struct device *cdev,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t count)
+{
+       struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+       struct enclosure_component *ecomp = to_enclosure_component(cdev);
+       int val;
+
+       if (strncmp(buf, "on", 2) == 0 &&
+           (buf[2] == '\n' || buf[2] == '\0'))
+               val = 1;
+       else if (strncmp(buf, "off", 3) == 0 &&
+           (buf[3] == '\n' || buf[3] == '\0'))
+               val = 0;
+       else
+               return -EINVAL;
+
+       if (edev->cb->set_power_status)
+               edev->cb->set_power_status(edev, ecomp, val);
+       return count;
+}
+
 static ssize_t get_component_type(struct device *cdev,
                                  struct device_attribute *attr, char *buf)
 {
@@ -561,6 +626,20 @@ static ssize_t get_component_type(struct device *cdev,
        return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
 }
 
+static ssize_t get_component_slot(struct device *cdev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct enclosure_component *ecomp = to_enclosure_component(cdev);
+       int slot;
+
+       /* if the enclosure does not override then use 'number' as a stand-in */
+       if (ecomp->slot >= 0)
+               slot = ecomp->slot;
+       else
+               slot = ecomp->number;
+
+       return snprintf(buf, 40, "%d\n", slot);
+}
 
 static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
                    set_component_fault);
@@ -570,14 +649,19 @@ static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
                   set_component_active);
 static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
                   set_component_locate);
+static DEVICE_ATTR(power_status, S_IRUGO | S_IWUSR, get_component_power_status,
+                  set_component_power_status);
 static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
+static DEVICE_ATTR(slot, S_IRUGO, get_component_slot, NULL);
 
 static struct attribute *enclosure_component_attrs[] = {
        &dev_attr_fault.attr,
        &dev_attr_status.attr,
        &dev_attr_active.attr,
        &dev_attr_locate.attr,
+       &dev_attr_power_status.attr,
        &dev_attr_type.attr,
+       &dev_attr_slot.attr,
        NULL
 };
 ATTRIBUTE_GROUPS(enclosure_component);
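
The enclosure.c changes above split component creation into enclosure_component_alloc() plus enclosure_component_register(), so a caller can fill in fields such as ->slot before the sysfs node (including the new slot and power_status attributes) is published. The matching ses.c update is in the file list but not shown here; the following is a hedged sketch of how such a caller might use the two-step API, based only on the prototypes visible in this diff — the helper name and the slot value are illustrative:

	#include <linux/enclosure.h>
	#include <linux/err.h>

	/* Illustration only: allocate a component, seed its slot, then publish it. */
	static int example_add_component(struct enclosure_device *edev,
					 unsigned int num, const char *name)
	{
		struct enclosure_component *ecomp;

		ecomp = enclosure_component_alloc(edev, num,
						  ENCLOSURE_COMPONENT_DEVICE, name);
		if (IS_ERR(ecomp))
			return PTR_ERR(ecomp);

		ecomp->slot = num;	/* exported via the new "slot" attribute */

		/* device_register() happens here; on failure the component is released. */
		return enclosure_component_register(ecomp);
	}
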
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index ccad880..26a7623 100644
@@ -277,4 +277,11 @@ config PHY_STIH41X_USB
          Enable this to support the USB transceiver that is part of
          STMicroelectronics STiH41x SoC series.
 
+config PHY_QCOM_UFS
+       tristate "Qualcomm UFS PHY driver"
+       depends on OF && ARCH_MSM
+       select GENERIC_PHY
+       help
+         Support for UFS PHY on QCOM chipsets.
+
 endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index aa74f96..cfbb720 100644
@@ -34,3 +34,6 @@ obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY)  += phy-spear1340-miphy.o
 obj-$(CONFIG_PHY_XGENE)                        += phy-xgene.o
 obj-$(CONFIG_PHY_STIH407_USB)          += phy-stih407-usb.o
 obj-$(CONFIG_PHY_STIH41X_USB)          += phy-stih41x-usb.o
+obj-$(CONFIG_PHY_QCOM_UFS)     += phy-qcom-ufs.o
+obj-$(CONFIG_PHY_QCOM_UFS)     += phy-qcom-ufs-qmp-20nm.o
+obj-$(CONFIG_PHY_QCOM_UFS)     += phy-qcom-ufs-qmp-14nm.o
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
new file mode 100644
index 0000000..591a391
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_I_H_
+#define UFS_QCOM_PHY_I_H_
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/phy/phy-qcom-ufs.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
+({ \
+       ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+       might_sleep_if(timeout_us); \
+       for (;;) { \
+               (val) = readl(addr); \
+               if (cond) \
+                       break; \
+               if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+                       (val) = readl(addr); \
+                       break; \
+               } \
+               if (sleep_us) \
+                       usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
+       } \
+       (cond) ? 0 : -ETIMEDOUT; \
+})
+
+#define UFS_QCOM_PHY_CAL_ENTRY(reg, val)       \
+       {                               \
+               .reg_offset = reg,      \
+               .cfg_value = val,       \
+       }
+
+#define UFS_QCOM_PHY_NAME_LEN  30
+
+enum {
+       MASK_SERDES_START       = 0x1,
+       MASK_PCS_READY          = 0x1,
+};
+
+enum {
+       OFFSET_SERDES_START     = 0x0,
+};
+
+struct ufs_qcom_phy_stored_attributes {
+       u32 att;
+       u32 value;
+};
+
+
+struct ufs_qcom_phy_calibration {
+       u32 reg_offset;
+       u32 cfg_value;
+};
+
+struct ufs_qcom_phy_vreg {
+       const char *name;
+       struct regulator *reg;
+       int max_uA;
+       int min_uV;
+       int max_uV;
+       bool enabled;
+       bool is_always_on;
+};
+
+struct ufs_qcom_phy {
+       struct list_head list;
+       struct device *dev;
+       void __iomem *mmio;
+       void __iomem *dev_ref_clk_ctrl_mmio;
+       struct clk *tx_iface_clk;
+       struct clk *rx_iface_clk;
+       bool is_iface_clk_enabled;
+       struct clk *ref_clk_src;
+       struct clk *ref_clk_parent;
+       struct clk *ref_clk;
+       bool is_ref_clk_enabled;
+       bool is_dev_ref_clk_enabled;
+       struct ufs_qcom_phy_vreg vdda_pll;
+       struct ufs_qcom_phy_vreg vdda_phy;
+       struct ufs_qcom_phy_vreg vddp_ref_clk;
+       unsigned int quirks;
+
+       /*
+       * If UFS link is put into Hibern8 and if UFS PHY analog hardware is
+       * power collapsed (by clearing UFS_PHY_POWER_DOWN_CONTROL), Hibern8
+       * exit might fail even after powering on UFS PHY analog hardware.
+       * Enabling this quirk will help to solve the above issue by doing
+       * custom PHY settings just before PHY analog power collapse.
+       */
+       #define UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE  BIT(0)
+
+       u8 host_ctrl_rev_major;
+       u16 host_ctrl_rev_minor;
+       u16 host_ctrl_rev_step;
+
+       char name[UFS_QCOM_PHY_NAME_LEN];
+       struct ufs_qcom_phy_calibration *cached_regs;
+       int cached_regs_table_size;
+       bool is_powered_on;
+       struct ufs_qcom_phy_specific_ops *phy_spec_ops;
+};
+
+/**
+ * struct ufs_qcom_phy_specific_ops - set of pointers to functions which have a
+ * specific implementation per phy. Each UFS phy should implement
+ * those functions according to its spec and requirements
+ * @calibrate_phy: pointer to a function that calibrates the phy
+ * @start_serdes: pointer to a function that starts the serdes
+ * @is_physical_coding_sublayer_ready: pointer to a function that
+ * checks PCS readiness. Returns 0 for success and non-zero for error.
+ * @set_tx_lane_enable: pointer to a function that enables tx lanes
+ * @power_control: pointer to a function that controls the analog rail of the
+ * phy and writes to the QSERDES_RX_SIGDET_CNTRL attribute
+ */
+struct ufs_qcom_phy_specific_ops {
+       int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
+       void (*start_serdes)(struct ufs_qcom_phy *phy);
+       int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
+       void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
+       void (*power_control)(struct ufs_qcom_phy *phy, bool val);
+};
+
+struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
+int ufs_qcom_phy_power_on(struct phy *generic_phy);
+int ufs_qcom_phy_power_off(struct phy *generic_phy);
+int ufs_qcom_phy_exit(struct phy *generic_phy);
+int ufs_qcom_phy_init_clks(struct phy *generic_phy,
+                       struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
+                       struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_remove(struct phy *generic_phy,
+                      struct ufs_qcom_phy *ufs_qcom_phy);
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+                       struct ufs_qcom_phy *common_cfg,
+                       struct phy_ops *ufs_qcom_phy_gen_ops,
+                       struct ufs_qcom_phy_specific_ops *phy_spec_ops);
+int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+                       struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
+                       struct ufs_qcom_phy_calibration *tbl_B, int tbl_size_B,
+                       bool is_rate_B);
+#endif
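
The header above defines a local readl_poll_timeout() helper (comparable to the generic macro in linux/iopoll.h) together with the per-process-node ops table each QMP driver fills in. As a quick illustration of the polling macro, here is a hedged sketch of waiting for the PCS ready bit: MASK_PCS_READY comes from this header, UFS_PHY_PCS_READY_STATUS is defined in the QMP-specific headers added below, and the function name is illustrative:

	#include "phy-qcom-ufs-qmp-14nm.h"	/* pulls in phy-qcom-ufs-i.h */

	/* Illustration only: poll PCS_READY for up to 1 s, sleeping ~10 us per loop. */
	static int example_wait_pcs_ready(struct ufs_qcom_phy *phy)
	{
		u32 val;

		return readl_poll_timeout(phy->mmio + UFS_PHY_PCS_READY_STATUS,
					  val, (val & MASK_PCS_READY), 10, 1000000);
	}
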
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
new file mode 100644
index 0000000..f5fc50a
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-14nm.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_14nm"
+#define UFS_PHY_VDDA_PHY_UV    (925000)
+
+static
+int ufs_qcom_phy_qmp_14nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+                                       bool is_rate_B)
+{
+       int tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
+       int tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+       int err;
+
+       err = ufs_qcom_phy_calibrate(ufs_qcom_phy, phy_cal_table_rate_A,
+               tbl_size_A, phy_cal_table_rate_B, tbl_size_B, is_rate_B);
+
+       if (err)
+               dev_err(ufs_qcom_phy->dev,
+                       "%s: ufs_qcom_phy_calibrate() failed %d\n",
+                       __func__, err);
+       return err;
+}
+
+static
+void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
+{
+       phy_common->quirks =
+               UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
+}
+
+static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy);
+       struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+       int err;
+
+       err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+       if (err) {
+               dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+                       __func__, err);
+               goto out;
+       }
+
+       err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+       if (err) {
+               dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+                       __func__, err);
+               goto out;
+       }
+       phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
+       phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
+
+       ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
+
+out:
+       return err;
+}
+
+static
+void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val)
+{
+       writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+       /*
+        * Before any transactions involving the PHY, ensure the PHY knows
+        * that its analog rail is powered ON (or OFF).
+        */
+       mb();
+}
+
+static inline
+void ufs_qcom_phy_qmp_14nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
+{
+       /*
+        * The 14nm PHY does not have a TX_LANE_ENABLE register.
+        * Implement this function so as not to propagate an error to the caller.
+        */
+}
+
+static inline void ufs_qcom_phy_qmp_14nm_start_serdes(struct ufs_qcom_phy *phy)
+{
+       u32 tmp;
+
+       tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+       tmp &= ~MASK_SERDES_START;
+       tmp |= (1 << OFFSET_SERDES_START);
+       writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+       /* Ensure register value is committed */
+       mb();
+}
+
+static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+{
+       int err = 0;
+       u32 val;
+
+       err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+               val, (val & MASK_PCS_READY), 10, 1000000);
+       if (err)
+               dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+                       __func__, err);
+       return err;
+}
+
+static struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
+       .init           = ufs_qcom_phy_qmp_14nm_init,
+       .exit           = ufs_qcom_phy_exit,
+       .power_on       = ufs_qcom_phy_power_on,
+       .power_off      = ufs_qcom_phy_power_off,
+       .owner          = THIS_MODULE,
+};
+
+static struct ufs_qcom_phy_specific_ops phy_14nm_ops = {
+       .calibrate_phy          = ufs_qcom_phy_qmp_14nm_phy_calibrate,
+       .start_serdes           = ufs_qcom_phy_qmp_14nm_start_serdes,
+       .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
+       .set_tx_lane_enable     = ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
+       .power_control          = ufs_qcom_phy_qmp_14nm_power_control,
+};
+
+static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct phy *generic_phy;
+       struct ufs_qcom_phy_qmp_14nm *phy;
+       int err = 0;
+
+       phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+       if (!phy) {
+               dev_err(dev, "%s: failed to allocate phy\n", __func__);
+               err = -ENOMEM;
+               goto out;
+       }
+
+       generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+                               &ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
+
+       if (!generic_phy) {
+               dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+                       __func__);
+               err = -EIO;
+               goto out;
+       }
+
+       phy_set_drvdata(generic_phy, phy);
+
+       strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+               sizeof(phy->common_cfg.name));
+
+out:
+       return err;
+}
+
+static int ufs_qcom_phy_qmp_14nm_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct phy *generic_phy = to_phy(dev);
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+       int err = 0;
+
+       err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+       if (err)
+               dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+                       __func__, err);
+
+       return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = {
+       {.compatible = "qcom,ufs-phy-qmp-14nm"},
+       {},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
+       .probe = ufs_qcom_phy_qmp_14nm_probe,
+       .remove = ufs_qcom_phy_qmp_14nm_remove,
+       .driver = {
+               .of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
+               .name = "ufs_qcom_phy_qmp_14nm",
+               .owner = THIS_MODULE,
+       },
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_14nm_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 14nm");
+MODULE_LICENSE("GPL v2");
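
The consumer of this platform driver is the ufs-qcom host driver added later in this series (drivers/scsi/ufs/ufs-qcom.c in the file list above), which reaches the ops defined here through the generic PHY framework. A hedged sketch of that consumer side using the standard PHY API; the device pointer and the "ufsphy" lookup name are illustrative and not taken from this diff:

	#include <linux/err.h>
	#include <linux/phy/phy.h>

	/* Illustration only: acquire, initialize and power on the UFS PHY. */
	static int example_attach_ufs_phy(struct device *dev)
	{
		struct phy *phy;
		int err;

		phy = devm_phy_get(dev, "ufsphy");	/* lookup name is illustrative */
		if (IS_ERR(phy))
			return PTR_ERR(phy);

		err = phy_init(phy);		/* dispatches to the .init op above */
		if (err)
			return err;

		return phy_power_on(phy);	/* dispatches to ufs_qcom_phy_power_on() */
	}
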
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.h b/drivers/phy/phy-qcom-ufs-qmp-14nm.h
new file mode 100644
index 0000000..3aefdba
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_14NM_H_
+#define UFS_QCOM_PHY_QMP_14NM_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_OFF(x)     (0x000 + x)
+#define PHY_OFF(x)     (0xC00 + x)
+#define TX_OFF(n, x)   (0x400 + (0x400 * n) + x)
+#define RX_OFF(n, x)   (0x600 + (0x400 * n) + x)
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_BG_TIMER                   COM_OFF(0x0C)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN                COM_OFF(0x34)
+#define QSERDES_COM_SYS_CLK_CTRL               COM_OFF(0x3C)
+#define QSERDES_COM_LOCK_CMP1_MODE0            COM_OFF(0x4C)
+#define QSERDES_COM_LOCK_CMP2_MODE0            COM_OFF(0x50)
+#define QSERDES_COM_LOCK_CMP3_MODE0            COM_OFF(0x54)
+#define QSERDES_COM_LOCK_CMP1_MODE1            COM_OFF(0x58)
+#define QSERDES_COM_LOCK_CMP2_MODE1            COM_OFF(0x5C)
+#define QSERDES_COM_LOCK_CMP3_MODE1            COM_OFF(0x60)
+#define QSERDES_COM_CP_CTRL_MODE0              COM_OFF(0x78)
+#define QSERDES_COM_CP_CTRL_MODE1              COM_OFF(0x7C)
+#define QSERDES_COM_PLL_RCTRL_MODE0            COM_OFF(0x84)
+#define QSERDES_COM_PLL_RCTRL_MODE1            COM_OFF(0x88)
+#define QSERDES_COM_PLL_CCTRL_MODE0            COM_OFF(0x90)
+#define QSERDES_COM_PLL_CCTRL_MODE1            COM_OFF(0x94)
+#define QSERDES_COM_SYSCLK_EN_SEL              COM_OFF(0xAC)
+#define QSERDES_COM_RESETSM_CNTRL              COM_OFF(0xB4)
+#define QSERDES_COM_LOCK_CMP_EN                        COM_OFF(0xC8)
+#define QSERDES_COM_LOCK_CMP_CFG               COM_OFF(0xCC)
+#define QSERDES_COM_DEC_START_MODE0            COM_OFF(0xD0)
+#define QSERDES_COM_DEC_START_MODE1            COM_OFF(0xD4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0      COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0      COM_OFF(0xE0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0      COM_OFF(0xE4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1      COM_OFF(0xE8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1      COM_OFF(0xEC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1      COM_OFF(0xF0)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0      COM_OFF(0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0      COM_OFF(0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1      COM_OFF(0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1      COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_CTRL              COM_OFF(0x124)
+#define QSERDES_COM_VCO_TUNE_MAP               COM_OFF(0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0            COM_OFF(0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0            COM_OFF(0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1            COM_OFF(0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1            COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_TIMER1            COM_OFF(0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2            COM_OFF(0x148)
+#define QSERDES_COM_CLK_SELECT                 COM_OFF(0x174)
+#define QSERDES_COM_HSCLK_SEL                  COM_OFF(0x178)
+#define QSERDES_COM_CORECLK_DIV                        COM_OFF(0x184)
+#define QSERDES_COM_CORE_CLK_EN                        COM_OFF(0x18C)
+#define QSERDES_COM_CMN_CONFIG                 COM_OFF(0x194)
+#define QSERDES_COM_SVS_MODE_CLK_SEL           COM_OFF(0x19C)
+#define QSERDES_COM_CORECLK_DIV_MODE1          COM_OFF(0x1BC)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START                      PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL             PHY_OFF(0x04)
+#define UFS_PHY_PCS_READY_STATUS               PHY_OFF(0x168)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN      TX_OFF(0, 0x68)
+#define QSERDES_TX_LANE_MODE                           TX_OFF(0, 0x94)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN       RX_OFF(0, 0x40)
+#define QSERDES_RX_RX_TERM_BW                  RX_OFF(0, 0x90)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB             RX_OFF(0, 0xC4)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB             RX_OFF(0, 0xC8)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB             RX_OFF(0, 0xCC)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB             RX_OFF(0, 0xD0)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2       RX_OFF(0, 0xD8)
+#define QSERDES_RX_SIGDET_CNTRL                        RX_OFF(0, 0x114)
+#define QSERDES_RX_SIGDET_LVL                  RX_OFF(0, 0x118)
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL       RX_OFF(0, 0x11C)
+#define QSERDES_RX_RX_INTERFACE_MODE           RX_OFF(0, 0x12C)
+
+/*
+ * This structure represents the 14nm specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_14nm {
+       struct ufs_qcom_phy common_cfg;
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
+       UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x10),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x14),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x02),
+
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x02),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0F),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x54),
+};
+
+#endif
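
The tables above pair a register offset with a value via UFS_QCOM_PHY_CAL_ENTRY(); the common ufs_qcom_phy_calibrate() that consumes them lives in phy-qcom-ufs.c, which is only partially shown in this diff. Under the assumption that calibration simply programs each (reg_offset, cfg_value) pair into the PHY register space, a minimal sketch (function name illustrative, not the actual implementation) would look like this:

	/* Sketch only: apply one calibration table by writing each entry. */
	static void example_apply_cal_table(struct ufs_qcom_phy *phy,
					    const struct ufs_qcom_phy_calibration *tbl,
					    int tbl_size)
	{
		int i;

		for (i = 0; i < tbl_size; i++)
			writel_relaxed(tbl[i].cfg_value, phy->mmio + tbl[i].reg_offset);

		/* Make sure the writes land before the SerDes is started. */
		mb();
	}
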
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
new file mode 100644
index 0000000..8332f96
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-20nm.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_20nm"
+
+static
+int ufs_qcom_phy_qmp_20nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+                                       bool is_rate_B)
+{
+       struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+       int tbl_size_A, tbl_size_B;
+       u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+       u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+       u16 step = ufs_qcom_phy->host_ctrl_rev_step;
+       int err;
+
+       if ((major == 0x1) && (minor == 0x002) && (step == 0x0000)) {
+               tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_2_0);
+               tbl_A = phy_cal_table_rate_A_1_2_0;
+       } else if ((major == 0x1) && (minor == 0x003) && (step == 0x0000)) {
+               tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_3_0);
+               tbl_A = phy_cal_table_rate_A_1_3_0;
+       } else {
+               dev_err(ufs_qcom_phy->dev, "%s: Unknown UFS-PHY version, no calibration values\n",
+                       __func__);
+               err = -ENODEV;
+               goto out;
+       }
+
+       tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+       tbl_B = phy_cal_table_rate_B;
+
+       err = ufs_qcom_phy_calibrate(ufs_qcom_phy, tbl_A, tbl_size_A,
+                                               tbl_B, tbl_size_B, is_rate_B);
+
+       if (err)
+               dev_err(ufs_qcom_phy->dev, "%s: ufs_qcom_phy_calibrate() failed %d\n",
+                       __func__, err);
+
+out:
+       return err;
+}
+
+static
+void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
+{
+       phy_common->quirks =
+               UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
+}
+
+static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy);
+       struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+       int err = 0;
+
+       err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+       if (err) {
+               dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+                       __func__, err);
+               goto out;
+       }
+
+       err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+       if (err) {
+               dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+                       __func__, err);
+               goto out;
+       }
+
+       ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
+
+out:
+       return err;
+}
+
+static
+void ufs_qcom_phy_qmp_20nm_power_control(struct ufs_qcom_phy *phy, bool val)
+{
+       bool hibern8_exit_after_pwr_collapse = phy->quirks &
+               UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
+
+       if (val) {
+               writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+               /*
+                * Before any transactions involving the PHY, ensure the PHY knows
+                * that its analog rail is powered ON.
+                */
+               mb();
+
+               if (hibern8_exit_after_pwr_collapse) {
+                       /*
+                        * Give at least 1 us of delay after restoring PHY analog
+                        * power.
+                        */
+                       usleep_range(1, 2);
+                       writel_relaxed(0x0A, phy->mmio +
+                                      QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
+                       writel_relaxed(0x08, phy->mmio +
+                                      QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
+                       /*
+                        * Make sure workaround is deactivated before proceeding
+                        * with normal PHY operations.
+                        */
+                       mb();
+               }
+       } else {
+               if (hibern8_exit_after_pwr_collapse) {
+                       writel_relaxed(0x0A, phy->mmio +
+                                      QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
+                       writel_relaxed(0x02, phy->mmio +
+                                      QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
+                       /*
+                        * Make sure that the above workaround is activated before
+                        * PHY analog power collapse.
+                        */
+                       mb();
+               }
+
+               writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+               /*
+                * ensure that PHY knows its PHY analog rail is going
+                * to be powered down
+                */
+               mb();
+       }
+}
+
+static
+void ufs_qcom_phy_qmp_20nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
+{
+       writel_relaxed(val & UFS_PHY_TX_LANE_ENABLE_MASK,
+                       phy->mmio + UFS_PHY_TX_LANE_ENABLE);
+       mb();
+}
+
+static inline void ufs_qcom_phy_qmp_20nm_start_serdes(struct ufs_qcom_phy *phy)
+{
+       u32 tmp;
+
+       tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+       tmp &= ~MASK_SERDES_START;
+       tmp |= (1 << OFFSET_SERDES_START);
+       writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+       mb();
+}
+
+static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+{
+       int err = 0;
+       u32 val;
+
+       err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+                       val, (val & MASK_PCS_READY), 10, 1000000);
+       if (err)
+               dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+                       __func__, err);
+       return err;
+}
+
+static struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
+       .init           = ufs_qcom_phy_qmp_20nm_init,
+       .exit           = ufs_qcom_phy_exit,
+       .power_on       = ufs_qcom_phy_power_on,
+       .power_off      = ufs_qcom_phy_power_off,
+       .owner          = THIS_MODULE,
+};
+
+static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
+       .calibrate_phy          = ufs_qcom_phy_qmp_20nm_phy_calibrate,
+       .start_serdes           = ufs_qcom_phy_qmp_20nm_start_serdes,
+       .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
+       .set_tx_lane_enable     = ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
+       .power_control          = ufs_qcom_phy_qmp_20nm_power_control,
+};
+
+static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct phy *generic_phy;
+       struct ufs_qcom_phy_qmp_20nm *phy;
+       int err = 0;
+
+       phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+       if (!phy) {
+               dev_err(dev, "%s: failed to allocate phy\n", __func__);
+               err = -ENOMEM;
+               goto out;
+       }
+
+       generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+                               &ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
+
+       if (!generic_phy) {
+               dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+                       __func__);
+               err = -EIO;
+               goto out;
+       }
+
+       phy_set_drvdata(generic_phy, phy);
+
+       strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+                       sizeof(phy->common_cfg.name));
+
+out:
+       return err;
+}
+
+static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct phy *generic_phy = to_phy(dev);
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+       int err = 0;
+
+       err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+       if (err)
+               dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+                       __func__, err);
+
+       return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_20nm_of_match[] = {
+       {.compatible = "qcom,ufs-phy-qmp-20nm"},
+       {},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
+       .probe = ufs_qcom_phy_qmp_20nm_probe,
+       .remove = ufs_qcom_phy_qmp_20nm_remove,
+       .driver = {
+               .of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
+               .name = "ufs_qcom_phy_qmp_20nm",
+               .owner = THIS_MODULE,
+       },
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_20nm_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 20nm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.h b/drivers/phy/phy-qcom-ufs-qmp-20nm.h
new file mode 100644
index 0000000..4f3076b
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_20NM_H_
+#define UFS_QCOM_PHY_QMP_20NM_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+
+#define COM_OFF(x)     (0x000 + x)
+#define PHY_OFF(x)     (0xC00 + x)
+#define TX_OFF(n, x)   (0x400 + (0x400 * n) + x)
+#define RX_OFF(n, x)   (0x600 + (0x400 * n) + x)
+
+/* UFS PHY PLL block registers */
+#define QSERDES_COM_SYS_CLK_CTRL               COM_OFF(0x0)
+#define QSERDES_COM_PLL_VCOTAIL_EN             COM_OFF(0x04)
+#define QSERDES_COM_PLL_CNTRL                  COM_OFF(0x14)
+#define QSERDES_COM_PLL_IP_SETI                        COM_OFF(0x24)
+#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL       COM_OFF(0x28)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN                COM_OFF(0x30)
+#define QSERDES_COM_PLL_CP_SETI                        COM_OFF(0x34)
+#define QSERDES_COM_PLL_IP_SETP                        COM_OFF(0x38)
+#define QSERDES_COM_PLL_CP_SETP                        COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_EN_SEL_TXBAND       COM_OFF(0x48)
+#define QSERDES_COM_RESETSM_CNTRL              COM_OFF(0x4C)
+#define QSERDES_COM_RESETSM_CNTRL2             COM_OFF(0x50)
+#define QSERDES_COM_PLLLOCK_CMP1               COM_OFF(0x90)
+#define QSERDES_COM_PLLLOCK_CMP2               COM_OFF(0x94)
+#define QSERDES_COM_PLLLOCK_CMP3               COM_OFF(0x98)
+#define QSERDES_COM_PLLLOCK_CMP_EN             COM_OFF(0x9C)
+#define QSERDES_COM_BGTC                       COM_OFF(0xA0)
+#define QSERDES_COM_DEC_START1                 COM_OFF(0xAC)
+#define QSERDES_COM_PLL_AMP_OS                 COM_OFF(0xB0)
+#define QSERDES_COM_RES_CODE_UP_OFFSET         COM_OFF(0xD8)
+#define QSERDES_COM_RES_CODE_DN_OFFSET         COM_OFF(0xDC)
+#define QSERDES_COM_DIV_FRAC_START1            COM_OFF(0x100)
+#define QSERDES_COM_DIV_FRAC_START2            COM_OFF(0x104)
+#define QSERDES_COM_DIV_FRAC_START3            COM_OFF(0x108)
+#define QSERDES_COM_DEC_START2                 COM_OFF(0x10C)
+#define QSERDES_COM_PLL_RXTXEPCLK_EN           COM_OFF(0x110)
+#define QSERDES_COM_PLL_CRCTRL                 COM_OFF(0x114)
+#define QSERDES_COM_PLL_CLKEPDIV               COM_OFF(0x118)
+
+/* TX LANE n (0, 1) registers */
+#define QSERDES_TX_EMP_POST1_LVL(n)            TX_OFF(n, 0x08)
+#define QSERDES_TX_DRV_LVL(n)                  TX_OFF(n, 0x0C)
+#define QSERDES_TX_LANE_MODE(n)                        TX_OFF(n, 0x54)
+
+/* RX LANE n (0, 1) registers */
+#define QSERDES_RX_CDR_CONTROL1(n)             RX_OFF(n, 0x0)
+#define QSERDES_RX_CDR_CONTROL_HALF(n)         RX_OFF(n, 0x8)
+#define QSERDES_RX_RX_EQ_GAIN1_LSB(n)          RX_OFF(n, 0xA8)
+#define QSERDES_RX_RX_EQ_GAIN1_MSB(n)          RX_OFF(n, 0xAC)
+#define QSERDES_RX_RX_EQ_GAIN2_LSB(n)          RX_OFF(n, 0xB0)
+#define QSERDES_RX_RX_EQ_GAIN2_MSB(n)          RX_OFF(n, 0xB4)
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(n)    RX_OFF(n, 0xBC)
+#define QSERDES_RX_CDR_CONTROL_QUARTER(n)      RX_OFF(n, 0xC)
+#define QSERDES_RX_SIGDET_CNTRL(n)             RX_OFF(n, 0x100)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START                      PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL             PHY_OFF(0x4)
+#define UFS_PHY_TX_LANE_ENABLE                 PHY_OFF(0x44)
+#define UFS_PHY_PWM_G1_CLK_DIVIDER             PHY_OFF(0x08)
+#define UFS_PHY_PWM_G2_CLK_DIVIDER             PHY_OFF(0x0C)
+#define UFS_PHY_PWM_G3_CLK_DIVIDER             PHY_OFF(0x10)
+#define UFS_PHY_PWM_G4_CLK_DIVIDER             PHY_OFF(0x14)
+#define UFS_PHY_CORECLK_PWM_G1_CLK_DIVIDER     PHY_OFF(0x34)
+#define UFS_PHY_CORECLK_PWM_G2_CLK_DIVIDER     PHY_OFF(0x38)
+#define UFS_PHY_CORECLK_PWM_G3_CLK_DIVIDER     PHY_OFF(0x3C)
+#define UFS_PHY_CORECLK_PWM_G4_CLK_DIVIDER     PHY_OFF(0x40)
+#define UFS_PHY_OMC_STATUS_RDVAL               PHY_OFF(0x68)
+#define UFS_PHY_LINE_RESET_TIME                        PHY_OFF(0x28)
+#define UFS_PHY_LINE_RESET_GRANULARITY         PHY_OFF(0x2C)
+#define UFS_PHY_TSYNC_RSYNC_CNTL               PHY_OFF(0x48)
+#define UFS_PHY_PLL_CNTL                       PHY_OFF(0x50)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL           PHY_OFF(0x54)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL           PHY_OFF(0x5C)
+#define UFS_PHY_TX_LARGE_AMP_POST_EMP_LVL      PHY_OFF(0x58)
+#define UFS_PHY_TX_SMALL_AMP_POST_EMP_LVL      PHY_OFF(0x60)
+#define UFS_PHY_CFG_CHANGE_CNT_VAL             PHY_OFF(0x64)
+#define UFS_PHY_RX_SYNC_WAIT_TIME              PHY_OFF(0x6C)
+#define UFS_PHY_TX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY  PHY_OFF(0xB4)
+#define UFS_PHY_RX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY  PHY_OFF(0xE0)
+#define UFS_PHY_TX_MIN_STALL_NOCONFIG_TIME_CAPABILITY  PHY_OFF(0xB8)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAPABILITY  PHY_OFF(0xE4)
+#define UFS_PHY_TX_MIN_SAVE_CONFIG_TIME_CAPABILITY     PHY_OFF(0xBC)
+#define UFS_PHY_RX_MIN_SAVE_CONFIG_TIME_CAPABILITY     PHY_OFF(0xE8)
+#define UFS_PHY_RX_PWM_BURST_CLOSURE_LENGTH_CAPABILITY PHY_OFF(0xFC)
+#define UFS_PHY_RX_MIN_ACTIVATETIME_CAPABILITY         PHY_OFF(0x100)
+#define UFS_PHY_RX_SIGDET_CTRL3                                PHY_OFF(0x14c)
+#define UFS_PHY_RMMI_ATTR_CTRL                 PHY_OFF(0x160)
+#define UFS_PHY_RMMI_RX_CFGUPDT_L1     (1 << 7)
+#define UFS_PHY_RMMI_TX_CFGUPDT_L1     (1 << 6)
+#define UFS_PHY_RMMI_CFGWR_L1          (1 << 5)
+#define UFS_PHY_RMMI_CFGRD_L1          (1 << 4)
+#define UFS_PHY_RMMI_RX_CFGUPDT_L0     (1 << 3)
+#define UFS_PHY_RMMI_TX_CFGUPDT_L0     (1 << 2)
+#define UFS_PHY_RMMI_CFGWR_L0          (1 << 1)
+#define UFS_PHY_RMMI_CFGRD_L0          (1 << 0)
+#define UFS_PHY_RMMI_ATTRID                    PHY_OFF(0x164)
+#define UFS_PHY_RMMI_ATTRWRVAL                 PHY_OFF(0x168)
+#define UFS_PHY_RMMI_ATTRRDVAL_L0_STATUS       PHY_OFF(0x16C)
+#define UFS_PHY_RMMI_ATTRRDVAL_L1_STATUS       PHY_OFF(0x170)
+#define UFS_PHY_PCS_READY_STATUS               PHY_OFF(0x174)
+
+#define UFS_PHY_TX_LANE_ENABLE_MASK            0x3
+
+/*
+ * This structure represents the 20nm specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_20nm {
+       struct ufs_qcom_phy common_cfg;
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_2_0[] = {
+       UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+       UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x3f),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x1b),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x0f),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(0), 0x2F),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(0), 0x20),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(1), 0x2F),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(1), 0x20),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_3_0[] = {
+       UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+       UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x2b),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x38),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x3c),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_UP_OFFSET, 0x02),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_DN_OFFSET, 0x02),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CNTRL, 0x40),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x98),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0x65),
+       UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x1e),
+};
+
+#endif
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
new file mode 100644 (file)
index 0000000..44ee983
--- /dev/null
@@ -0,0 +1,745 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-i.h"
+
+#define MAX_PROP_NAME              32
+#define VDDA_PHY_MIN_UV            1000000
+#define VDDA_PHY_MAX_UV            1000000
+#define VDDA_PLL_MIN_UV            1800000
+#define VDDA_PLL_MAX_UV            1800000
+#define VDDP_REF_CLK_MIN_UV        1200000
+#define VDDP_REF_CLK_MAX_UV        1200000
+
+static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
+                                   const char *, bool);
+static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
+                                 const char *);
+static int ufs_qcom_phy_base_init(struct platform_device *pdev,
+                                 struct ufs_qcom_phy *phy_common);
+
+int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+                          struct ufs_qcom_phy_calibration *tbl_A,
+                          int tbl_size_A,
+                          struct ufs_qcom_phy_calibration *tbl_B,
+                          int tbl_size_B, bool is_rate_B)
+{
+       int i;
+       int ret = 0;
+
+       if (!tbl_A) {
+               dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL\n", __func__);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       for (i = 0; i < tbl_size_A; i++)
+               writel_relaxed(tbl_A[i].cfg_value,
+                              ufs_qcom_phy->mmio + tbl_A[i].reg_offset);
+
+       /*
+        * In case we would like to work in rate B, we need
+        * to override the registers that were configured in
+        * the rate A table with the values from the rate B
+        * table.
+        */
+       if (is_rate_B) {
+               if (!tbl_B) {
+                       dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL\n",
+                               __func__);
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               for (i = 0; i < tbl_size_B; i++)
+                       writel_relaxed(tbl_B[i].cfg_value,
+                               ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
+       }
+
+       /* flush buffered writes */
+       mb();
+
+out:
+       return ret;
+}
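
For context, a generation-specific PHY driver is expected to feed its rate A/B
tables into the helper above. The sketch below shows one plausible caller; the
wrapper name and the choice of table are illustrative assumptions (not part of
this commit), while the tables themselves are the ones defined in the 20nm PHY
header added earlier in this diff:

static int example_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
				 bool is_rate_B)
{
	/* Rate A settings are always programmed; the rate B entries then
	 * override the few registers that differ for rate B operation. */
	return ufs_qcom_phy_calibrate(ufs_qcom_phy,
				      phy_cal_table_rate_A_1_3_0,
				      ARRAY_SIZE(phy_cal_table_rate_A_1_3_0),
				      phy_cal_table_rate_B,
				      ARRAY_SIZE(phy_cal_table_rate_B),
				      is_rate_B);
}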
+
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+                               struct ufs_qcom_phy *common_cfg,
+                               struct phy_ops *ufs_qcom_phy_gen_ops,
+                               struct ufs_qcom_phy_specific_ops *phy_spec_ops)
+{
+       int err;
+       struct device *dev = &pdev->dev;
+       struct phy *generic_phy = NULL;
+       struct phy_provider *phy_provider;
+
+       err = ufs_qcom_phy_base_init(pdev, common_cfg);
+       if (err) {
+               dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
+               goto out;
+       }
+
+       phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+       if (IS_ERR(phy_provider)) {
+               err = PTR_ERR(phy_provider);
+               dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
+               goto out;
+       }
+
+       generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
+       if (IS_ERR(generic_phy)) {
+               err = PTR_ERR(generic_phy);
+               dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
+               goto out;
+       }
+
+       common_cfg->phy_spec_ops = phy_spec_ops;
+       common_cfg->dev = dev;
+
+out:
+       return generic_phy;
+}
+
+/*
+ * This assumes the embedded phy structure inside generic_phy's driver data
+ * is of type struct ufs_qcom_phy. For this cast to be valid, it is crucial
+ * that the embedded struct "struct ufs_qcom_phy common_cfg" stays the
+ * first member of the generation-specific phy structure.
+ */
+struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
+{
+       return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
+}
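
The layout that the cast above relies on looks roughly like the following;
the struct name and trailing fields are illustrative assumptions, only the
"common data must come first" rule comes from the comment above:

struct ufs_qcom_phy_qmp_example {
	struct ufs_qcom_phy common_cfg;	/* must stay the first member */
	/* generation-specific fields follow */
};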
+
+static
+int ufs_qcom_phy_base_init(struct platform_device *pdev,
+                          struct ufs_qcom_phy *phy_common)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int err = 0;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
+       if (!res) {
+               dev_err(dev, "%s: phy_mem resource not found\n", __func__);
+               err = -ENOMEM;
+               goto out;
+       }
+
+       phy_common->mmio = devm_ioremap_resource(dev, res);
+       if (IS_ERR((void const *)phy_common->mmio)) {
+               err = PTR_ERR((void const *)phy_common->mmio);
+               phy_common->mmio = NULL;
+               dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
+                       __func__, err);
+               goto out;
+       }
+
+       /* "dev_ref_clk_ctrl_mem" is optional resource */
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                          "dev_ref_clk_ctrl_mem");
+       if (!res) {
+               dev_dbg(dev, "%s: dev_ref_clk_ctrl_mem resource not found\n",
+                       __func__);
+               goto out;
+       }
+
+       phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
+       if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio)) {
+               err = PTR_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio);
+               phy_common->dev_ref_clk_ctrl_mmio = NULL;
+               dev_err(dev, "%s: ioremap for dev_ref_clk_ctrl_mem resource failed %d\n",
+                       __func__, err);
+       }
+
+out:
+       return err;
+}
+
+static int __ufs_qcom_phy_clk_get(struct phy *phy,
+                        const char *name, struct clk **clk_out, bool err_print)
+{
+       struct clk *clk;
+       int err = 0;
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+       struct device *dev = ufs_qcom_phy->dev;
+
+       clk = devm_clk_get(dev, name);
+       if (IS_ERR(clk)) {
+               err = PTR_ERR(clk);
+               if (err_print)
+                       dev_err(dev, "failed to get %s err %d", name, err);
+       } else {
+               *clk_out = clk;
+       }
+
+       return err;
+}
+
+static
+int ufs_qcom_phy_clk_get(struct phy *phy,
+                        const char *name, struct clk **clk_out)
+{
+       return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
+}
+
+int
+ufs_qcom_phy_init_clks(struct phy *generic_phy,
+                      struct ufs_qcom_phy *phy_common)
+{
+       int err;
+
+       err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+                                  &phy_common->tx_iface_clk);
+       if (err)
+               goto out;
+
+       err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+                                  &phy_common->rx_iface_clk);
+       if (err)
+               goto out;
+
+       err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
+                                  &phy_common->ref_clk_src);
+       if (err)
+               goto out;
+
+       /*
+        * "ref_clk_parent" is optional, hence don't abort init if it's not
+        * found.
+        */
+       __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
+                                  &phy_common->ref_clk_parent, false);
+
+       err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
+                                  &phy_common->ref_clk);
+
+out:
+       return err;
+}
+
+int
+ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
+                             struct ufs_qcom_phy *phy_common)
+{
+       int err;
+
+       err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
+               "vdda-pll");
+       if (err)
+               goto out;
+
+       err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
+               "vdda-phy");
+
+       if (err)
+               goto out;
+
+       /* vddp-ref-clk-* properties are optional */
+       __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
+                                "vddp-ref-clk", true);
+out:
+       return err;
+}
+
+static int __ufs_qcom_phy_init_vreg(struct phy *phy,
+               struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
+{
+       int err = 0;
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+       struct device *dev = ufs_qcom_phy->dev;
+
+       char prop_name[MAX_PROP_NAME];
+
+       vreg->name = kstrdup(name, GFP_KERNEL);
+       if (!vreg->name) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       vreg->reg = devm_regulator_get(dev, name);
+       if (IS_ERR(vreg->reg)) {
+               err = PTR_ERR(vreg->reg);
+               vreg->reg = NULL;
+               if (!optional)
+                       dev_err(dev, "failed to get %s, %d\n", name, err);
+               goto out;
+       }
+
+       if (dev->of_node) {
+               snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
+               err = of_property_read_u32(dev->of_node,
+                                       prop_name, &vreg->max_uA);
+               if (err && err != -EINVAL) {
+                       dev_err(dev, "%s: failed to read %s\n",
+                                       __func__, prop_name);
+                       goto out;
+               } else if (err == -EINVAL || !vreg->max_uA) {
+                       if (regulator_count_voltages(vreg->reg) > 0) {
+                               dev_err(dev, "%s: %s is mandatory\n",
+                                               __func__, prop_name);
+                               goto out;
+                       }
+                       err = 0;
+               }
+               snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
+               if (of_get_property(dev->of_node, prop_name, NULL))
+                       vreg->is_always_on = true;
+               else
+                       vreg->is_always_on = false;
+       }
+
+       if (!strcmp(name, "vdda-pll")) {
+               vreg->max_uV = VDDA_PLL_MAX_UV;
+               vreg->min_uV = VDDA_PLL_MIN_UV;
+       } else if (!strcmp(name, "vdda-phy")) {
+               vreg->max_uV = VDDA_PHY_MAX_UV;
+               vreg->min_uV = VDDA_PHY_MIN_UV;
+       } else if (!strcmp(name, "vddp-ref-clk")) {
+               vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+               vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+       }
+
+out:
+       if (err)
+               kfree(vreg->name);
+       return err;
+}
+
+static int ufs_qcom_phy_init_vreg(struct phy *phy,
+                       struct ufs_qcom_phy_vreg *vreg, const char *name)
+{
+       return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
+}
+
+static
+int ufs_qcom_phy_cfg_vreg(struct phy *phy,
+                         struct ufs_qcom_phy_vreg *vreg, bool on)
+{
+       int ret = 0;
+       struct regulator *reg = vreg->reg;
+       const char *name = vreg->name;
+       int min_uV;
+       int uA_load;
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+       struct device *dev = ufs_qcom_phy->dev;
+
+       BUG_ON(!vreg);
+
+       if (regulator_count_voltages(reg) > 0) {
+               min_uV = on ? vreg->min_uV : 0;
+               ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+               if (ret) {
+                       dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+                                       __func__, name, ret);
+                       goto out;
+               }
+               uA_load = on ? vreg->max_uA : 0;
+               ret = regulator_set_optimum_mode(reg, uA_load);
+               if (ret >= 0) {
+                       /*
+                        * regulator_set_optimum_mode() returns new regulator
+                        * mode upon success.
+                        */
+                       ret = 0;
+               } else {
+                       dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
+                                       __func__, name, uA_load, ret);
+                       goto out;
+               }
+       }
+out:
+       return ret;
+}
+
+static
+int ufs_qcom_phy_enable_vreg(struct phy *phy,
+                            struct ufs_qcom_phy_vreg *vreg)
+{
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+       struct device *dev = ufs_qcom_phy->dev;
+       int ret = 0;
+
+       if (!vreg || vreg->enabled)
+               goto out;
+
+       ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
+       if (ret) {
+               dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
+                       __func__, ret);
+               goto out;
+       }
+
+       ret = regulator_enable(vreg->reg);
+       if (ret) {
+               dev_err(dev, "%s: enable failed, err=%d\n",
+                               __func__, ret);
+               goto out;
+       }
+
+       vreg->enabled = true;
+out:
+       return ret;
+}
+
+int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
+{
+       int ret = 0;
+       struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+
+       if (phy->is_ref_clk_enabled)
+               goto out;
+
+       /*
+        * The reference clock is propagated in a daisy-chained manner from
+        * the source to the phy, so ungate it at each stage.
+        */
+       ret = clk_prepare_enable(phy->ref_clk_src);
+       if (ret) {
+               dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
+                               __func__, ret);
+               goto out;
+       }
+
+       /*
+        * "ref_clk_parent" is an optional clock, hence make sure that its
+        * reference is available before trying to enable the clock.
+        */
+       if (phy->ref_clk_parent) {
+               ret = clk_prepare_enable(phy->ref_clk_parent);
+               if (ret) {
+                       dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
+                                       __func__, ret);
+                       goto out_disable_src;
+               }
+       }
+
+       ret = clk_prepare_enable(phy->ref_clk);
+       if (ret) {
+               dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
+                               __func__, ret);
+               goto out_disable_parent;
+       }
+
+       phy->is_ref_clk_enabled = true;
+       goto out;
+
+out_disable_parent:
+       if (phy->ref_clk_parent)
+               clk_disable_unprepare(phy->ref_clk_parent);
+out_disable_src:
+       clk_disable_unprepare(phy->ref_clk_src);
+out:
+       return ret;
+}
+
+static
+int ufs_qcom_phy_disable_vreg(struct phy *phy,
+                             struct ufs_qcom_phy_vreg *vreg)
+{
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+       struct device *dev = ufs_qcom_phy->dev;
+       int ret = 0;
+
+       if (!vreg || !vreg->enabled || vreg->is_always_on)
+               goto out;
+
+       ret = regulator_disable(vreg->reg);
+
+       if (!ret) {
+               /* ignore errors on applying disable config */
+               ufs_qcom_phy_cfg_vreg(phy, vreg, false);
+               vreg->enabled = false;
+       } else {
+               dev_err(dev, "%s: %s disable failed, err=%d\n",
+                               __func__, vreg->name, ret);
+       }
+out:
+       return ret;
+}
+
+void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+
+       if (phy->is_ref_clk_enabled) {
+               clk_disable_unprepare(phy->ref_clk);
+               /*
+                * "ref_clk_parent" is an optional clock, hence make sure that
+                * its reference is available before trying to disable it.
+                */
+               if (phy->ref_clk_parent)
+                       clk_disable_unprepare(phy->ref_clk_parent);
+               clk_disable_unprepare(phy->ref_clk_src);
+               phy->is_ref_clk_enabled = false;
+       }
+}
+
+#define UFS_REF_CLK_EN (1 << 5)
+
+static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
+{
+       struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+
+       if (phy->dev_ref_clk_ctrl_mmio &&
+           (enable ^ phy->is_dev_ref_clk_enabled)) {
+               u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);
+
+               if (enable)
+                       temp |= UFS_REF_CLK_EN;
+               else
+                       temp &= ~UFS_REF_CLK_EN;
+
+               /*
+                * If we are here to disable this clock immediately after
+                * entering into hibern8, we need to make sure that the device
+                * ref_clk is active for at least 1us after hibern8 entry.
+                */
+               if (!enable)
+                       udelay(1);
+
+               writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
+               /* ensure that ref_clk is enabled/disabled before we return */
+               wmb();
+               /*
+                * If we call hibern8 exit after this, we need to make sure
+                * that the device ref_clk is stable for at least 1us before
+                * the hibern8 exit command.
+                */
+               if (enable)
+                       udelay(1);
+
+               phy->is_dev_ref_clk_enabled = enable;
+       }
+}
+
+void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
+{
+       ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
+}
+
+void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
+{
+       ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
+}
+
+/* Turn ON M-PHY RMMI interface clocks */
+int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+       int ret = 0;
+
+       if (phy->is_iface_clk_enabled)
+               goto out;
+
+       ret = clk_prepare_enable(phy->tx_iface_clk);
+       if (ret) {
+               dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
+                               __func__, ret);
+               goto out;
+       }
+       ret = clk_prepare_enable(phy->rx_iface_clk);
+       if (ret) {
+               clk_disable_unprepare(phy->tx_iface_clk);
+               dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n",
+                               __func__, ret);
+               goto out;
+       }
+       phy->is_iface_clk_enabled = true;
+
+out:
+       return ret;
+}
+
+/* Turn OFF M-PHY RMMI interface clocks */
+void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+
+       if (phy->is_iface_clk_enabled) {
+               clk_disable_unprepare(phy->tx_iface_clk);
+               clk_disable_unprepare(phy->rx_iface_clk);
+               phy->is_iface_clk_enabled = false;
+       }
+}
+
+int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+       int ret = 0;
+
+       if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
+               dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
+                       __func__);
+               ret = -ENOTSUPP;
+       } else {
+               ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
+       }
+
+       return ret;
+}
+
+int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
+{
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+       int ret = 0;
+
+       if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
+               dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
+                       __func__);
+               ret = -ENOTSUPP;
+       } else {
+               ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
+                                                              tx_lanes);
+       }
+
+       return ret;
+}
+
+void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
+                                         u8 major, u16 minor, u16 step)
+{
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+       ufs_qcom_phy->host_ctrl_rev_major = major;
+       ufs_qcom_phy->host_ctrl_rev_minor = minor;
+       ufs_qcom_phy->host_ctrl_rev_step = step;
+}
+
+int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
+{
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+       int ret = 0;
+
+       if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
+               dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
+                       __func__);
+               ret = -ENOTSUPP;
+       } else {
+               ret = ufs_qcom_phy->phy_spec_ops->
+                               calibrate_phy(ufs_qcom_phy, is_rate_B);
+               if (ret)
+                       dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
+                               __func__, ret);
+       }
+
+       return ret;
+}
+
+int ufs_qcom_phy_remove(struct phy *generic_phy,
+                       struct ufs_qcom_phy *ufs_qcom_phy)
+{
+       phy_power_off(generic_phy);
+
+       kfree(ufs_qcom_phy->vdda_pll.name);
+       kfree(ufs_qcom_phy->vdda_phy.name);
+
+       return 0;
+}
+
+int ufs_qcom_phy_exit(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+       if (ufs_qcom_phy->is_powered_on)
+               phy_power_off(generic_phy);
+
+       return 0;
+}
+
+int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+       if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
+               dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
+                       __func__);
+               return -ENOTSUPP;
+       }
+
+       return ufs_qcom_phy->phy_spec_ops->
+                       is_physical_coding_sublayer_ready(ufs_qcom_phy);
+}
+
+int ufs_qcom_phy_power_on(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+       struct device *dev = phy_common->dev;
+       int err;
+
+       err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
+       if (err) {
+               dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
+                       __func__, err);
+               goto out;
+       }
+
+       phy_common->phy_spec_ops->power_control(phy_common, true);
+
+       /* vdda_pll also enables ref clock LDOs so enable it first */
+       err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
+       if (err) {
+               dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
+                       __func__, err);
+               goto out_disable_phy;
+       }
+
+       err = ufs_qcom_phy_enable_ref_clk(generic_phy);
+       if (err) {
+               dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+                       __func__, err);
+               goto out_disable_pll;
+       }
+
+       /* enable device PHY ref_clk pad rail */
+       if (phy_common->vddp_ref_clk.reg) {
+               err = ufs_qcom_phy_enable_vreg(generic_phy,
+                                              &phy_common->vddp_ref_clk);
+               if (err) {
+                       dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
+                               __func__, err);
+                       goto out_disable_ref_clk;
+               }
+       }
+
+       phy_common->is_powered_on = true;
+       goto out;
+
+out_disable_ref_clk:
+       ufs_qcom_phy_disable_ref_clk(generic_phy);
+out_disable_pll:
+       ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
+out_disable_phy:
+       ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+out:
+       return err;
+}
+
+int ufs_qcom_phy_power_off(struct phy *generic_phy)
+{
+       struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+
+       phy_common->phy_spec_ops->power_control(phy_common, false);
+
+       if (phy_common->vddp_ref_clk.reg)
+               ufs_qcom_phy_disable_vreg(generic_phy,
+                                         &phy_common->vddp_ref_clk);
+       ufs_qcom_phy_disable_ref_clk(generic_phy);
+
+       ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
+       ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+       phy_common->is_powered_on = false;
+
+       return 0;
+}
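
Taken together, the helpers in this file are meant to be driven by the UFS
host-side code in a fairly fixed order. The sketch below is a hedged
illustration of that bring-up sequence; the function name, the exact ordering
relative to phy_power_on(), and the error handling are assumptions rather
than something this commit defines:

static int example_ufs_phy_bring_up(struct phy *generic_phy, bool is_rate_B)
{
	int err;

	/* Regulators and reference clocks first (ufs_qcom_phy_power_on). */
	err = phy_power_on(generic_phy);
	if (err)
		return err;

	/* Program the rate A (and optionally rate B) calibration tables. */
	err = ufs_qcom_phy_calibrate_phy(generic_phy, is_rate_B);
	if (err)
		goto out_power_off;

	/* Start the SerDes, then wait for the PCS to report readiness. */
	err = ufs_qcom_phy_start_serdes(generic_phy);
	if (err)
		goto out_power_off;

	err = ufs_qcom_phy_is_pcs_ready(generic_phy);
	if (err)
		goto out_power_off;

	return 0;

out_power_off:
	phy_power_off(generic_phy);
	return err;
}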
index cd4129f..7600639 100644 (file)
@@ -608,7 +608,8 @@ static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
        }
 
        /* Load rest of compatibility struct */
-       strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
+       strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
+               sizeof(tw_dev->tw_compat_info.driver_version));
        tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
        tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
        tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
index 8d66a64..c7be7bb 100644 (file)
@@ -3485,7 +3485,7 @@ static int blogic_show_info(struct seq_file *m, struct Scsi_Host *shost)
        seq_printf(m, "\n\
 Current Driver Queue Depth:    %d\n\
 Currently Allocated CCBs:      %d\n", adapter->drvr_qdepth, adapter->alloc_ccbs);
-       seq_printf(m, "\n\n\
+       seq_puts(m, "\n\n\
                           DATA TRANSFER STATISTICS\n\
 \n\
 Target Tagged Queuing  Queue Depth  Active  Attempted  Completed\n\
@@ -3500,7 +3500,7 @@ Target    Tagged Queuing  Queue Depth  Active  Attempted  Completed\n\
                seq_printf(m,
                                  "         %3d       %3u    %9u        %9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete);
        }
-       seq_printf(m, "\n\
+       seq_puts(m, "\n\
 Target  Read Commands  Write Commands   Total Bytes Read    Total Bytes Written\n\
 ======  =============  ==============  ===================  ===================\n");
        for (tgt = 0; tgt < adapter->maxdev; tgt++) {
@@ -3517,7 +3517,7 @@ Target  Read Commands  Write Commands   Total Bytes Read    Total Bytes Written\
                else
                        seq_printf(m, "      %9u\n", tgt_stats[tgt].byteswritten.units);
        }
-       seq_printf(m, "\n\
+       seq_puts(m, "\n\
 Target  Command    0-1KB      1-2KB      2-4KB      4-8KB     8-16KB\n\
 ======  =======  =========  =========  =========  =========  =========\n");
        for (tgt = 0; tgt < adapter->maxdev; tgt++) {
@@ -3533,7 +3533,7 @@ Target  Command    0-1KB      1-2KB      2-4KB      4-8KB     8-16KB\n\
                            tgt_stats[tgt].write_sz_buckets[0],
                            tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]);
        }
-       seq_printf(m, "\n\
+       seq_puts(m, "\n\
 Target  Command   16-32KB    32-64KB   64-128KB   128-256KB   256KB+\n\
 ======  =======  =========  =========  =========  =========  =========\n");
        for (tgt = 0; tgt < adapter->maxdev; tgt++) {
@@ -3549,7 +3549,7 @@ Target  Command   16-32KB    32-64KB   64-128KB   128-256KB   256KB+\n\
                            tgt_stats[tgt].write_sz_buckets[5],
                            tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]);
        }
-       seq_printf(m, "\n\n\
+       seq_puts(m, "\n\n\
                           ERROR RECOVERY STATISTICS\n\
 \n\
          Command Aborts      Bus Device Resets   Host Adapter Resets\n\
index 9c92f41..b021bcb 100644 (file)
@@ -201,12 +201,12 @@ config SCSI_ENCLOSURE
          certain enclosure conditions to be reported and is not required.
 
 config SCSI_CONSTANTS
-       bool "Verbose SCSI error reporting (kernel size +=12K)"
+       bool "Verbose SCSI error reporting (kernel size +=75K)"
        depends on SCSI
        help
          The error messages regarding your SCSI hardware will be easier to
          understand if you say Y here; it will enlarge your kernel by about
-         12 KB. If in doubt, say Y.
+         75 KB. If in doubt, say Y.
 
 config SCSI_LOGGING
        bool "SCSI logging facility"
index 58158f1..dee160a 100644 (file)
@@ -159,15 +159,15 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
 
 # This goes last, so that "real" scsi devices probe earlier
 obj-$(CONFIG_SCSI_DEBUG)       += scsi_debug.o
-
-scsi_mod-y                     += scsi.o hosts.o scsi_ioctl.o constants.o \
+scsi_mod-y                     += scsi.o hosts.o scsi_ioctl.o \
                                   scsicam.o scsi_error.o scsi_lib.o
+scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o
 scsi_mod-$(CONFIG_SCSI_DMA)    += scsi_lib_dma.o
 scsi_mod-y                     += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
 scsi_mod-$(CONFIG_SCSI_NETLINK)        += scsi_netlink.o
 scsi_mod-$(CONFIG_SYSCTL)      += scsi_sysctl.o
 scsi_mod-$(CONFIG_SCSI_PROC_FS)        += scsi_proc.o
-scsi_mod-y                     += scsi_trace.o
+scsi_mod-y                     += scsi_trace.o scsi_logging.o
 scsi_mod-$(CONFIG_PM)          += scsi_pm.o
 
 hv_storvsc-y                   := storvsc_drv.o
index 36244d6..8981701 100644 (file)
@@ -716,8 +716,6 @@ static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
 }
 #endif
 
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ## args)
 static
 void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m);
 static
@@ -734,19 +732,19 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m,
        hostdata = (struct NCR5380_hostdata *) instance->hostdata;
 
 #ifdef PSEUDO_DMA
-       SPRINTF("Highwater I/O busy spin counts: write %d, read %d\n",
+       seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n",
                hostdata->spin_max_w, hostdata->spin_max_r);
 #endif
        spin_lock_irq(instance->host_lock);
        if (!hostdata->connected)
-               SPRINTF("scsi%d: no currently connected command\n", instance->host_no);
+               seq_printf(m, "scsi%d: no currently connected command\n", instance->host_no);
        else
                lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
-       SPRINTF("scsi%d: issue_queue\n", instance->host_no);
+       seq_printf(m, "scsi%d: issue_queue\n", instance->host_no);
        for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
                lprint_Scsi_Cmnd(ptr, m);
 
-       SPRINTF("scsi%d: disconnected_queue\n", instance->host_no);
+       seq_printf(m, "scsi%d: disconnected_queue\n", instance->host_no);
        for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
                lprint_Scsi_Cmnd(ptr, m);
        spin_unlock_irq(instance->host_lock);
@@ -755,8 +753,8 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m,
 
 static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
 {
-       SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
-       SPRINTF("        command = ");
+       seq_printf(m, "scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
+       seq_puts(m, "        command = ");
        lprint_command(cmd->cmnd, m);
 }
 
@@ -765,13 +763,13 @@ static void lprint_command(unsigned char *command, struct seq_file *m)
        int i, s;
        lprint_opcode(command[0], m);
        for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
-               SPRINTF("%02x ", command[i]);
-       SPRINTF("\n");
+               seq_printf(m, "%02x ", command[i]);
+       seq_putc(m, '\n');
 }
 
 static void lprint_opcode(int opcode, struct seq_file *m)
 {
-       SPRINTF("%2d (0x%02x)", opcode, opcode);
+       seq_printf(m, "%2d (0x%02x)", opcode, opcode);
 }
 
 
index 2c5ce48..ae95e34 100644 (file)
@@ -2880,7 +2880,7 @@ static void asc_prt_board_devices(struct seq_file *m, struct Scsi_Host *shost)
                chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
        }
 
-       seq_printf(m, "Target IDs Detected:");
+       seq_puts(m, "Target IDs Detected:");
        for (i = 0; i <= ADV_MAX_TID; i++) {
                if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i))
                        seq_printf(m, " %X,", i);
@@ -2896,18 +2896,16 @@ static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost)
        struct asc_board *boardp = shost_priv(shost);
        ushort major, minor, letter;
 
-       seq_printf(m, "\nROM BIOS Version: ");
+       seq_puts(m, "\nROM BIOS Version: ");
 
        /*
         * If the BIOS saved a valid signature, then fill in
         * the BIOS code segment base address.
         */
        if (boardp->bios_signature != 0x55AA) {
-               seq_printf(m, "Disabled or Pre-3.1\n");
-               seq_printf(m,
-                         "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n");
-               seq_printf(m,
-                         "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
+               seq_puts(m, "Disabled or Pre-3.1\n"
+                       "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n"
+                       "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
        } else {
                major = (boardp->bios_version >> 12) & 0xF;
                minor = (boardp->bios_version >> 8) & 0xF;
@@ -2923,10 +2921,8 @@ static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost)
                 */
                if (major < 3 || (major <= 3 && minor < 1) ||
                    (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) {
-                       seq_printf(m,
-                                  "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n");
-                       seq_printf(m,
-                                  "ftp://ftp.connectcom.net/pub\n");
+                       seq_puts(m, "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n"
+                               "ftp://ftp.connectcom.net/pub\n");
                }
        }
 }
@@ -3056,11 +3052,10 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
            == ASC_TRUE)
                seq_printf(m, " Serial Number: %s\n", serialstr);
        else if (ep->adapter_info[5] == 0xBB)
-               seq_printf(m,
-                          " Default Settings Used for EEPROM-less Adapter.\n");
+               seq_puts(m,
+                        " Default Settings Used for EEPROM-less Adapter.\n");
        else
-               seq_printf(m,
-                          " Serial Number Signature Not Present.\n");
+               seq_puts(m, " Serial Number Signature Not Present.\n");
 
        seq_printf(m,
                   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
@@ -3070,34 +3065,30 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
        seq_printf(m,
                   " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam);
 
-       seq_printf(m, " Target ID:           ");
+       seq_puts(m, " Target ID:           ");
        for (i = 0; i <= ASC_MAX_TID; i++)
                seq_printf(m, " %d", i);
-       seq_printf(m, "\n");
 
-       seq_printf(m, " Disconnects:         ");
+       seq_puts(m, "\n Disconnects:         ");
        for (i = 0; i <= ASC_MAX_TID; i++)
                seq_printf(m, " %c",
                           (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-       seq_printf(m, "\n");
 
-       seq_printf(m, " Command Queuing:     ");
+       seq_puts(m, "\n Command Queuing:     ");
        for (i = 0; i <= ASC_MAX_TID; i++)
                seq_printf(m, " %c",
                           (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-       seq_printf(m, "\n");
 
-       seq_printf(m, " Start Motor:         ");
+       seq_puts(m, "\n Start Motor:         ");
        for (i = 0; i <= ASC_MAX_TID; i++)
                seq_printf(m, " %c",
                           (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-       seq_printf(m, "\n");
 
-       seq_printf(m, " Synchronous Transfer:");
+       seq_puts(m, "\n Synchronous Transfer:");
        for (i = 0; i <= ASC_MAX_TID; i++)
                seq_printf(m, " %c",
                           (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
 #ifdef CONFIG_ISA
        if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
@@ -3151,7 +3142,7 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
        if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE)
                seq_printf(m, " Serial Number: %s\n", serialstr);
        else
-               seq_printf(m, " Serial Number Signature Not Present.\n");
+               seq_puts(m, " Serial Number Signature Not Present.\n");
 
        if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550)
                seq_printf(m,
@@ -3209,10 +3200,10 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
                           ep_38C1600->termination_lvd, termstr,
                           ep_38C1600->bios_ctrl);
 
-       seq_printf(m, " Target ID:           ");
+       seq_puts(m, " Target ID:           ");
        for (i = 0; i <= ADV_MAX_TID; i++)
                seq_printf(m, " %X", i);
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
                word = ep_3550->disc_enable;
@@ -3221,11 +3212,11 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
        } else {
                word = ep_38C1600->disc_enable;
        }
-       seq_printf(m, " Disconnects:         ");
+       seq_puts(m, " Disconnects:         ");
        for (i = 0; i <= ADV_MAX_TID; i++)
                seq_printf(m, " %c",
                           (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
                word = ep_3550->tagqng_able;
@@ -3234,11 +3225,11 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
        } else {
                word = ep_38C1600->tagqng_able;
        }
-       seq_printf(m, " Command Queuing:     ");
+       seq_puts(m, " Command Queuing:     ");
        for (i = 0; i <= ADV_MAX_TID; i++)
                seq_printf(m, " %c",
                           (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
                word = ep_3550->start_motor;
@@ -3247,28 +3238,28 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
        } else {
                word = ep_38C1600->start_motor;
        }
-       seq_printf(m, " Start Motor:         ");
+       seq_puts(m, " Start Motor:         ");
        for (i = 0; i <= ADV_MAX_TID; i++)
                seq_printf(m, " %c",
                           (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
-               seq_printf(m, " Synchronous Transfer:");
+               seq_puts(m, " Synchronous Transfer:");
                for (i = 0; i <= ADV_MAX_TID; i++)
                        seq_printf(m, " %c",
                                   (ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
                                   'Y' : 'N');
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
 
        if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
-               seq_printf(m, " Ultra Transfer:      ");
+               seq_puts(m, " Ultra Transfer:      ");
                for (i = 0; i <= ADV_MAX_TID; i++)
                        seq_printf(m, " %c",
                                   (ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i))
                                   ? 'Y' : 'N');
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
 
        if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
@@ -3278,16 +3269,15 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
        } else {
                word = ep_38C1600->wdtr_able;
        }
-       seq_printf(m, " Wide Transfer:       ");
+       seq_puts(m, " Wide Transfer:       ");
        for (i = 0; i <= ADV_MAX_TID; i++)
                seq_printf(m, " %c",
                           (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 ||
            adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) {
-               seq_printf(m,
-                          " Synchronous Transfer Speed (Mhz):\n  ");
+               seq_puts(m, " Synchronous Transfer Speed (Mhz):\n  ");
                for (i = 0; i <= ADV_MAX_TID; i++) {
                        char *speed_str;
 
@@ -3325,10 +3315,10 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
                        }
                        seq_printf(m, "%X:%s ", i, speed_str);
                        if (i == 7)
-                               seq_printf(m, "\n  ");
+                               seq_puts(m, "\n  ");
                        sdtr_speed >>= 4;
                }
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
 }
 
@@ -3403,7 +3393,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
        seq_printf(m,
                   " Total Command Pending: %d\n", v->cur_total_qng);
 
-       seq_printf(m, " Command Queuing:");
+       seq_puts(m, " Command Queuing:");
        for (i = 0; i <= ASC_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3413,10 +3403,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
                           i,
                           (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
        }
-       seq_printf(m, "\n");
 
        /* Current number of commands waiting for a device. */
-       seq_printf(m, " Command Queue Pending:");
+       seq_puts(m, "\n Command Queue Pending:");
        for (i = 0; i <= ASC_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3424,10 +3413,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
                }
                seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]);
        }
-       seq_printf(m, "\n");
 
        /* Current limit on number of commands that can be sent to a device. */
-       seq_printf(m, " Command Queue Limit:");
+       seq_puts(m, "\n Command Queue Limit:");
        for (i = 0; i <= ASC_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3435,10 +3423,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
                }
                seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]);
        }
-       seq_printf(m, "\n");
 
        /* Indicate whether the device has returned queue full status. */
-       seq_printf(m, " Command Queue Full:");
+       seq_puts(m, "\n Command Queue Full:");
        for (i = 0; i <= ASC_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3450,9 +3437,8 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
                else
                        seq_printf(m, " %X:N", i);
        }
-       seq_printf(m, "\n");
 
-       seq_printf(m, " Synchronous Transfer:");
+       seq_puts(m, "\n Synchronous Transfer:");
        for (i = 0; i <= ASC_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3462,7 +3448,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
                           i,
                           (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        for (i = 0; i <= ASC_MAX_TID; i++) {
                uchar syn_period_ix;
@@ -3476,7 +3462,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
                seq_printf(m, "  %X:", i);
 
                if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) {
-                       seq_printf(m, " Asynchronous");
+                       seq_puts(m, " Asynchronous");
                } else {
                        syn_period_ix =
                            (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index -
@@ -3494,16 +3480,15 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
                }
 
                if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
-                       seq_printf(m, "*\n");
+                       seq_puts(m, "*\n");
                        renegotiate = 1;
                } else {
-                       seq_printf(m, "\n");
+                       seq_putc(m, '\n');
                }
        }
 
        if (renegotiate) {
-               seq_printf(m,
-                          " * = Re-negotiation pending before next command.\n");
+               seq_puts(m, " * = Re-negotiation pending before next command.\n");
        }
 }
 
@@ -3548,7 +3533,7 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
                   c->mcode_date, c->mcode_version);
 
        AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
-       seq_printf(m, " Queuing Enabled:");
+       seq_puts(m, " Queuing Enabled:");
        for (i = 0; i <= ADV_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3559,9 +3544,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
                           i,
                           (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
        }
-       seq_printf(m, "\n");
 
-       seq_printf(m, " Queue Limit:");
+       seq_puts(m, "\n Queue Limit:");
        for (i = 0; i <= ADV_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3573,9 +3557,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 
                seq_printf(m, " %X:%d", i, lrambyte);
        }
-       seq_printf(m, "\n");
 
-       seq_printf(m, " Command Pending:");
+       seq_puts(m, "\n Command Pending:");
        for (i = 0; i <= ADV_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3587,10 +3570,10 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 
                seq_printf(m, " %X:%d", i, lrambyte);
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
-       seq_printf(m, " Wide Enabled:");
+       seq_puts(m, " Wide Enabled:");
        for (i = 0; i <= ADV_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3601,10 +3584,10 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
                           i,
                           (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done);
-       seq_printf(m, " Transfer Bit Width:");
+       seq_puts(m, " Transfer Bit Width:");
        for (i = 0; i <= ADV_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3620,14 +3603,14 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
 
                if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) &&
                    (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
-                       seq_printf(m, "*");
+                       seq_putc(m, '*');
                        renegotiate = 1;
                }
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
-       seq_printf(m, " Synchronous Enabled:");
+       seq_puts(m, " Synchronous Enabled:");
        for (i = 0; i <= ADV_MAX_TID; i++) {
                if ((chip_scsi_id == i) ||
                    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
@@ -3638,7 +3621,7 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
                           i,
                           (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done);
        for (i = 0; i <= ADV_MAX_TID; i++) {
@@ -3657,14 +3640,14 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
                seq_printf(m, "  %X:", i);
 
                if ((lramword & 0x1F) == 0) {   /* Check for REQ/ACK Offset 0. */
-                       seq_printf(m, " Asynchronous");
+                       seq_puts(m, " Asynchronous");
                } else {
-                       seq_printf(m, " Transfer Period Factor: ");
+                       seq_puts(m, " Transfer Period Factor: ");
 
                        if ((lramword & 0x1F00) == 0x1100) {    /* 80 Mhz */
-                               seq_printf(m, "9 (80.0 Mhz),");
+                               seq_puts(m, "9 (80.0 Mhz),");
                        } else if ((lramword & 0x1F00) == 0x1000) {     /* 40 Mhz */
-                               seq_printf(m, "10 (40.0 Mhz),");
+                               seq_puts(m, "10 (40.0 Mhz),");
                        } else {        /* 20 Mhz or below. */
 
                                period = (((lramword >> 8) * 25) + 50) / 4;
@@ -3684,16 +3667,15 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
                }
 
                if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
-                       seq_printf(m, "*\n");
+                       seq_puts(m, "*\n");
                        renegotiate = 1;
                } else {
-                       seq_printf(m, "\n");
+                       seq_putc(m, '\n');
                }
        }
 
        if (renegotiate) {
-               seq_printf(m,
-                          " * = Re-negotiation pending before next command.\n");
+               seq_puts(m, " * = Re-negotiation pending before next command.\n");
        }
 }
 
index 2b960b3..e31c460 100644 (file)
@@ -2490,299 +2490,296 @@ static void show_queues(struct Scsi_Host *shpnt)
        disp_enintr(shpnt);
 }
 
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ##args)
-
 static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
 {
        int i;
 
-       SPRINTF("%p: target=%d; lun=%d; cmnd=( ",
+       seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
                ptr, ptr->device->id, (u8)ptr->device->lun);
 
        for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++)
-               SPRINTF("0x%02x ", ptr->cmnd[i]);
+               seq_printf(m, "0x%02x ", ptr->cmnd[i]);
 
-       SPRINTF("); resid=%d; residual=%d; buffers=%d; phase |",
+       seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
                scsi_get_resid(ptr), ptr->SCp.this_residual,
                ptr->SCp.buffers_residual);
 
        if (ptr->SCp.phase & not_issued)
-               SPRINTF("not issued|");
+               seq_puts(m, "not issued|");
        if (ptr->SCp.phase & selecting)
-               SPRINTF("selecting|");
+               seq_puts(m, "selecting|");
        if (ptr->SCp.phase & disconnected)
-               SPRINTF("disconnected|");
+               seq_puts(m, "disconnected|");
        if (ptr->SCp.phase & aborted)
-               SPRINTF("aborted|");
+               seq_puts(m, "aborted|");
        if (ptr->SCp.phase & identified)
-               SPRINTF("identified|");
+               seq_puts(m, "identified|");
        if (ptr->SCp.phase & completed)
-               SPRINTF("completed|");
+               seq_puts(m, "completed|");
        if (ptr->SCp.phase & spiordy)
-               SPRINTF("spiordy|");
+               seq_puts(m, "spiordy|");
        if (ptr->SCp.phase & syncneg)
-               SPRINTF("syncneg|");
-       SPRINTF("; next=0x%p\n", SCNEXT(ptr));
+               seq_puts(m, "syncneg|");
+       seq_printf(m, "; next=0x%p\n", SCNEXT(ptr));
 }
 
 static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt)
 {
        int s;
 
-       SPRINTF("\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name);
+       seq_printf(m, "\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name);
 
        s = GETPORT(SCSISEQ);
-       SPRINTF("SCSISEQ( ");
+       seq_puts(m, "SCSISEQ( ");
        if (s & TEMODEO)
-               SPRINTF("TARGET MODE ");
+               seq_puts(m, "TARGET MODE ");
        if (s & ENSELO)
-               SPRINTF("SELO ");
+               seq_puts(m, "SELO ");
        if (s & ENSELI)
-               SPRINTF("SELI ");
+               seq_puts(m, "SELI ");
        if (s & ENRESELI)
-               SPRINTF("RESELI ");
+               seq_puts(m, "RESELI ");
        if (s & ENAUTOATNO)
-               SPRINTF("AUTOATNO ");
+               seq_puts(m, "AUTOATNO ");
        if (s & ENAUTOATNI)
-               SPRINTF("AUTOATNI ");
+               seq_puts(m, "AUTOATNI ");
        if (s & ENAUTOATNP)
-               SPRINTF("AUTOATNP ");
+               seq_puts(m, "AUTOATNP ");
        if (s & SCSIRSTO)
-               SPRINTF("SCSIRSTO ");
-       SPRINTF(");");
+               seq_puts(m, "SCSIRSTO ");
+       seq_puts(m, ");");
 
-       SPRINTF(" SCSISIG(");
+       seq_puts(m, " SCSISIG(");
        s = GETPORT(SCSISIG);
        switch (s & P_MASK) {
        case P_DATAO:
-               SPRINTF("DATA OUT");
+               seq_puts(m, "DATA OUT");
                break;
        case P_DATAI:
-               SPRINTF("DATA IN");
+               seq_puts(m, "DATA IN");
                break;
        case P_CMD:
-               SPRINTF("COMMAND");
+               seq_puts(m, "COMMAND");
                break;
        case P_STATUS:
-               SPRINTF("STATUS");
+               seq_puts(m, "STATUS");
                break;
        case P_MSGO:
-               SPRINTF("MESSAGE OUT");
+               seq_puts(m, "MESSAGE OUT");
                break;
        case P_MSGI:
-               SPRINTF("MESSAGE IN");
+               seq_puts(m, "MESSAGE IN");
                break;
        default:
-               SPRINTF("*invalid*");
+               seq_puts(m, "*invalid*");
                break;
        }
 
-       SPRINTF("); ");
+       seq_puts(m, "); ");
 
-       SPRINTF("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
+       seq_printf(m, "INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
 
-       SPRINTF("SSTAT( ");
+       seq_puts(m, "SSTAT( ");
        s = GETPORT(SSTAT0);
        if (s & TARGET)
-               SPRINTF("TARGET ");
+               seq_puts(m, "TARGET ");
        if (s & SELDO)
-               SPRINTF("SELDO ");
+               seq_puts(m, "SELDO ");
        if (s & SELDI)
-               SPRINTF("SELDI ");
+               seq_puts(m, "SELDI ");
        if (s & SELINGO)
-               SPRINTF("SELINGO ");
+               seq_puts(m, "SELINGO ");
        if (s & SWRAP)
-               SPRINTF("SWRAP ");
+               seq_puts(m, "SWRAP ");
        if (s & SDONE)
-               SPRINTF("SDONE ");
+               seq_puts(m, "SDONE ");
        if (s & SPIORDY)
-               SPRINTF("SPIORDY ");
+               seq_puts(m, "SPIORDY ");
        if (s & DMADONE)
-               SPRINTF("DMADONE ");
+               seq_puts(m, "DMADONE ");
 
        s = GETPORT(SSTAT1);
        if (s & SELTO)
-               SPRINTF("SELTO ");
+               seq_puts(m, "SELTO ");
        if (s & ATNTARG)
-               SPRINTF("ATNTARG ");
+               seq_puts(m, "ATNTARG ");
        if (s & SCSIRSTI)
-               SPRINTF("SCSIRSTI ");
+               seq_puts(m, "SCSIRSTI ");
        if (s & PHASEMIS)
-               SPRINTF("PHASEMIS ");
+               seq_puts(m, "PHASEMIS ");
        if (s & BUSFREE)
-               SPRINTF("BUSFREE ");
+               seq_puts(m, "BUSFREE ");
        if (s & SCSIPERR)
-               SPRINTF("SCSIPERR ");
+               seq_puts(m, "SCSIPERR ");
        if (s & PHASECHG)
-               SPRINTF("PHASECHG ");
+               seq_puts(m, "PHASECHG ");
        if (s & REQINIT)
-               SPRINTF("REQINIT ");
-       SPRINTF("); ");
+               seq_puts(m, "REQINIT ");
+       seq_puts(m, "); ");
 
 
-       SPRINTF("SSTAT( ");
+       seq_puts(m, "SSTAT( ");
 
        s = GETPORT(SSTAT0) & GETPORT(SIMODE0);
 
        if (s & TARGET)
-               SPRINTF("TARGET ");
+               seq_puts(m, "TARGET ");
        if (s & SELDO)
-               SPRINTF("SELDO ");
+               seq_puts(m, "SELDO ");
        if (s & SELDI)
-               SPRINTF("SELDI ");
+               seq_puts(m, "SELDI ");
        if (s & SELINGO)
-               SPRINTF("SELINGO ");
+               seq_puts(m, "SELINGO ");
        if (s & SWRAP)
-               SPRINTF("SWRAP ");
+               seq_puts(m, "SWRAP ");
        if (s & SDONE)
-               SPRINTF("SDONE ");
+               seq_puts(m, "SDONE ");
        if (s & SPIORDY)
-               SPRINTF("SPIORDY ");
+               seq_puts(m, "SPIORDY ");
        if (s & DMADONE)
-               SPRINTF("DMADONE ");
+               seq_puts(m, "DMADONE ");
 
        s = GETPORT(SSTAT1) & GETPORT(SIMODE1);
 
        if (s & SELTO)
-               SPRINTF("SELTO ");
+               seq_puts(m, "SELTO ");
        if (s & ATNTARG)
-               SPRINTF("ATNTARG ");
+               seq_puts(m, "ATNTARG ");
        if (s & SCSIRSTI)
-               SPRINTF("SCSIRSTI ");
+               seq_puts(m, "SCSIRSTI ");
        if (s & PHASEMIS)
-               SPRINTF("PHASEMIS ");
+               seq_puts(m, "PHASEMIS ");
        if (s & BUSFREE)
-               SPRINTF("BUSFREE ");
+               seq_puts(m, "BUSFREE ");
        if (s & SCSIPERR)
-               SPRINTF("SCSIPERR ");
+               seq_puts(m, "SCSIPERR ");
        if (s & PHASECHG)
-               SPRINTF("PHASECHG ");
+               seq_puts(m, "PHASECHG ");
        if (s & REQINIT)
-               SPRINTF("REQINIT ");
-       SPRINTF("); ");
+               seq_puts(m, "REQINIT ");
+       seq_puts(m, "); ");
 
-       SPRINTF("SXFRCTL0( ");
+       seq_puts(m, "SXFRCTL0( ");
 
        s = GETPORT(SXFRCTL0);
        if (s & SCSIEN)
-               SPRINTF("SCSIEN ");
+               seq_puts(m, "SCSIEN ");
        if (s & DMAEN)
-               SPRINTF("DMAEN ");
+               seq_puts(m, "DMAEN ");
        if (s & CH1)
-               SPRINTF("CH1 ");
+               seq_puts(m, "CH1 ");
        if (s & CLRSTCNT)
-               SPRINTF("CLRSTCNT ");
+               seq_puts(m, "CLRSTCNT ");
        if (s & SPIOEN)
-               SPRINTF("SPIOEN ");
+               seq_puts(m, "SPIOEN ");
        if (s & CLRCH1)
-               SPRINTF("CLRCH1 ");
-       SPRINTF("); ");
+               seq_puts(m, "CLRCH1 ");
+       seq_puts(m, "); ");
 
-       SPRINTF("SIGNAL( ");
+       seq_puts(m, "SIGNAL( ");
 
        s = GETPORT(SCSISIG);
        if (s & SIG_ATNI)
-               SPRINTF("ATNI ");
+               seq_puts(m, "ATNI ");
        if (s & SIG_SELI)
-               SPRINTF("SELI ");
+               seq_puts(m, "SELI ");
        if (s & SIG_BSYI)
-               SPRINTF("BSYI ");
+               seq_puts(m, "BSYI ");
        if (s & SIG_REQI)
-               SPRINTF("REQI ");
+               seq_puts(m, "REQI ");
        if (s & SIG_ACKI)
-               SPRINTF("ACKI ");
-       SPRINTF("); ");
+               seq_puts(m, "ACKI ");
+       seq_puts(m, "); ");
 
-       SPRINTF("SELID(%02x), ", GETPORT(SELID));
+       seq_printf(m, "SELID(%02x), ", GETPORT(SELID));
 
-       SPRINTF("STCNT(%d), ", GETSTCNT());
+       seq_printf(m, "STCNT(%d), ", GETSTCNT());
 
-       SPRINTF("SSTAT2( ");
+       seq_puts(m, "SSTAT2( ");
 
        s = GETPORT(SSTAT2);
        if (s & SOFFSET)
-               SPRINTF("SOFFSET ");
+               seq_puts(m, "SOFFSET ");
        if (s & SEMPTY)
-               SPRINTF("SEMPTY ");
+               seq_puts(m, "SEMPTY ");
        if (s & SFULL)
-               SPRINTF("SFULL ");
-       SPRINTF("); SFCNT (%d); ", s & (SFULL | SFCNT));
+               seq_puts(m, "SFULL ");
+       seq_printf(m, "); SFCNT (%d); ", s & (SFULL | SFCNT));
 
        s = GETPORT(SSTAT3);
-       SPRINTF("SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f);
+       seq_printf(m, "SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f);
 
-       SPRINTF("SSTAT4( ");
+       seq_puts(m, "SSTAT4( ");
        s = GETPORT(SSTAT4);
        if (s & SYNCERR)
-               SPRINTF("SYNCERR ");
+               seq_puts(m, "SYNCERR ");
        if (s & FWERR)
-               SPRINTF("FWERR ");
+               seq_puts(m, "FWERR ");
        if (s & FRERR)
-               SPRINTF("FRERR ");
-       SPRINTF("); ");
+               seq_puts(m, "FRERR ");
+       seq_puts(m, "); ");
 
-       SPRINTF("DMACNTRL0( ");
+       seq_puts(m, "DMACNTRL0( ");
        s = GETPORT(DMACNTRL0);
-       SPRINTF("%s ", s & _8BIT ? "8BIT" : "16BIT");
-       SPRINTF("%s ", s & DMA ? "DMA" : "PIO");
-       SPRINTF("%s ", s & WRITE_READ ? "WRITE" : "READ");
+       seq_printf(m, "%s ", s & _8BIT ? "8BIT" : "16BIT");
+       seq_printf(m, "%s ", s & DMA ? "DMA" : "PIO");
+       seq_printf(m, "%s ", s & WRITE_READ ? "WRITE" : "READ");
        if (s & ENDMA)
-               SPRINTF("ENDMA ");
+               seq_puts(m, "ENDMA ");
        if (s & INTEN)
-               SPRINTF("INTEN ");
+               seq_puts(m, "INTEN ");
        if (s & RSTFIFO)
-               SPRINTF("RSTFIFO ");
+               seq_puts(m, "RSTFIFO ");
        if (s & SWINT)
-               SPRINTF("SWINT ");
-       SPRINTF("); ");
+               seq_puts(m, "SWINT ");
+       seq_puts(m, "); ");
 
-       SPRINTF("DMASTAT( ");
+       seq_puts(m, "DMASTAT( ");
        s = GETPORT(DMASTAT);
        if (s & ATDONE)
-               SPRINTF("ATDONE ");
+               seq_puts(m, "ATDONE ");
        if (s & WORDRDY)
-               SPRINTF("WORDRDY ");
+               seq_puts(m, "WORDRDY ");
        if (s & DFIFOFULL)
-               SPRINTF("DFIFOFULL ");
+               seq_puts(m, "DFIFOFULL ");
        if (s & DFIFOEMP)
-               SPRINTF("DFIFOEMP ");
-       SPRINTF(")\n");
+               seq_puts(m, "DFIFOEMP ");
+       seq_puts(m, ")\n");
 
-       SPRINTF("enabled interrupts( ");
+       seq_puts(m, "enabled interrupts( ");
 
        s = GETPORT(SIMODE0);
        if (s & ENSELDO)
-               SPRINTF("ENSELDO ");
+               seq_puts(m, "ENSELDO ");
        if (s & ENSELDI)
-               SPRINTF("ENSELDI ");
+               seq_puts(m, "ENSELDI ");
        if (s & ENSELINGO)
-               SPRINTF("ENSELINGO ");
+               seq_puts(m, "ENSELINGO ");
        if (s & ENSWRAP)
-               SPRINTF("ENSWRAP ");
+               seq_puts(m, "ENSWRAP ");
        if (s & ENSDONE)
-               SPRINTF("ENSDONE ");
+               seq_puts(m, "ENSDONE ");
        if (s & ENSPIORDY)
-               SPRINTF("ENSPIORDY ");
+               seq_puts(m, "ENSPIORDY ");
        if (s & ENDMADONE)
-               SPRINTF("ENDMADONE ");
+               seq_puts(m, "ENDMADONE ");
 
        s = GETPORT(SIMODE1);
        if (s & ENSELTIMO)
-               SPRINTF("ENSELTIMO ");
+               seq_puts(m, "ENSELTIMO ");
        if (s & ENATNTARG)
-               SPRINTF("ENATNTARG ");
+               seq_puts(m, "ENATNTARG ");
        if (s & ENPHASEMIS)
-               SPRINTF("ENPHASEMIS ");
+               seq_puts(m, "ENPHASEMIS ");
        if (s & ENBUSFREE)
-               SPRINTF("ENBUSFREE ");
+               seq_puts(m, "ENBUSFREE ");
        if (s & ENSCSIPERR)
-               SPRINTF("ENSCSIPERR ");
+               seq_puts(m, "ENSCSIPERR ");
        if (s & ENPHASECHG)
-               SPRINTF("ENPHASECHG ");
+               seq_puts(m, "ENPHASECHG ");
        if (s & ENREQINIT)
-               SPRINTF("ENREQINIT ");
-       SPRINTF(")\n");
+               seq_puts(m, "ENREQINIT ");
+       seq_puts(m, ")\n");
 }
 
 static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
@@ -2825,56 +2822,56 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
        Scsi_Cmnd *ptr;
        unsigned long flags;
 
-       SPRINTF(AHA152X_REVID "\n");
+       seq_puts(m, AHA152X_REVID "\n");
 
-       SPRINTF("ioports 0x%04lx to 0x%04lx\n",
+       seq_printf(m, "ioports 0x%04lx to 0x%04lx\n",
                shpnt->io_port, shpnt->io_port + shpnt->n_io_port - 1);
-       SPRINTF("interrupt 0x%02x\n", shpnt->irq);
-       SPRINTF("disconnection/reconnection %s\n",
+       seq_printf(m, "interrupt 0x%02x\n", shpnt->irq);
+       seq_printf(m, "disconnection/reconnection %s\n",
                RECONNECT ? "enabled" : "disabled");
-       SPRINTF("parity checking %s\n",
+       seq_printf(m, "parity checking %s\n",
                PARITY ? "enabled" : "disabled");
-       SPRINTF("synchronous transfers %s\n",
+       seq_printf(m, "synchronous transfers %s\n",
                SYNCHRONOUS ? "enabled" : "disabled");
-       SPRINTF("%d commands currently queued\n", HOSTDATA(shpnt)->commands);
+       seq_printf(m, "%d commands currently queued\n", HOSTDATA(shpnt)->commands);
 
        if(SYNCHRONOUS) {
-               SPRINTF("synchronously operating targets (tick=50 ns):\n");
+               seq_puts(m, "synchronously operating targets (tick=50 ns):\n");
                for (i = 0; i < 8; i++)
                        if (HOSTDATA(shpnt)->syncrate[i] & 0x7f)
-                               SPRINTF("target %d: period %dT/%dns; req/ack offset %d\n",
+                               seq_printf(m, "target %d: period %dT/%dns; req/ack offset %d\n",
                                        i,
                                        (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2),
                                        (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50,
                                    HOSTDATA(shpnt)->syncrate[i] & 0x0f);
        }
-       SPRINTF("\nqueue status:\n");
+       seq_puts(m, "\nqueue status:\n");
        DO_LOCK(flags);
        if (ISSUE_SC) {
-               SPRINTF("not yet issued commands:\n");
+               seq_puts(m, "not yet issued commands:\n");
                for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr))
                        get_command(m, ptr);
        } else
-               SPRINTF("no not yet issued commands\n");
+               seq_puts(m, "no not yet issued commands\n");
        DO_UNLOCK(flags);
 
        if (CURRENT_SC) {
-               SPRINTF("current command:\n");
+               seq_puts(m, "current command:\n");
                get_command(m, CURRENT_SC);
        } else
-               SPRINTF("no current command\n");
+               seq_puts(m, "no current command\n");
 
        if (DISCONNECTED_SC) {
-               SPRINTF("disconnected commands:\n");
+               seq_puts(m, "disconnected commands:\n");
                for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr))
                        get_command(m, ptr);
        } else
-               SPRINTF("no disconnected commands\n");
+               seq_puts(m, "no disconnected commands\n");
 
        get_ports(m, shpnt);
 
 #if defined(AHA152X_STAT)
-       SPRINTF("statistics:\n"
+       seq_printf(m, "statistics:\n"
                "total commands:               %d\n"
                "disconnections:               %d\n"
                "busfree with check condition: %d\n"
@@ -2894,7 +2891,7 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
                HOSTDATA(shpnt)->busfree_without_done_command,
                HOSTDATA(shpnt)->busfree_without_any_action);
        for(i=0; i<maxstate; i++) {
-               SPRINTF("%-10s %-12d %-12d %-12ld\n",
+               seq_printf(m, "%-10s %-12d %-12d %-12ld\n",
                        states[i].name,
                        HOSTDATA(shpnt)->count_trans[i],
                        HOSTDATA(shpnt)->count[i],
index 27dbfcc..add2da5 100644
@@ -97,7 +97,7 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo)
        u_int mb;
 
        if (tinfo->period == AHD_PERIOD_UNKNOWN) {
-               seq_printf(m, "Renegotiation Pending\n");
+               seq_puts(m, "Renegotiation Pending\n");
                return;
        }
         speed = 3300;
@@ -119,40 +119,38 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo)
                printed_options = 0;
                seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000);
                if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
-                       seq_printf(m, " RDSTRM");
+                       seq_puts(m, " RDSTRM");
                        printed_options++;
                }
                if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
-                       seq_printf(m, "%s", printed_options ? "|DT" : " DT");
+                       seq_puts(m, printed_options ? "|DT" : " DT");
                        printed_options++;
                }
                if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
-                       seq_printf(m, "%s", printed_options ? "|IU" : " IU");
+                       seq_puts(m, printed_options ? "|IU" : " IU");
                        printed_options++;
                }
                if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) {
-                       seq_printf(m, "%s",
-                                 printed_options ? "|RTI" : " RTI");
+                       seq_puts(m, printed_options ? "|RTI" : " RTI");
                        printed_options++;
                }
                if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
-                       seq_printf(m, "%s",
-                                 printed_options ? "|QAS" : " QAS");
+                       seq_puts(m, printed_options ? "|QAS" : " QAS");
                        printed_options++;
                }
        }
 
        if (tinfo->width > 0) {
                if (freq != 0) {
-                       seq_printf(m, ", ");
+                       seq_puts(m, ", ");
                } else {
-                       seq_printf(m, " (");
+                       seq_puts(m, " (");
                }
                seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width));
        } else if (freq != 0) {
-               seq_printf(m, ")");
+               seq_putc(m, ')');
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 }
 
 static void
@@ -167,15 +165,15 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m,
        tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
                                    target_id, &tstate);
        seq_printf(m, "Target %d Negotiation Settings\n", target_id);
-       seq_printf(m, "\tUser: ");
+       seq_puts(m, "\tUser: ");
        ahd_format_transinfo(m, &tinfo->user);
        starget = ahd->platform_data->starget[target_id];
        if (starget == NULL)
                return;
 
-       seq_printf(m, "\tGoal: ");
+       seq_puts(m, "\tGoal: ");
        ahd_format_transinfo(m, &tinfo->goal);
-       seq_printf(m, "\tCurr: ");
+       seq_puts(m, "\tCurr: ");
        ahd_format_transinfo(m, &tinfo->curr);
 
        for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
@@ -291,19 +289,19 @@ ahd_linux_show_info(struct seq_file *m, struct Scsi_Host *shost)
        max_targ = 16;
 
        if (ahd->seep_config == NULL)
-               seq_printf(m, "No Serial EEPROM\n");
+               seq_puts(m, "No Serial EEPROM\n");
        else {
-               seq_printf(m, "Serial EEPROM:\n");
+               seq_puts(m, "Serial EEPROM:\n");
                for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) {
                        if (((i % 8) == 0) && (i != 0)) {
-                               seq_printf(m, "\n");
+                               seq_putc(m, '\n');
                        }
                        seq_printf(m, "0x%.4x ",
                                  ((uint16_t*)ahd->seep_config)[i]);
                }
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        if ((ahd->features & AHD_WIDE) == 0)
                max_targ = 8;
index 64eec6c..1845960 100644
@@ -119,15 +119,15 @@ ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo)
 
        if (tinfo->width > 0) {
                if (freq != 0) {
-                       seq_printf(m, ", ");
+                       seq_puts(m, ", ");
                } else {
-                       seq_printf(m, " (");
+                       seq_puts(m, " (");
                }
                seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width));
        } else if (freq != 0) {
-               seq_printf(m, ")");
+               seq_putc(m, ')');
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 }
 
 static void
@@ -145,15 +145,15 @@ ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m,
        if ((ahc->features & AHC_TWIN) != 0)
                seq_printf(m, "Channel %c ", channel);
        seq_printf(m, "Target %d Negotiation Settings\n", target_id);
-       seq_printf(m, "\tUser: ");
+       seq_puts(m, "\tUser: ");
        ahc_format_transinfo(m, &tinfo->user);
        starget = ahc->platform_data->starget[target_offset];
        if (!starget)
                return;
 
-       seq_printf(m, "\tGoal: ");
+       seq_puts(m, "\tGoal: ");
        ahc_format_transinfo(m, &tinfo->goal);
-       seq_printf(m, "\tCurr: ");
+       seq_puts(m, "\tCurr: ");
        ahc_format_transinfo(m, &tinfo->curr);
 
        for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
@@ -303,19 +303,19 @@ ahc_linux_show_info(struct seq_file *m, struct Scsi_Host *shost)
 
 
        if (ahc->seep_config == NULL)
-               seq_printf(m, "No Serial EEPROM\n");
+               seq_puts(m, "No Serial EEPROM\n");
        else {
-               seq_printf(m, "Serial EEPROM:\n");
+               seq_puts(m, "Serial EEPROM:\n");
                for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) {
                        if (((i % 8) == 0) && (i != 0)) {
-                               seq_printf(m, "\n");
+                               seq_putc(m, '\n');
                        }
                        seq_printf(m, "0x%.4x ",
                                  ((uint16_t*)ahc->seep_config)[i]);
                }
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        max_targ = 16;
        if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
index e64c3af..decdc71 100644
@@ -2990,7 +2990,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
        struct fas216_device *dev;
        struct scsi_device *scd;
 
-       seq_printf(m, "Device/Lun TaggedQ       Parity   Sync\n");
+       seq_puts(m, "Device/Lun TaggedQ       Parity   Sync\n");
 
        shost_for_each_device(scd, info->host) {
                dev = &info->device[scd->id];
@@ -3000,7 +3000,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
                                     scd->simple_tags ? "en" : "dis",
                                     scd->current_tag);
                else
-                       seq_printf(m, "unsupported   ");
+                       seq_puts(m, "unsupported   ");
 
                seq_printf(m, "%3sabled ", dev->parity_enabled ? "en" : "dis");
 
@@ -3008,7 +3008,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
                        seq_printf(m, "offset %d, %d ns\n",
                                     dev->sof, dev->period * 4);
                else
-                       seq_printf(m, "async\n");
+                       seq_puts(m, "async\n");
        }
 }
 
index 6daed6b..a702554 100644
@@ -711,12 +711,12 @@ static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
        unsigned char *command;
        seq_printf(m, "scsi%d: destination target %d, lun %llu\n",
                H_NO(cmd), cmd->device->id, cmd->device->lun);
-       seq_printf(m, "        command = ");
+       seq_puts(m, "        command = ");
        command = cmd->cmnd;
        seq_printf(m, "%2d (0x%02x)", command[0], command[0]);
        for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
                seq_printf(m, " %02x", command[i]);
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 }
 
 static int __maybe_unused NCR5380_show_info(struct seq_file *m,
index a795d81..0836433 100644
@@ -3101,9 +3101,8 @@ static const char *atp870u_info(struct Scsi_Host *notused)
 
 static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
 {
-       seq_printf(m, "ACARD AEC-671X Driver Version: 2.6+ac\n");
-       seq_printf(m, "\n");
-       seq_printf(m, "Adapter Configuration:\n");
+       seq_puts(m, "ACARD AEC-671X Driver Version: 2.6+ac\n\n"
+               "Adapter Configuration:\n");
        seq_printf(m, "               Base IO: %#.4lx\n", HBAptr->io_port);
        seq_printf(m, "                   IRQ: %d\n", HBAptr->irq);
        return 0;
index 6bac8a7..0045742 100644
@@ -194,16 +194,10 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
 
  retry:
        errno = 0;
-       if (debug) {
-               DPRINTK("command: ");
-               __scsi_print_command(cmd, cmd_len);
-       }
-
        result = scsi_execute_req(ch->device, cmd, direction, buffer,
                                  buflength, &sshdr, timeout * HZ,
                                  MAX_RETRIES, NULL);
 
-       DPRINTK("result: 0x%x\n",result);
        if (driver_byte(result) & DRIVER_SENSE) {
                if (debug)
                        scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
index e2068a2..fa09d4b 100644
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_dbg.h>
 
-
-
 /* Commands with service actions that change the command name */
 #define THIRD_PARTY_COPY_OUT 0x83
 #define THIRD_PARTY_COPY_IN 0x84
 
-#define VENDOR_SPECIFIC_CDB 0xc0
-
 struct sa_name_list {
        int opcode;
        const struct value_name_pair *arr;
@@ -37,7 +33,6 @@ struct value_name_pair {
        const char * name;
 };
 
-#ifdef CONFIG_SCSI_CONSTANTS
 static const char * cdb_byte0_names[] = {
 /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense",
 /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL,
@@ -261,28 +256,8 @@ static struct sa_name_list sa_names_arr[] = {
        {0, NULL, 0},
 };
 
-#else /* ifndef CONFIG_SCSI_CONSTANTS */
-static const char *cdb_byte0_names[0];
-
-static struct sa_name_list sa_names_arr[] = {
-       {VARIABLE_LENGTH_CMD, NULL, 0},
-       {MAINTENANCE_IN, NULL, 0},
-       {MAINTENANCE_OUT, NULL, 0},
-       {PERSISTENT_RESERVE_IN, NULL, 0},
-       {PERSISTENT_RESERVE_OUT, NULL, 0},
-       {SERVICE_ACTION_IN_12, NULL, 0},
-       {SERVICE_ACTION_OUT_12, NULL, 0},
-       {SERVICE_ACTION_BIDIRECTIONAL, NULL, 0},
-       {SERVICE_ACTION_IN_16, NULL, 0},
-       {SERVICE_ACTION_OUT_16, NULL, 0},
-       {THIRD_PARTY_COPY_IN, NULL, 0},
-       {THIRD_PARTY_COPY_OUT, NULL, 0},
-       {0, NULL, 0},
-};
-#endif /* CONFIG_SCSI_CONSTANTS */
-
-static bool scsi_opcode_sa_name(int opcode, int service_action,
-                               const char **cdb_name, const char **sa_name)
+bool scsi_opcode_sa_name(int opcode, int service_action,
+                        const char **cdb_name, const char **sa_name)
 {
        struct sa_name_list *sa_name_ptr;
        const struct value_name_pair *arr = NULL;
@@ -315,76 +290,6 @@ static bool scsi_opcode_sa_name(int opcode, int service_action,
        return true;
 }
 
-static void print_opcode_name(const unsigned char *cdbp, size_t cdb_len)
-{
-       int sa, cdb0;
-       const char *cdb_name = NULL, *sa_name = NULL;
-
-       cdb0 = cdbp[0];
-       if (cdb0 == VARIABLE_LENGTH_CMD) {
-               if (cdb_len < 10) {
-                       printk("short variable length command, len=%zu",
-                              cdb_len);
-                       return;
-               }
-               sa = (cdbp[8] << 8) + cdbp[9];
-       } else
-               sa = cdbp[1] & 0x1f;
-
-       if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) {
-               if (cdb_name)
-                       printk("%s", cdb_name);
-               else if (cdb0 >= VENDOR_SPECIFIC_CDB)
-                       printk("cdb[0]=0x%x (vendor)", cdb0);
-               else if (cdb0 >= 0x60 && cdb0 < 0x7e)
-                       printk("cdb[0]=0x%x (reserved)", cdb0);
-               else
-                       printk("cdb[0]=0x%x", cdb0);
-       } else {
-               if (sa_name)
-                       printk("%s", sa_name);
-               else if (cdb_name)
-                       printk("%s, sa=0x%x", cdb_name, sa);
-               else
-                       printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
-       }
-}
-
-void __scsi_print_command(const unsigned char *cdb, size_t cdb_len)
-{
-       int k, len;
-
-       print_opcode_name(cdb, cdb_len);
-       len = scsi_command_size(cdb);
-       if (cdb_len < len)
-               len = cdb_len;
-       /* print out all bytes in cdb */
-       for (k = 0; k < len; ++k)
-               printk(" %02x", cdb[k]);
-       printk("\n");
-}
-EXPORT_SYMBOL(__scsi_print_command);
-
-void scsi_print_command(struct scsi_cmnd *cmd)
-{
-       int k;
-
-       if (cmd->cmnd == NULL)
-               return;
-
-       scmd_printk(KERN_INFO, cmd, "CDB: ");
-       print_opcode_name(cmd->cmnd, cmd->cmd_len);
-
-       /* print out all bytes in cdb */
-       printk(":");
-       for (k = 0; k < cmd->cmd_len; ++k)
-               printk(" %02x", cmd->cmnd[k]);
-       printk("\n");
-}
-EXPORT_SYMBOL(scsi_print_command);
-
-#ifdef CONFIG_SCSI_CONSTANTS
-
 struct error_info {
        unsigned short code12;  /* 0x0302 looks better than 0x03,0x02 */
        const char * text;
@@ -392,7 +297,7 @@ struct error_info {
 
 /*
  * The canonical list of T10 Additional Sense Codes is available at:
- * http://www.t10.org/lists/asc-num.txt [most recent: 20130605]
+ * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
  */
 
 static const struct error_info additional[] =
@@ -421,6 +326,7 @@ static const struct error_info additional[] =
        {0x001E, "Conflicting SA creation request"},
        {0x001F, "Logical unit transitioning to another power condition"},
        {0x0020, "Extended copy information available"},
+       {0x0021, "Atomic command aborted due to ACA"},
 
        {0x0100, "No index/sector signal"},
 
@@ -446,6 +352,7 @@ static const struct error_info additional[] =
        {0x040C, "Logical unit not accessible, target port in unavailable "
         "state"},
        {0x040D, "Logical unit not ready, structure check required"},
+       {0x040E, "Logical unit not ready, security session in progress"},
        {0x0410, "Logical unit not ready, auxiliary memory not accessible"},
        {0x0411, "Logical unit not ready, notify (enable spinup) required"},
        {0x0412, "Logical unit not ready, offline"},
@@ -462,6 +369,11 @@ static const struct error_info additional[] =
        {0x041C, "Logical unit not ready, additional power use not yet "
         "granted"},
        {0x041D, "Logical unit not ready, configuration in progress"},
+       {0x041E, "Logical unit not ready, microcode activation required"},
+       {0x041F, "Logical unit not ready, microcode download required"},
+       {0x0420, "Logical unit not ready, logical unit reset required"},
+       {0x0421, "Logical unit not ready, hard reset required"},
+       {0x0422, "Logical unit not ready, power cycle required"},
 
        {0x0500, "Logical unit does not respond to selection"},
 
@@ -480,6 +392,7 @@ static const struct error_info additional[] =
        {0x0902, "Focus servo failure"},
        {0x0903, "Spindle servo failure"},
        {0x0904, "Head select fault"},
+       {0x0905, "Vibration induced tracking error"},
 
        {0x0A00, "Error log overflow"},
 
@@ -510,6 +423,7 @@ static const struct error_info additional[] =
        {0x0C0D, "Write error - not enough unsolicited data"},
        {0x0C0E, "Multiple write errors"},
        {0x0C0F, "Defects in error window"},
+       {0x0C10, "Incomplete multiple atomic write operations"},
 
        {0x0D00, "Error detected by third party temporary initiator"},
        {0x0D01, "Third party device failure"},
@@ -635,6 +549,10 @@ static const struct error_info additional[] =
        {0x2101, "Invalid element address"},
        {0x2102, "Invalid address for write"},
        {0x2103, "Invalid write crossing layer jump"},
+       {0x2104, "Unaligned write command"},
+       {0x2105, "Write boundary violation"},
+       {0x2106, "Attempt to read invalid data"},
+       {0x2107, "Read boundary violation"},
 
        {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
 
@@ -691,6 +609,7 @@ static const struct error_info additional[] =
        {0x2705, "Permanent write protect"},
        {0x2706, "Conditional write protect"},
        {0x2707, "Space allocation failed write protect"},
+       {0x2708, "Zone is read only"},
 
        {0x2800, "Not ready to ready change, medium may have changed"},
        {0x2801, "Import or export element accessed"},
@@ -743,10 +662,15 @@ static const struct error_info additional[] =
        {0x2C0A, "Partition or collection contains user objects"},
        {0x2C0B, "Not reserved"},
        {0x2C0C, "Orwrite generation does not match"},
+       {0x2C0D, "Reset write pointer not allowed"},
+       {0x2C0E, "Zone is offline"},
 
        {0x2D00, "Overwrite error on update in place"},
 
        {0x2E00, "Insufficient time for operation"},
+       {0x2E01, "Command timeout before processing"},
+       {0x2E02, "Command timeout during processing"},
+       {0x2E03, "Command timeout during processing due to error recovery"},
 
        {0x2F00, "Commands cleared by another initiator"},
        {0x2F01, "Commands cleared by power loss notification"},
@@ -868,6 +792,7 @@ static const struct error_info additional[] =
        {0x3F13, "iSCSI IP address removed"},
        {0x3F14, "iSCSI IP address changed"},
        {0x3F15, "Inspect referrals sense descriptors"},
+       {0x3F16, "Microcode has been changed without reset"},
 /*
  *     {0x40NN, "Ram failure"},
  *     {0x40NN, "Diagnostic failure on component nn"},
@@ -946,6 +871,11 @@ static const struct error_info additional[] =
        {0x5306, "Volume identifier missing"},
        {0x5307, "Duplicate volume identifier"},
        {0x5308, "Element status unknown"},
+       {0x5309, "Data transfer device error - load failed"},
+       {0x530a, "Data transfer device error - unload failed"},
+       {0x530b, "Data transfer device error - unload missing"},
+       {0x530c, "Data transfer device error - eject failed"},
+       {0x530d, "Data transfer device error - library communication failed"},
 
        {0x5400, "Scsi to host system interface failure"},
 
@@ -963,6 +893,7 @@ static const struct error_info additional[] =
        {0x550B, "Insufficient power for operation"},
        {0x550C, "Insufficient resources to create rod"},
        {0x550D, "Insufficient resources to create rod token"},
+       {0x550E, "Insufficient zone resources"},
 
        {0x5700, "Unable to recover table-of-contents"},
 
@@ -1247,15 +1178,12 @@ static const char * const snstext[] = {
        "Completed",        /* F: command completed sense data reported,
                                  may occur for successful command */
 };
-#endif
 
 /* Get sense key string or NULL if not available */
 const char *
 scsi_sense_key_string(unsigned char key) {
-#ifdef CONFIG_SCSI_CONSTANTS
        if (key <= 0xE)
                return snstext[key];
-#endif
        return NULL;
 }
 EXPORT_SYMBOL(scsi_sense_key_string);
@@ -1267,7 +1195,6 @@ EXPORT_SYMBOL(scsi_sense_key_string);
 const char *
 scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
 {
-#ifdef CONFIG_SCSI_CONSTANTS
        int i;
        unsigned short code = ((asc << 8) | ascq);
 
@@ -1283,122 +1210,10 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
                        return additional2[i].str;
                }
        }
-#else
-       *fmt = NULL;
-#endif
        return NULL;
 }
 EXPORT_SYMBOL(scsi_extd_sense_format);
 
-void
-scsi_show_extd_sense(const struct scsi_device *sdev, const char *name,
-                    unsigned char asc, unsigned char ascq)
-{
-       const char *extd_sense_fmt = NULL;
-       const char *extd_sense_str = scsi_extd_sense_format(asc, ascq,
-                                                           &extd_sense_fmt);
-
-       if (extd_sense_str) {
-               if (extd_sense_fmt)
-                       sdev_prefix_printk(KERN_INFO, sdev, name,
-                                          "Add. Sense: %s (%s%x)",
-                                          extd_sense_str, extd_sense_fmt,
-                                          ascq);
-               else
-                       sdev_prefix_printk(KERN_INFO, sdev, name,
-                                          "Add. Sense: %s", extd_sense_str);
-
-       } else {
-               sdev_prefix_printk(KERN_INFO, sdev, name,
-                                  "%sASC=0x%x %sASCQ=0x%x\n",
-                                  asc >= 0x80 ? "<<vendor>> " : "", asc,
-                                  ascq >= 0x80 ? "<<vendor>> " : "", ascq);
-       }
-}
-EXPORT_SYMBOL(scsi_show_extd_sense);
-
-void
-scsi_show_sense_hdr(const struct scsi_device *sdev, const char *name,
-                   const struct scsi_sense_hdr *sshdr)
-{
-       const char *sense_txt;
-
-       sense_txt = scsi_sense_key_string(sshdr->sense_key);
-       if (sense_txt)
-               sdev_prefix_printk(KERN_INFO, sdev, name,
-                                  "Sense Key : %s [%s]%s\n", sense_txt,
-                                  scsi_sense_is_deferred(sshdr) ?
-                                  "deferred" : "current",
-                                  sshdr->response_code >= 0x72 ?
-                                  " [descriptor]" : "");
-       else
-               sdev_prefix_printk(KERN_INFO, sdev, name,
-                                  "Sense Key : 0x%x [%s]%s", sshdr->sense_key,
-                                  scsi_sense_is_deferred(sshdr) ?
-                                  "deferred" : "current",
-                                  sshdr->response_code >= 0x72 ?
-                                  " [descriptor]" : "");
-}
-EXPORT_SYMBOL(scsi_show_sense_hdr);
-
-/*
- * Print normalized SCSI sense header with a prefix.
- */
-void
-scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name,
-                    const struct scsi_sense_hdr *sshdr)
-{
-       scsi_show_sense_hdr(sdev, name, sshdr);
-       scsi_show_extd_sense(sdev, name, sshdr->asc, sshdr->ascq);
-}
-EXPORT_SYMBOL(scsi_print_sense_hdr);
-
-static void
-scsi_dump_sense_buffer(const unsigned char *sense_buffer, int sense_len)
-{
-       int k, num;
-
-       num = (sense_len < 32) ? sense_len : 32;
-       printk("Unrecognized sense data (in hex):");
-       for (k = 0; k < num; ++k) {
-               if (0 == (k % 16)) {
-                       printk("\n");
-                       printk(KERN_INFO "        ");
-               }
-               printk("%02x ", sense_buffer[k]);
-       }
-       printk("\n");
-       return;
-}
-
-/* Normalize and print sense buffer with name prefix */
-void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
-                       const unsigned char *sense_buffer, int sense_len)
-{
-       struct scsi_sense_hdr sshdr;
-
-       if (!scsi_normalize_sense(sense_buffer, sense_len, &sshdr)) {
-               scsi_dump_sense_buffer(sense_buffer, sense_len);
-               return;
-       }
-       scsi_show_sense_hdr(sdev, name, &sshdr);
-       scsi_show_extd_sense(sdev, name, sshdr.asc, sshdr.ascq);
-}
-EXPORT_SYMBOL(__scsi_print_sense);
-
-/* Normalize and print sense buffer in SCSI command */
-void scsi_print_sense(const struct scsi_cmnd *cmd)
-{
-       struct gendisk *disk = cmd->request->rq_disk;
-       const char *disk_name = disk ? disk->disk_name : NULL;
-
-       __scsi_print_sense(cmd->device, disk_name, cmd->sense_buffer,
-                          SCSI_SENSE_BUFFERSIZE);
-}
-EXPORT_SYMBOL(scsi_print_sense);
-
-#ifdef CONFIG_SCSI_CONSTANTS
-
 static const char * const hostbyte_table[]={
 "DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
 "DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
@@ -1410,17 +1225,13 @@ static const char * const driverbyte_table[]={
 "DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT",  "DRIVER_MEDIA", "DRIVER_ERROR",
 "DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
 
-#endif
-
 const char *scsi_hostbyte_string(int result)
 {
        const char *hb_string = NULL;
-#ifdef CONFIG_SCSI_CONSTANTS
        int hb = host_byte(result);
 
        if (hb < ARRAY_SIZE(hostbyte_table))
                hb_string = hostbyte_table[hb];
-#endif
        return hb_string;
 }
 EXPORT_SYMBOL(scsi_hostbyte_string);
@@ -1428,17 +1239,14 @@ EXPORT_SYMBOL(scsi_hostbyte_string);
 const char *scsi_driverbyte_string(int result)
 {
        const char *db_string = NULL;
-#ifdef CONFIG_SCSI_CONSTANTS
        int db = driver_byte(result);
 
        if (db < ARRAY_SIZE(driverbyte_table))
                db_string = driverbyte_table[db];
-#endif
        return db_string;
 }
 EXPORT_SYMBOL(scsi_driverbyte_string);
 
-#ifdef CONFIG_SCSI_CONSTANTS
 #define scsi_mlreturn_name(result)     { result, #result }
 static const struct value_name_pair scsi_mlreturn_arr[] = {
        scsi_mlreturn_name(NEEDS_RETRY),
@@ -1451,11 +1259,9 @@ static const struct value_name_pair scsi_mlreturn_arr[] = {
        scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED),
        scsi_mlreturn_name(FAST_IO_FAIL)
 };
-#endif
 
 const char *scsi_mlreturn_string(int result)
 {
-#ifdef CONFIG_SCSI_CONSTANTS
        const struct value_name_pair *arr = scsi_mlreturn_arr;
        int k;
 
@@ -1463,29 +1269,6 @@ const char *scsi_mlreturn_string(int result)
                if (result == arr->value)
                        return arr->name;
        }
-#endif
        return NULL;
 }
 EXPORT_SYMBOL(scsi_mlreturn_string);
-
-void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition)
-{
-       const char *mlret_string = scsi_mlreturn_string(disposition);
-       const char *hb_string = scsi_hostbyte_string(cmd->result);
-       const char *db_string = scsi_driverbyte_string(cmd->result);
-
-       if (hb_string || db_string)
-               scmd_printk(KERN_INFO, cmd,
-                           "%s%s Result: hostbyte=%s driverbyte=%s",
-                           msg ? msg : "",
-                           mlret_string ? mlret_string : "UNKNOWN",
-                           hb_string ? hb_string : "invalid",
-                           db_string ? db_string : "invalid");
-       else
-               scmd_printk(KERN_INFO, cmd,
-                           "%s%s Result: hostbyte=0x%02x driverbyte=0x%02x",
-                           msg ? msg : "",
-                           mlret_string ? mlret_string : "UNKNOWN",
-                           host_byte(cmd->result), driver_byte(cmd->result));
-}
-EXPORT_SYMBOL(scsi_print_result);
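With the CONFIG_SCSI_CONSTANTS stubs removed, the lookup helpers kept in this file (scsi_opcode_sa_name(), scsi_sense_key_string(), scsi_extd_sense_format(), scsi_hostbyte_string(), scsi_driverbyte_string(), scsi_mlreturn_string()) are always built, and scsi_opcode_sa_name() is given external linkage now that its static qualifier is dropped. A rough caller sketch based only on the signature and return convention visible above; the <scsi/scsi_dbg.h> include and the describe_cdb() wrapper are illustrative assumptions, not part of this patch:

#include <linux/kernel.h>
#include <scsi/scsi_dbg.h>	/* assumed location of the scsi_opcode_sa_name() declaration */

/* Hypothetical helper: log a human-readable name for a CDB opcode. */
static void describe_cdb(int opcode, int service_action)
{
	const char *cdb_name = NULL, *sa_name = NULL;

	if (scsi_opcode_sa_name(opcode, service_action, &cdb_name, &sa_name)) {
		/* opcode uses service actions: prefer the per-action name */
		if (sa_name)
			pr_info("cdb: %s\n", sa_name);
		else if (cdb_name)
			pr_info("cdb: %s, sa=0x%x\n", cdb_name, service_action);
		else
			pr_info("cdb: 0x%x, sa=0x%x\n", opcode, service_action);
	} else {
		pr_info("cdb: %s\n", cdb_name ? cdb_name : "unknown");
	}
}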
index 0c6be0a..5ee7f44 100644
@@ -4610,13 +4610,10 @@ static void adapter_uninit(struct AdapterCtlBlk *acb)
 }
 
 
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m,##args)
-
 #undef YESNO
 #define YESNO(YN) \
- if (YN) SPRINTF(" Yes ");\
- else SPRINTF(" No  ")
+ if (YN) seq_printf(m, " Yes ");\
+ else seq_printf(m, " No  ")
 
 static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
 {
@@ -4626,47 +4623,45 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
        unsigned long flags;
        int dev;
 
-       SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n");
-       SPRINTF(" Driver Version " DC395X_VERSION "\n");
+       seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n"
+               " Driver Version " DC395X_VERSION "\n");
 
        DC395x_LOCK_IO(acb->scsi_host, flags);
 
-       SPRINTF("SCSI Host Nr %i, ", host->host_no);
-       SPRINTF("DC395U/UW/F DC315/U %s\n",
+       seq_printf(m, "SCSI Host Nr %i, ", host->host_no);
+       seq_printf(m, "DC395U/UW/F DC315/U %s\n",
                (acb->config & HCC_WIDE_CARD) ? "Wide" : "");
-       SPRINTF("io_port_base 0x%04lx, ", acb->io_port_base);
-       SPRINTF("irq_level 0x%04x, ", acb->irq_level);
-       SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);
+       seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base);
+       seq_printf(m, "irq_level 0x%04x, ", acb->irq_level);
+       seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);
 
-       SPRINTF("MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
-       SPRINTF("AdapterID %i\n", host->this_id);
+       seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
+       seq_printf(m, "AdapterID %i\n", host->this_id);
 
-       SPRINTF("tag_max_num %i", acb->tag_max_num);
-       /*SPRINTF(", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
-       SPRINTF(", FilterCfg 0x%02x",
+       seq_printf(m, "tag_max_num %i", acb->tag_max_num);
+       /*seq_printf(m, ", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
+       seq_printf(m, ", FilterCfg 0x%02x",
                DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
-       SPRINTF(", DelayReset %is\n", acb->eeprom.delay_time);
-       /*SPRINTF("\n"); */
+       seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time);
+       /*seq_printf(m, "\n"); */
 
-       SPRINTF("Nr of DCBs: %i\n", list_size(&acb->dcb_list));
-       SPRINTF
-           ("Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+       seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
+       seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
             acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
             acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
             acb->dcb_map[6], acb->dcb_map[7]);
-       SPRINTF
-           ("                      %02x %02x %02x %02x %02x %02x %02x %02x\n",
+       seq_printf(m, "                      %02x %02x %02x %02x %02x %02x %02x %02x\n",
             acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
             acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
             acb->dcb_map[14], acb->dcb_map[15]);
 
-       SPRINTF
-           ("Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
+       seq_puts(m,
+                "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
 
        dev = 0;
        list_for_each_entry(dcb, &acb->dcb_list, list) {
                int nego_period;
-               SPRINTF("%02i %02i  %02i ", dev, dcb->target_id,
+               seq_printf(m, "%02i %02i  %02i ", dev, dcb->target_id,
                        dcb->target_lun);
                YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
                YESNO(dcb->sync_offset);
@@ -4676,53 +4671,53 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
                YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
                nego_period = clock_period[dcb->sync_period & 0x07] << 2;
                if (dcb->sync_offset)
-                       SPRINTF("  %03i ns ", nego_period);
+                       seq_printf(m, "  %03i ns ", nego_period);
                else
-                       SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2));
+                       seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2));
 
                if (dcb->sync_offset & 0x0f) {
                        spd = 1000 / (nego_period);
                        spd1 = 1000 % (nego_period);
                        spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
-                       SPRINTF("   %2i.%1i M     %02i ", spd, spd1,
+                       seq_printf(m, "   %2i.%1i M     %02i ", spd, spd1,
                                (dcb->sync_offset & 0x0f));
                } else
-                       SPRINTF("                 ");
+                       seq_puts(m, "                 ");
 
                /* Add more info ... */
-               SPRINTF("     %02i\n", dcb->max_command);
+               seq_printf(m, "     %02i\n", dcb->max_command);
                dev++;
        }
 
        if (timer_pending(&acb->waiting_timer))
-               SPRINTF("Waiting queue timer running\n");
+               seq_puts(m, "Waiting queue timer running\n");
        else
-               SPRINTF("\n");
+               seq_putc(m, '\n');
 
        list_for_each_entry(dcb, &acb->dcb_list, list) {
                struct ScsiReqBlk *srb;
                if (!list_empty(&dcb->srb_waiting_list))
-                       SPRINTF("DCB (%02i-%i): Waiting: %i:",
+                       seq_printf(m, "DCB (%02i-%i): Waiting: %i:",
                                dcb->target_id, dcb->target_lun,
                                list_size(&dcb->srb_waiting_list));
                 list_for_each_entry(srb, &dcb->srb_waiting_list, list)
-                       SPRINTF(" %p", srb->cmd);
+                       seq_printf(m, " %p", srb->cmd);
                if (!list_empty(&dcb->srb_going_list))
-                       SPRINTF("\nDCB (%02i-%i): Going  : %i:",
+                       seq_printf(m, "\nDCB (%02i-%i): Going  : %i:",
                                dcb->target_id, dcb->target_lun,
                                list_size(&dcb->srb_going_list));
                list_for_each_entry(srb, &dcb->srb_going_list, list)
-                       SPRINTF(" %p", srb->cmd);
+                       seq_printf(m, " %p", srb->cmd);
                if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
-                       SPRINTF("\n");
+                       seq_putc(m, '\n');
        }
 
        if (debug_enabled(DBG_1)) {
-               SPRINTF("DCB list for ACB %p:\n", acb);
+               seq_printf(m, "DCB list for ACB %p:\n", acb);
                list_for_each_entry(dcb, &acb->dcb_list, list) {
-                       SPRINTF("%p -> ", dcb);
+                       seq_printf(m, "%p -> ", dcb);
                }
-               SPRINTF("END\n");
+               seq_puts(m, "END\n");
        }
 
        DC395x_UNLOCK_IO(acb->scsi_host, flags);
index 0bf9769..2806cfb 100644
@@ -568,7 +568,7 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
        seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
                        host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
 
-       seq_printf(m, "Devices:\n");
+       seq_puts(m, "Devices:\n");
        for(chan = 0; chan < MAX_CHANNEL; chan++) {
                for(id = 0; id < MAX_ID; id++) {
                        d = pHba->channel[chan].device[id];
index 8319d2b..ca8003f 100644
@@ -102,7 +102,7 @@ static int eata_pio_show_info(struct seq_file *m, struct Scsi_Host *shost)
                   shost->host_no, SD(shost)->name);
        seq_printf(m, "Firmware revision: v%s\n",
                   SD(shost)->revision);
-       seq_printf(m, "IO: PIO\n");
+       seq_puts(m, "IO: PIO\n");
        seq_printf(m, "Base IO : %#.4x\n", (u32) shost->base);
        seq_printf(m, "Host Bus: %s\n",
                   (SD(shost)->bustype == 'P')?"PCI ":
index 7e1c21e..31f8966 100644
@@ -749,7 +749,7 @@ int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh)
        if (dev_count == 0)
                seq_puts(m, "none\n");
 
-       seq_puts(m, "\n");
+       seq_putc(m, '\n');
        return 0;
 
 }
index ce5bd52..065b25d 100644
@@ -2396,8 +2396,6 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
 
        if (!esp->num_tags)
                esp->num_tags = ESP_DEFAULT_TAGS;
-       else if (esp->num_tags >= ESP_MAX_TAG)
-               esp->num_tags = ESP_MAX_TAG - 1;
        esp->host->transportt = esp_transport_template;
        esp->host->max_lun = ESP_MAX_LUN;
        esp->host->cmd_per_lun = 2;
index 9fb6326..e66e997 100644
@@ -173,7 +173,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
     /* request is i.e. "cat /proc/scsi/gdth/0" */ 
     /* format: %-15s\t%-10s\t%-15s\t%s */
     /* driver parameters */
-    seq_printf(m, "Driver Parameters:\n");
+    seq_puts(m, "Driver Parameters:\n");
     if (reserve_list[0] == 0xff)
         strcpy(hrec, "--");
     else {
@@ -192,7 +192,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
                    max_ids, hdr_channel);
 
     /* controller information */
-    seq_printf(m,"\nDisk Array Controller Information:\n");
+    seq_puts(m, "\nDisk Array Controller Information:\n");
     seq_printf(m,
                    " Number:       \t%d         \tName:          \t%s\n",
                    ha->hanum, ha->binfo.type_string);
@@ -219,7 +219,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
 
 #ifdef GDTH_DMA_STATISTICS
     /* controller statistics */
-    seq_printf(m,"\nController Statistics:\n");
+    seq_puts(m, "\nController Statistics:\n");
     seq_printf(m,
                    " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n",
                    ha->dma32_cnt, ha->dma64_cnt);
@@ -227,7 +227,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
 
     if (ha->more_proc) {
         /* more information: 2. about physical devices */
-        seq_printf(m, "\nPhysical Devices:");
+        seq_puts(m, "\nPhysical Devices:");
         flag = FALSE;
             
         buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
@@ -326,10 +326,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
         gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
 
         if (!flag)
-            seq_printf(m, "\n --\n");
+            seq_puts(m, "\n --\n");
 
         /* 3. about logical drives */
-        seq_printf(m,"\nLogical Drives:");
+        seq_puts(m, "\nLogical Drives:");
         flag = FALSE;
 
         buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
@@ -411,10 +411,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
         gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
         
         if (!flag)
-            seq_printf(m, "\n --\n");
+            seq_puts(m, "\n --\n");
 
         /* 4. about array drives */
-        seq_printf(m,"\nArray Drives:");
+        seq_puts(m, "\nArray Drives:");
         flag = FALSE;
 
         buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
@@ -471,10 +471,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
         gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
         
         if (!flag)
-            seq_printf(m, "\n --\n");
+            seq_puts(m, "\n --\n");
 
         /* 5. about host drives */
-        seq_printf(m,"\nHost Drives:");
+        seq_puts(m, "\nHost Drives:");
         flag = FALSE;
 
         buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr);
@@ -527,11 +527,11 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
         }
         
         if (!flag)
-            seq_printf(m, "\n --\n");
+            seq_puts(m, "\n --\n");
     }
 
     /* controller events */
-    seq_printf(m,"\nController Events:\n");
+    seq_puts(m, "\nController Events:\n");
 
     for (id = -1;;) {
         id = gdth_read_event(ha, id, estr);
index 6bb4611..95d581c 100644
@@ -50,6 +50,7 @@
 #include <linux/jiffies.h>
 #include <linux/percpu-defs.h>
 #include <linux/percpu.h>
+#include <asm/unaligned.h>
 #include <asm/div64.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
-/* How long to wait (in milliseconds) for board to go into simple mode */
-#define MAX_CONFIG_WAIT 30000
+/* How long to wait for CISS doorbell communication */
+#define CLEAR_EVENT_WAIT_INTERVAL 20   /* ms for each msleep() call */
+#define MODE_CHANGE_WAIT_INTERVAL 10   /* ms for each msleep() call */
+#define MAX_CLEAR_EVENT_WAIT 30000     /* times 20 ms = 600 s */
+#define MAX_MODE_CHANGE_WAIT 2000      /* times 10 ms = 20 s */
 #define MAX_IOCTL_CONFIG_WAIT 1000
 
 /*define how many times we will try a command because of bus resets */
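The interval/count pairs above replace the single MAX_CONFIG_WAIT timeout: each bounds a polling loop of roughly interval * count milliseconds (about 20 s for a mode change, about 600 s for clearing events). A sketch of the intended shape only; wait_for_mode_change() and doorbell_busy() below are hypothetical stand-ins, not functions from this driver:

#include <linux/delay.h>

/* Illustration only: poll until the (hypothetical) doorbell check clears. */
static void wait_for_mode_change(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (!doorbell_busy(h))			/* hypothetical doorbell-ack check */
			return;
		msleep(MODE_CHANGE_WAIT_INTERVAL);	/* 10 ms per pass, ~20 s worst case */
	}
}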
@@ -164,24 +168,24 @@ static struct board_type products[] = {
        {0x1926103C, "Smart Array P731m", &SA5_access},
        {0x1928103C, "Smart Array P230i", &SA5_access},
        {0x1929103C, "Smart Array P530", &SA5_access},
-       {0x21BD103C, "Smart Array", &SA5_access},
-       {0x21BE103C, "Smart Array", &SA5_access},
-       {0x21BF103C, "Smart Array", &SA5_access},
-       {0x21C0103C, "Smart Array", &SA5_access},
-       {0x21C1103C, "Smart Array", &SA5_access},
-       {0x21C2103C, "Smart Array", &SA5_access},
-       {0x21C3103C, "Smart Array", &SA5_access},
+       {0x21BD103C, "Smart Array P244br", &SA5_access},
+       {0x21BE103C, "Smart Array P741m", &SA5_access},
+       {0x21BF103C, "Smart HBA H240ar", &SA5_access},
+       {0x21C0103C, "Smart Array P440ar", &SA5_access},
+       {0x21C1103C, "Smart Array P840ar", &SA5_access},
+       {0x21C2103C, "Smart Array P440", &SA5_access},
+       {0x21C3103C, "Smart Array P441", &SA5_access},
        {0x21C4103C, "Smart Array", &SA5_access},
-       {0x21C5103C, "Smart Array", &SA5_access},
-       {0x21C6103C, "Smart Array", &SA5_access},
-       {0x21C7103C, "Smart Array", &SA5_access},
-       {0x21C8103C, "Smart Array", &SA5_access},
+       {0x21C5103C, "Smart Array P841", &SA5_access},
+       {0x21C6103C, "Smart HBA H244br", &SA5_access},
+       {0x21C7103C, "Smart HBA H240", &SA5_access},
+       {0x21C8103C, "Smart HBA H241", &SA5_access},
        {0x21C9103C, "Smart Array", &SA5_access},
-       {0x21CA103C, "Smart Array", &SA5_access},
-       {0x21CB103C, "Smart Array", &SA5_access},
+       {0x21CA103C, "Smart Array P246br", &SA5_access},
+       {0x21CB103C, "Smart Array P840", &SA5_access},
        {0x21CC103C, "Smart Array", &SA5_access},
        {0x21CD103C, "Smart Array", &SA5_access},
-       {0x21CE103C, "Smart Array", &SA5_access},
+       {0x21CE103C, "Smart HBA", &SA5_access},
        {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
        {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
        {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -195,8 +199,6 @@ static int number_of_controllers;
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
-static void lock_and_start_io(struct ctlr_info *h);
-static void start_io(struct ctlr_info *h, unsigned long *flags);
 
 #ifdef CONFIG_COMPAT
 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
@@ -204,18 +206,18 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
 #endif
 
 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
-static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
 static struct CommandList *cmd_alloc(struct ctlr_info *h);
-static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
        int cmd_type);
+static void hpsa_free_cmd_pool(struct ctlr_info *h);
 #define VPD_PAGE (1 << 8)
 
 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 static void hpsa_scan_start(struct Scsi_Host *);
 static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
+static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
 
 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
 static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
@@ -229,7 +231,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
 /* performant mode helper functions */
 static void calc_bucket_map(int *bucket, int num_buckets,
-       int nsgs, int min_blocks, int *bucket_map);
+       int nsgs, int min_blocks, u32 *bucket_map);
 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
 static inline u32 next_command(struct ctlr_info *h, u8 q);
 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
@@ -241,14 +243,15 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
                                     int wait_for_ready);
 static inline void finish_cmd(struct CommandList *c);
-static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
+static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
 #define BOARD_NOT_READY 0
 #define BOARD_READY 1
 static void hpsa_drain_accel_commands(struct ctlr_info *h);
 static void hpsa_flush_cache(struct ctlr_info *h);
 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
-       u8 *scsi3addr);
+       u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
+static void hpsa_command_resubmit_worker(struct work_struct *work);
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 {
@@ -505,8 +508,8 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
        return (scsi3addr[3] & 0xC0) == 0x40;
 }
 
-static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
-       "1(ADM)", "UNKNOWN"
+static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
+       "1(+0)ADM", "UNKNOWN"
 };
 #define HPSA_RAID_0    0
 #define HPSA_RAID_4    1
@@ -671,7 +674,7 @@ static struct scsi_host_template hpsa_driver_template = {
        .queuecommand           = hpsa_scsi_queue_command,
        .scan_start             = hpsa_scan_start,
        .scan_finished          = hpsa_scan_finished,
-       .change_queue_depth     = scsi_change_queue_depth,
+       .change_queue_depth     = hpsa_change_queue_depth,
        .this_id                = -1,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_abort_handler       = hpsa_eh_abort_handler,
@@ -688,13 +691,6 @@ static struct scsi_host_template hpsa_driver_template = {
        .no_write_same = 1,
 };
 
-
-/* Enqueuing and dequeuing functions for cmdlists. */
-static inline void addQ(struct list_head *list, struct CommandList *c)
-{
-       list_add_tail(&c->list, list);
-}
-
 static inline u32 next_command(struct ctlr_info *h, u8 q)
 {
        u32 a;
@@ -828,31 +824,21 @@ static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c)
 {
-       unsigned long flags;
-
+       dial_down_lockup_detection_during_fw_flash(h, c);
+       atomic_inc(&h->commands_outstanding);
        switch (c->cmd_type) {
        case CMD_IOACCEL1:
                set_ioaccel1_performant_mode(h, c);
+               writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
                break;
        case CMD_IOACCEL2:
                set_ioaccel2_performant_mode(h, c);
+               writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
                break;
        default:
                set_performant_mode(h, c);
+               h->access.submit_command(h, c);
        }
-       dial_down_lockup_detection_during_fw_flash(h, c);
-       spin_lock_irqsave(&h->lock, flags);
-       addQ(&h->reqQ, c);
-       h->Qdepth++;
-       start_io(h, &flags);
-       spin_unlock_irqrestore(&h->lock, flags);
-}
-
-static inline void removeQ(struct CommandList *c)
-{
-       if (WARN_ON(list_empty(&c->list)))
-               return;
-       list_del_init(&c->list);
 }
 
 static inline int is_hba_lunid(unsigned char scsi3addr[])
@@ -919,7 +905,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
 
        /* If this device is a non-zero lun of a multi-lun device
         * byte 4 of the 8-byte LUN addr will contain the logical
-        * unit no, zero otherise.
+        * unit no, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
@@ -984,12 +970,24 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
        /* Raid level changed. */
        h->dev[entry]->raid_level = new_entry->raid_level;
 
-       /* Raid offload parameters changed. */
+       /* Raid offload parameters changed.  Careful about the ordering. */
+       if (new_entry->offload_config && new_entry->offload_enabled) {
+               /*
+                * If the drive is newly offload_enabled, we want to copy the
+                * raid map data first.  If previously offload_enabled and
+                * offload_config were set, the raid map data had better be
+                * the same as it was before.  If the raid map data has changed
+                * then it had better be the case that
+                * h->dev[entry]->offload_enabled is currently 0.
+                */
+               h->dev[entry]->raid_map = new_entry->raid_map;
+               h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+               wmb(); /* ensure raid map updated prior to ->offload_enabled */
+       }
        h->dev[entry]->offload_config = new_entry->offload_config;
-       h->dev[entry]->offload_enabled = new_entry->offload_enabled;
-       h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
        h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
-       h->dev[entry]->raid_map = new_entry->raid_map;
+       h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+       h->dev[entry]->queue_depth = new_entry->queue_depth;
 
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
                scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
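The reordering above is the usual publish pattern: write the payload first, issue a write barrier, and only then flip the flag that the fast path tests. A generic sketch with hypothetical names (struct thing, thing->ready); reader-side ordering is omitted for brevity:

#include <asm/barrier.h>

struct thing {
        int data;
        int ready;      /* consumers only look at ->data once this is set */
};

static void publish(struct thing *t, int value)
{
        t->data = value;
        wmb();          /* make ->data visible before ->ready flips */
        t->ready = 1;
}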
@@ -1115,6 +1113,8 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
                return 1;
        if (dev1->offload_enabled != dev2->offload_enabled)
                return 1;
+       if (dev1->queue_depth != dev2->queue_depth)
+               return 1;
        return 0;
 }
 
@@ -1260,6 +1260,85 @@ static void hpsa_show_volume_status(struct ctlr_info *h,
        }
 }
 
+/*
+ * Figure the list of physical drive pointers for a logical drive with
+ * raid offload configured.
+ */
+static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
+                               struct hpsa_scsi_dev_t *dev[], int ndevices,
+                               struct hpsa_scsi_dev_t *logical_drive)
+{
+       struct raid_map_data *map = &logical_drive->raid_map;
+       struct raid_map_disk_data *dd = &map->data[0];
+       int i, j;
+       int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
+                               le16_to_cpu(map->metadata_disks_per_row);
+       int nraid_map_entries = le16_to_cpu(map->row_cnt) *
+                               le16_to_cpu(map->layout_map_count) *
+                               total_disks_per_row;
+       int nphys_disk = le16_to_cpu(map->layout_map_count) *
+                               total_disks_per_row;
+       int qdepth;
+
+       if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
+               nraid_map_entries = RAID_MAP_MAX_ENTRIES;
+
+       qdepth = 0;
+       for (i = 0; i < nraid_map_entries; i++) {
+               logical_drive->phys_disk[i] = NULL;
+               if (!logical_drive->offload_config)
+                       continue;
+               for (j = 0; j < ndevices; j++) {
+                       if (dev[j]->devtype != TYPE_DISK)
+                               continue;
+                       if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
+                               continue;
+                       if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
+                               continue;
+
+                       logical_drive->phys_disk[i] = dev[j];
+                       if (i < nphys_disk)
+                               qdepth = min(h->nr_cmds, qdepth +
+                                   logical_drive->phys_disk[i]->queue_depth);
+                       break;
+               }
+
+               /*
+                * This can happen if a physical drive is removed and
+                * the logical drive is degraded.  In that case, the RAID
+                * map data will refer to a physical disk which isn't actually
+                * present.  And in that case offload_enabled should already
+                * be 0, but we'll turn it off here just in case
+                */
+               if (!logical_drive->phys_disk[i]) {
+                       logical_drive->offload_enabled = 0;
+                       logical_drive->queue_depth = h->nr_cmds;
+               }
+       }
+       if (nraid_map_entries)
+               /*
+                * This is correct for reads, too high for full stripe writes,
+                * way too high for partial stripe writes
+                */
+               logical_drive->queue_depth = qdepth;
+       else
+               logical_drive->queue_depth = h->nr_cmds;
+}
+
+static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
+                               struct hpsa_scsi_dev_t *dev[], int ndevices)
+{
+       int i;
+
+       for (i = 0; i < ndevices; i++) {
+               if (dev[i]->devtype != TYPE_DISK)
+                       continue;
+               if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
+                       continue;
+               hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
+       }
+}
+
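To make the queue-depth arithmetic above concrete (illustrative numbers, not from the patch): for an offload-configured volume whose RAID map references four physical disks, each advertising queue_depth = 28, the loop accumulates min(h->nr_cmds, 28 + 28 + 28 + 28) = min(h->nr_cmds, 112); a volume whose map contains no entries at all falls back to h->nr_cmds.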
 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *sd[], int nsds)
 {
@@ -1444,8 +1523,12 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
        spin_lock_irqsave(&h->devlock, flags);
        sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
                sdev_id(sdev), sdev->lun);
-       if (sd != NULL)
+       if (sd != NULL) {
                sdev->hostdata = sd;
+               if (sd->queue_depth)
+                       scsi_change_queue_depth(sdev, sd->queue_depth);
+               atomic_set(&sd->ioaccel_cmds_out, 0);
+       }
        spin_unlock_irqrestore(&h->devlock, flags);
        return 0;
 }
@@ -1478,13 +1561,17 @@ static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
 
        h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
                                GFP_KERNEL);
-       if (!h->cmd_sg_list)
+       if (!h->cmd_sg_list) {
+               dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
                return -ENOMEM;
+       }
        for (i = 0; i < h->nr_cmds; i++) {
                h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
                                                h->chainsize, GFP_KERNEL);
-               if (!h->cmd_sg_list[i])
+               if (!h->cmd_sg_list[i]) {
+                       dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
                        goto clean;
+               }
        }
        return 0;
 
@@ -1504,7 +1591,7 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
        chain_block = h->cmd_sg_list[c->cmdindex];
        chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
        chain_len = sizeof(*chain_sg) *
-               (c->Header.SGTotal - h->max_cmd_sg_entries);
+               (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
        chain_sg->Len = cpu_to_le32(chain_len);
        temp64 = pci_map_single(h->pdev, chain_block, chain_len,
                                PCI_DMA_TODEVICE);
@@ -1635,7 +1722,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
                struct hpsa_scsi_dev_t *dev)
 {
        struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
-       int raid_retry = 0;
 
        /* check for good status */
        if (likely(c2->error_data.serv_response == 0 &&
@@ -1652,26 +1738,22 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
        if (is_logical_dev_addr_mode(dev->scsi3addr) &&
                c2->error_data.serv_response ==
                        IOACCEL2_SERV_RESPONSE_FAILURE) {
-               dev->offload_enabled = 0;
-               h->drv_req_rescan = 1;  /* schedule controller for a rescan */
-               cmd->result = DID_SOFT_ERROR << 16;
-               cmd_free(h, c);
-               cmd->scsi_done(cmd);
-               return;
-       }
-       raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
-       /* If error found, disable Smart Path, schedule a rescan,
-        * and force a retry on the standard path.
-        */
-       if (raid_retry) {
-               dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
-                       "HP SSD Smart Path");
-               dev->offload_enabled = 0; /* Disable Smart Path */
-               h->drv_req_rescan = 1;    /* schedule controller rescan */
-               cmd->result = DID_SOFT_ERROR << 16;
+               if (c2->error_data.status ==
+                       IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+                       dev->offload_enabled = 0;
+               goto retry_cmd;
        }
+
+       if (handle_ioaccel_mode2_error(h, c, cmd, c2))
+               goto retry_cmd;
+
        cmd_free(h, c);
        cmd->scsi_done(cmd);
+       return;
+
+retry_cmd:
+       INIT_WORK(&c->work, hpsa_command_resubmit_worker);
+       queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
 }
 
 static void complete_scsi_command(struct CommandList *cp)
@@ -1687,18 +1769,21 @@ static void complete_scsi_command(struct CommandList *cp)
        unsigned long sense_data_size;
 
        ei = cp->err_info;
-       cmd = (struct scsi_cmnd *) cp->scsi_cmd;
+       cmd = cp->scsi_cmd;
        h = cp->h;
        dev = cmd->device->hostdata;
 
        scsi_dma_unmap(cmd); /* undo the DMA mappings */
        if ((cp->cmd_type == CMD_SCSI) &&
-               (cp->Header.SGTotal > h->max_cmd_sg_entries))
+               (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
                hpsa_unmap_sg_chain_block(h, cp);
 
        cmd->result = (DID_OK << 16);           /* host byte */
        cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
 
+       if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
+               atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
+
        if (cp->cmd_type == CMD_IOACCEL2)
                return process_ioaccel2_completion(h, cp, cmd, dev);
 
@@ -1706,6 +1791,8 @@ static void complete_scsi_command(struct CommandList *cp)
 
        scsi_set_resid(cmd, ei->ResidualCnt);
        if (ei->CommandStatus == 0) {
+               if (cp->cmd_type == CMD_IOACCEL1)
+                       atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
                cmd_free(h, cp);
                cmd->scsi_done(cmd);
                return;
@@ -1726,8 +1813,10 @@ static void complete_scsi_command(struct CommandList *cp)
         */
        if (cp->cmd_type == CMD_IOACCEL1) {
                struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
-               cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
-               cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
+               cp->Header.SGList = scsi_sg_count(cmd);
+               cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
+               cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
+                       IOACCEL1_IOFLAGS_CDBLEN_MASK;
                cp->Header.tag = c->tag;
                memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
                memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
@@ -1739,9 +1828,9 @@ static void complete_scsi_command(struct CommandList *cp)
                if (is_logical_dev_addr_mode(dev->scsi3addr)) {
                        if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
                                dev->offload_enabled = 0;
-                       cmd->result = DID_SOFT_ERROR << 16;
-                       cmd_free(h, cp);
-                       cmd->scsi_done(cmd);
+                       INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
+                       queue_work_on(raw_smp_processor_id(),
+                                       h->resubmit_wq, &cp->work);
                        return;
                }
        }
@@ -1798,9 +1887,8 @@ static void complete_scsi_command(struct CommandList *cp)
        case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
                break;
        case CMD_DATA_OVERRUN:
-               dev_warn(&h->pdev->dev, "cp %p has"
-                       " completed with data overrun "
-                       "reported\n", cp);
+               dev_warn(&h->pdev->dev,
+                       "CDB %16phN data overrun\n", cp->Request.CDB);
                break;
        case CMD_INVALID: {
                /* print_bytes(cp, sizeof(*cp), 1, 0);
@@ -1816,34 +1904,38 @@ static void complete_scsi_command(struct CommandList *cp)
                break;
        case CMD_PROTOCOL_ERR:
                cmd->result = DID_ERROR << 16;
-               dev_warn(&h->pdev->dev, "cp %p has "
-                       "protocol error\n", cp);
+               dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
+                               cp->Request.CDB);
                break;
        case CMD_HARDWARE_ERR:
                cmd->result = DID_ERROR << 16;
-               dev_warn(&h->pdev->dev, "cp %p had  hardware error\n", cp);
+               dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
+                       cp->Request.CDB);
                break;
        case CMD_CONNECTION_LOST:
                cmd->result = DID_ERROR << 16;
-               dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
+               dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
+                       cp->Request.CDB);
                break;
        case CMD_ABORTED:
                cmd->result = DID_ABORT << 16;
-               dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
-                               cp, ei->ScsiStatus);
+               dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
+                               cp->Request.CDB, ei->ScsiStatus);
                break;
        case CMD_ABORT_FAILED:
                cmd->result = DID_ERROR << 16;
-               dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
+               dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
+                       cp->Request.CDB);
                break;
        case CMD_UNSOLICITED_ABORT:
                cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
-               dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
-                       "abort\n", cp);
+               dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
+                       cp->Request.CDB);
                break;
        case CMD_TIMEOUT:
                cmd->result = DID_TIME_OUT << 16;
-               dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
+               dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
+                       cp->Request.CDB);
                break;
        case CMD_UNABORTABLE:
                cmd->result = DID_ERROR << 16;
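The dev_warn() rewrites above rely on the kernel's %*ph printk extension: "%16phN" dumps 16 bytes as one contiguous hex string, so the log shows the offending CDB itself instead of a kernel virtual address. A minimal illustration with a hypothetical buffer:

#include <linux/printk.h>

static void log_cdb_example(void)
{
        u8 cdb[16] = { 0x28, 0x00, 0x00, 0x12, 0x34, 0x56, 0x00, 0x00, 0x08, 0x00 };

        /* %16phN emits all 16 bytes back to back in hex, no separators. */
        pr_warn("CDB %16phN : hardware error\n", cdb);
}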
@@ -2048,10 +2140,10 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
        struct CommandList *c;
        struct ErrorInfo *ei;
 
-       c = cmd_special_alloc(h);
+       c = cmd_alloc(h);
 
-       if (c == NULL) {                        /* trouble... */
-               dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+       if (c == NULL) {
+               dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                return -ENOMEM;
        }
 
@@ -2067,7 +2159,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
                rc = -1;
        }
 out:
-       cmd_special_free(h, c);
+       cmd_free(h, c);
        return rc;
 }
 
@@ -2079,10 +2171,9 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
        struct CommandList *c;
        struct ErrorInfo *ei;
 
-       c = cmd_special_alloc(h);
-
+       c = cmd_alloc(h);
        if (c == NULL) {                        /* trouble... */
-               dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+               dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                return -ENOMEM;
        }
 
@@ -2098,7 +2189,7 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
                rc = -1;
        }
 out:
-       cmd_special_free(h, c);
+       cmd_free(h, c);
        return rc;
        }
 
@@ -2109,10 +2200,10 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
        struct CommandList *c;
        struct ErrorInfo *ei;
 
-       c = cmd_special_alloc(h);
+       c = cmd_alloc(h);
 
        if (c == NULL) {                        /* trouble... */
-               dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+               dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                return -ENOMEM;
        }
 
@@ -2128,7 +2219,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
                hpsa_scsi_interpret_error(h, c);
                rc = -1;
        }
-       cmd_special_free(h, c);
+       cmd_free(h, c);
        return rc;
 }
 
@@ -2191,15 +2282,13 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
                        le16_to_cpu(map_buff->row_cnt));
        dev_info(&h->pdev->dev, "layout_map_count = %u\n",
                        le16_to_cpu(map_buff->layout_map_count));
-       dev_info(&h->pdev->dev, "flags = %u\n",
+       dev_info(&h->pdev->dev, "flags = 0x%x\n",
                        le16_to_cpu(map_buff->flags));
-       if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
-               dev_info(&h->pdev->dev, "encrypytion = ON\n");
-       else
-               dev_info(&h->pdev->dev, "encrypytion = OFF\n");
+       dev_info(&h->pdev->dev, "encryption = %s\n",
+                       le16_to_cpu(map_buff->flags) &
+                       RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
        dev_info(&h->pdev->dev, "dekindex = %u\n",
                        le16_to_cpu(map_buff->dekindex));
-
        map_cnt = le16_to_cpu(map_buff->layout_map_count);
        for (map = 0; map < map_cnt; map++) {
                dev_info(&h->pdev->dev, "Map%u:\n", map);
@@ -2238,26 +2327,26 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
        struct CommandList *c;
        struct ErrorInfo *ei;
 
-       c = cmd_special_alloc(h);
+       c = cmd_alloc(h);
        if (c == NULL) {
-               dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+               dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                return -ENOMEM;
        }
        if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
                        sizeof(this_device->raid_map), 0,
                        scsi3addr, TYPE_CMD)) {
                dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
-               cmd_special_free(h, c);
+               cmd_free(h, c);
                return -ENOMEM;
        }
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
        ei = c->err_info;
        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
                hpsa_scsi_interpret_error(h, c);
-               cmd_special_free(h, c);
+               cmd_free(h, c);
                return -1;
        }
-       cmd_special_free(h, c);
+       cmd_free(h, c);
 
        /* @todo in the future, dynamically allocate RAID map memory */
        if (le32_to_cpu(this_device->raid_map.structure_size) >
@@ -2269,6 +2358,34 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
        return rc;
 }
 
+static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
+               unsigned char scsi3addr[], u16 bmic_device_index,
+               struct bmic_identify_physical_device *buf, size_t bufsize)
+{
+       int rc = IO_OK;
+       struct CommandList *c;
+       struct ErrorInfo *ei;
+
+       c = cmd_alloc(h);
+       rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
+               0, RAID_CTLR_LUNID, TYPE_CMD);
+       if (rc)
+               goto out;
+
+       c->Request.CDB[2] = bmic_device_index & 0xff;
+       c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
+
+       hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+       ei = c->err_info;
+       if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+               hpsa_scsi_interpret_error(h, c);
+               rc = -1;
+       }
+out:
+       cmd_free(h, c);
+       return rc;
+}
+
 static int hpsa_vpd_page_supported(struct ctlr_info *h,
        unsigned char scsi3addr[], u8 page)
 {
@@ -2369,7 +2486,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
 }
 
 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
-               struct ReportLUNdata *buf, int bufsize,
+               void *buf, int bufsize,
                int extended_response)
 {
        int rc = IO_OK;
@@ -2377,9 +2494,9 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
        unsigned char scsi3addr[8];
        struct ErrorInfo *ei;
 
-       c = cmd_special_alloc(h);
+       c = cmd_alloc(h);
        if (c == NULL) {                        /* trouble... */
-               dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+               dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                return -1;
        }
        /* address the controller */
@@ -2398,24 +2515,26 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
                hpsa_scsi_interpret_error(h, c);
                rc = -1;
        } else {
-               if (buf->extended_response_flag != extended_response) {
+               struct ReportLUNdata *rld = buf;
+
+               if (rld->extended_response_flag != extended_response) {
                        dev_err(&h->pdev->dev,
                                "report luns requested format %u, got %u\n",
                                extended_response,
-                               buf->extended_response_flag);
+                               rld->extended_response_flag);
                        rc = -1;
                }
        }
 out:
-       cmd_special_free(h, c);
+       cmd_free(h, c);
        return rc;
 }
 
 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
-               struct ReportLUNdata *buf,
-               int bufsize, int extended_response)
+               struct ReportExtendedLUNdata *buf, int bufsize)
 {
-       return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
+       return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+                                               HPSA_REPORT_PHYS_EXTENDED);
 }
 
 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
@@ -2590,6 +2709,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
                this_device->offload_config = 0;
                this_device->offload_enabled = 0;
                this_device->volume_offline = 0;
+               this_device->queue_depth = h->nr_cmds;
        }
 
        if (is_OBDR_device) {
@@ -2732,7 +2852,6 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
 {
        struct ReportExtendedLUNdata *physicals = NULL;
        int responsesize = 24;  /* size of physical extended response */
-       int extended = 2;       /* flag forces reporting 'other dev info'. */
        int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
        u32 nphysicals = 0;     /* number of reported physical devs */
        int found = 0;          /* found match (1) or not (0) */
@@ -2741,8 +2860,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
        struct scsi_cmnd *scmd; /* scsi command within request being aborted */
        struct hpsa_scsi_dev_t *d; /* device of request being aborted */
        struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
-       u32 it_nexus;           /* 4 byte device handle for the ioaccel2 cmd */
-       u32 scsi_nexus;         /* 4 byte device handle for the ioaccel2 cmd */
+       __le32 it_nexus;        /* 4 byte device handle for the ioaccel2 cmd */
+       __le32 scsi_nexus;      /* 4 byte device handle for the ioaccel2 cmd */
 
        if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
                return 0; /* no match */
@@ -2761,8 +2880,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
                return 0; /* no match */
 
        it_nexus = cpu_to_le32(d->ioaccel_handle);
-       scsi_nexus = cpu_to_le32(c2a->scsi_nexus);
-       find = c2a->scsi_nexus;
+       scsi_nexus = c2a->scsi_nexus;
+       find = le32_to_cpu(c2a->scsi_nexus);
 
        if (h->raid_offload_debug > 0)
                dev_info(&h->pdev->dev,
@@ -2779,8 +2898,7 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
        physicals = kzalloc(reportsize, GFP_KERNEL);
        if (physicals == NULL)
                return 0;
-       if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
-               reportsize, extended)) {
+       if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
                dev_err(&h->pdev->dev,
                        "Can't lookup %s device handle: report physical LUNs failed.\n",
                        "HP SSD Smart Path");
@@ -2821,34 +2939,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
  * Returns 0 on success, -1 otherwise.
  */
 static int hpsa_gather_lun_info(struct ctlr_info *h,
-       int reportphyslunsize, int reportloglunsize,
-       struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
+       struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
        struct ReportLUNdata *logdev, u32 *nlogicals)
 {
-       int physical_entry_size = 8;
-
-       *physical_mode = 0;
-
-       /* For I/O accelerator mode we need to read physical device handles */
-       if (h->transMethod & CFGTBL_Trans_io_accel1 ||
-               h->transMethod & CFGTBL_Trans_io_accel2) {
-               *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
-               physical_entry_size = 24;
-       }
-       if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize,
-                                                       *physical_mode)) {
+       if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
                dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
                return -1;
        }
-       *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
-                                                       physical_entry_size;
+       *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
        if (*nphysicals > HPSA_MAX_PHYS_LUN) {
-               dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
-                       "  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
-                       *nphysicals - HPSA_MAX_PHYS_LUN);
+               dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
+                       HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
                *nphysicals = HPSA_MAX_PHYS_LUN;
        }
-       if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) {
+       if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
                dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
                return -1;
        }
@@ -2921,6 +3025,33 @@ static int hpsa_hba_mode_enabled(struct ctlr_info *h)
        return hba_mode_enabled;
 }
 
+/* get physical drive ioaccel handle and queue depth */
+static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
+               struct hpsa_scsi_dev_t *dev,
+               u8 *lunaddrbytes,
+               struct bmic_identify_physical_device *id_phys)
+{
+       int rc;
+       struct ext_report_lun_entry *rle =
+               (struct ext_report_lun_entry *) lunaddrbytes;
+
+       dev->ioaccel_handle = rle->ioaccel_handle;
+       memset(id_phys, 0, sizeof(*id_phys));
+       rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
+                       GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
+                       sizeof(*id_phys));
+       if (!rc)
+               /* Reserve space for FW operations */
+#define DRIVE_CMDS_RESERVED_FOR_FW 2
+#define DRIVE_QUEUE_DEPTH 7
+               dev->queue_depth =
+                       le16_to_cpu(id_phys->current_queue_depth_limit) -
+                               DRIVE_CMDS_RESERVED_FOR_FW;
+       else
+               dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
+       atomic_set(&dev->ioaccel_cmds_out, 0);
+}
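For a concrete sense of the reservation above (numbers are illustrative): if BMIC identify-physical-device reports current_queue_depth_limit = 32, the driver exposes 32 - DRIVE_CMDS_RESERVED_FOR_FW = 30 tags for that drive; if the BMIC command fails, it settles for the conservative DRIVE_QUEUE_DEPTH of 7.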
+
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 {
        /* the idea here is we could get notified
@@ -2935,9 +3066,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
         */
        struct ReportExtendedLUNdata *physdev_list = NULL;
        struct ReportLUNdata *logdev_list = NULL;
+       struct bmic_identify_physical_device *id_phys = NULL;
        u32 nphysicals = 0;
        u32 nlogicals = 0;
-       int physical_mode = 0;
        u32 ndev_allocated = 0;
        struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
        int ncurrent = 0;
@@ -2950,8 +3081,10 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
        physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
        logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
        tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
+       id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
 
-       if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
+       if (!currentsd || !physdev_list || !logdev_list ||
+               !tmpdevice || !id_phys) {
                dev_err(&h->pdev->dev, "out of memory\n");
                goto out;
        }
@@ -2968,10 +3101,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 
        h->hba_mode_enabled = rescan_hba_mode;
 
-       if (hpsa_gather_lun_info(h,
-                       sizeof(*physdev_list), sizeof(*logdev_list),
-                       (struct ReportLUNdata *) physdev_list, &nphysicals,
-                       &physical_mode, logdev_list, &nlogicals))
+       if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
+                       logdev_list, &nlogicals))
                goto out;
 
        /* We might see up to the maximum number of logical and physical disks
@@ -3068,10 +3199,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                                ncurrent++;
                                break;
                        }
-                       if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
-                               memcpy(&this_device->ioaccel_handle,
-                                       &lunaddrbytes[20],
-                                       sizeof(this_device->ioaccel_handle));
+                       if (h->transMethod & CFGTBL_Trans_io_accel1 ||
+                               h->transMethod & CFGTBL_Trans_io_accel2) {
+                               hpsa_get_ioaccel_drive_info(h, this_device,
+                                                       lunaddrbytes, id_phys);
+                               atomic_set(&this_device->ioaccel_cmds_out, 0);
                                ncurrent++;
                        }
                        break;
@@ -3095,6 +3227,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
                if (ncurrent >= HPSA_MAX_DEVICES)
                        break;
        }
+       hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
        adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
 out:
        kfree(tmpdevice);
@@ -3103,9 +3236,22 @@ out:
        kfree(currentsd);
        kfree(physdev_list);
        kfree(logdev_list);
+       kfree(id_phys);
+}
+
+static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
+                                  struct scatterlist *sg)
+{
+       u64 addr64 = (u64) sg_dma_address(sg);
+       unsigned int len = sg_dma_len(sg);
+
+       desc->Addr = cpu_to_le64(addr64);
+       desc->Len = cpu_to_le32(len);
+       desc->Ext = 0;
 }
 
-/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
+/*
+ * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
  * dma mapping  and fills in the scatter gather entries of the
  * hpsa command, cp.
  */
@@ -3113,9 +3259,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
                struct CommandList *cp,
                struct scsi_cmnd *cmd)
 {
-       unsigned int len;
        struct scatterlist *sg;
-       u64 addr64;
        int use_sg, i, sg_index, chained;
        struct SGDescriptor *curr_sg;
 
@@ -3138,13 +3282,11 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
                        curr_sg = h->cmd_sg_list[cp->cmdindex];
                        sg_index = 0;
                }
-               addr64 = (u64) sg_dma_address(sg);
-               len  = sg_dma_len(sg);
-               curr_sg->Addr = cpu_to_le64(addr64);
-               curr_sg->Len = cpu_to_le32(len);
-               curr_sg->Ext = cpu_to_le32(0);
+               hpsa_set_sg_descriptor(curr_sg, sg);
                curr_sg++;
        }
+
+       /* Back the pointer up to the last entry and mark it as "last". */
        (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
 
        if (use_sg + chained > h->maxSG)
@@ -3163,7 +3305,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
 sglist_finished:
 
        cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
-       cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in this cmd list */
+       cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
        return 0;
 }
 
@@ -3217,7 +3359,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
 
 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
-       u8 *scsi3addr)
+       u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 {
        struct scsi_cmnd *cmd = c->scsi_cmd;
        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
@@ -3230,13 +3372,17 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
        u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
 
        /* TODO: implement chaining support */
-       if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+       if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
+               atomic_dec(&phys_disk->ioaccel_cmds_out);
                return IO_ACCEL_INELIGIBLE;
+       }
 
        BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
 
-       if (fixup_ioaccel_cdb(cdb, &cdb_len))
+       if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
+               atomic_dec(&phys_disk->ioaccel_cmds_out);
                return IO_ACCEL_INELIGIBLE;
+       }
 
        c->cmd_type = CMD_IOACCEL1;
 
@@ -3246,8 +3392,10 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
        BUG_ON(c->busaddr & 0x0000007F);
 
        use_sg = scsi_dma_map(cmd);
-       if (use_sg < 0)
+       if (use_sg < 0) {
+               atomic_dec(&phys_disk->ioaccel_cmds_out);
                return use_sg;
+       }
 
        if (use_sg) {
                curr_sg = cp->SG;
@@ -3284,11 +3432,11 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
 
        c->Header.SGList = use_sg;
        /* Fill out the command structure to submit */
-       cp->dev_handle = ioaccel_handle & 0xFFFF;
-       cp->transfer_len = total_len;
-       cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
-                       (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
-       cp->control = control;
+       cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
+       cp->transfer_len = cpu_to_le32(total_len);
+       cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
+                       (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
+       cp->control = cpu_to_le32(control);
        memcpy(cp->CDB, cdb, cdb_len);
        memcpy(cp->CISS_LUN, scsi3addr, 8);
        /* Tag was already set at init time. */
@@ -3306,8 +3454,10 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
        struct scsi_cmnd *cmd = c->scsi_cmd;
        struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 
+       c->phys_disk = dev;
+
        return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
-               cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
+               cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
 }
 
 /*
@@ -3321,10 +3471,8 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
        struct raid_map_data *map = &dev->raid_map;
        u64 first_block;
 
-       BUG_ON(!(dev->offload_config && dev->offload_enabled));
-
        /* Are we doing encryption on this device */
-       if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+       if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
                return;
        /* Set the data encryption key index. */
        cp->dekindex = map->dekindex;
@@ -3340,101 +3488,38 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
        /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
        case WRITE_6:
        case READ_6:
-               if (map->volume_blk_size == 512) {
-                       cp->tweak_lower =
-                               (((u32) cmd->cmnd[2]) << 8) |
-                                       cmd->cmnd[3];
-                       cp->tweak_upper = 0;
-               } else {
-                       first_block =
-                               (((u64) cmd->cmnd[2]) << 8) |
-                                       cmd->cmnd[3];
-                       first_block = (first_block * map->volume_blk_size)/512;
-                       cp->tweak_lower = (u32)first_block;
-                       cp->tweak_upper = (u32)(first_block >> 32);
-               }
+               first_block = get_unaligned_be16(&cmd->cmnd[2]);
                break;
        case WRITE_10:
        case READ_10:
-               if (map->volume_blk_size == 512) {
-                       cp->tweak_lower =
-                               (((u32) cmd->cmnd[2]) << 24) |
-                               (((u32) cmd->cmnd[3]) << 16) |
-                               (((u32) cmd->cmnd[4]) << 8) |
-                                       cmd->cmnd[5];
-                       cp->tweak_upper = 0;
-               } else {
-                       first_block =
-                               (((u64) cmd->cmnd[2]) << 24) |
-                               (((u64) cmd->cmnd[3]) << 16) |
-                               (((u64) cmd->cmnd[4]) << 8) |
-                                       cmd->cmnd[5];
-                       first_block = (first_block * map->volume_blk_size)/512;
-                       cp->tweak_lower = (u32)first_block;
-                       cp->tweak_upper = (u32)(first_block >> 32);
-               }
-               break;
        /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
        case WRITE_12:
        case READ_12:
-               if (map->volume_blk_size == 512) {
-                       cp->tweak_lower =
-                               (((u32) cmd->cmnd[2]) << 24) |
-                               (((u32) cmd->cmnd[3]) << 16) |
-                               (((u32) cmd->cmnd[4]) << 8) |
-                                       cmd->cmnd[5];
-                       cp->tweak_upper = 0;
-               } else {
-                       first_block =
-                               (((u64) cmd->cmnd[2]) << 24) |
-                               (((u64) cmd->cmnd[3]) << 16) |
-                               (((u64) cmd->cmnd[4]) << 8) |
-                                       cmd->cmnd[5];
-                       first_block = (first_block * map->volume_blk_size)/512;
-                       cp->tweak_lower = (u32)first_block;
-                       cp->tweak_upper = (u32)(first_block >> 32);
-               }
+               first_block = get_unaligned_be32(&cmd->cmnd[2]);
                break;
        case WRITE_16:
        case READ_16:
-               if (map->volume_blk_size == 512) {
-                       cp->tweak_lower =
-                               (((u32) cmd->cmnd[6]) << 24) |
-                               (((u32) cmd->cmnd[7]) << 16) |
-                               (((u32) cmd->cmnd[8]) << 8) |
-                                       cmd->cmnd[9];
-                       cp->tweak_upper =
-                               (((u32) cmd->cmnd[2]) << 24) |
-                               (((u32) cmd->cmnd[3]) << 16) |
-                               (((u32) cmd->cmnd[4]) << 8) |
-                                       cmd->cmnd[5];
-               } else {
-                       first_block =
-                               (((u64) cmd->cmnd[2]) << 56) |
-                               (((u64) cmd->cmnd[3]) << 48) |
-                               (((u64) cmd->cmnd[4]) << 40) |
-                               (((u64) cmd->cmnd[5]) << 32) |
-                               (((u64) cmd->cmnd[6]) << 24) |
-                               (((u64) cmd->cmnd[7]) << 16) |
-                               (((u64) cmd->cmnd[8]) << 8) |
-                                       cmd->cmnd[9];
-                       first_block = (first_block * map->volume_blk_size)/512;
-                       cp->tweak_lower = (u32)first_block;
-                       cp->tweak_upper = (u32)(first_block >> 32);
-               }
+               first_block = get_unaligned_be64(&cmd->cmnd[2]);
                break;
        default:
                dev_err(&h->pdev->dev,
-                       "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
-                       __func__);
+                       "ERROR: %s: size (0x%x) not supported for encryption\n",
+                       __func__, cmd->cmnd[0]);
                BUG();
                break;
        }
+
+       if (le32_to_cpu(map->volume_blk_size) != 512)
+               first_block = first_block *
+                               le32_to_cpu(map->volume_blk_size)/512;
+
+       cp->tweak_lower = cpu_to_le32(first_block);
+       cp->tweak_upper = cpu_to_le32(first_block >> 32);
 }
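The collapsed switch above leans on the unaligned big-endian accessors from <asm/unaligned.h> (added to the includes earlier in this patch) instead of hand-rolled shift-and-or chains. A small illustrative helper, a hypothetical cdb_to_lba() rather than hpsa code:

#include <asm/unaligned.h>

/* The LBA starts at byte 2 for 10-, 12- and 16-byte read/write CDBs. */
static u64 cdb_to_lba(const u8 *cdb, int cdb_len)
{
        if (cdb_len == 16)
                return get_unaligned_be64(&cdb[2]);
        return get_unaligned_be32(&cdb[2]);     /* 10- and 12-byte CDBs */
}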
 
 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
-       u8 *scsi3addr)
+       u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 {
        struct scsi_cmnd *cmd = c->scsi_cmd;
        struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
@@ -3445,11 +3530,16 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
        u32 len;
        u32 total_len = 0;
 
-       if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+       if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
+               atomic_dec(&phys_disk->ioaccel_cmds_out);
                return IO_ACCEL_INELIGIBLE;
+       }
 
-       if (fixup_ioaccel_cdb(cdb, &cdb_len))
+       if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
+               atomic_dec(&phys_disk->ioaccel_cmds_out);
                return IO_ACCEL_INELIGIBLE;
+       }
+
        c->cmd_type = CMD_IOACCEL2;
        /* Adjust the DMA address to point to the accelerated command buffer */
        c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
@@ -3460,8 +3550,10 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
        cp->IU_type = IOACCEL2_IU_TYPE;
 
        use_sg = scsi_dma_map(cmd);
-       if (use_sg < 0)
+       if (use_sg < 0) {
+               atomic_dec(&phys_disk->ioaccel_cmds_out);
                return use_sg;
+       }
 
        if (use_sg) {
                BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
@@ -3506,9 +3598,8 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
        /* Set encryption parameters, if necessary */
        set_encrypt_ioaccel2(h, c, cp);
 
-       cp->scsi_nexus = ioaccel_handle;
-       cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
-                               DIRECT_LOOKUP_BIT;
+       cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
+       cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
        memcpy(cp->cdb, cdb, sizeof(cp->cdb));
 
        /* fill in sg elements */
@@ -3528,14 +3619,22 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
  */
 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
-       u8 *scsi3addr)
+       u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 {
+       /* Try to honor the device's queue depth */
+       if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
+                                       phys_disk->queue_depth) {
+               atomic_dec(&phys_disk->ioaccel_cmds_out);
+               return IO_ACCEL_INELIGIBLE;
+       }
        if (h->transMethod & CFGTBL_Trans_io_accel1)
                return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
-                                               cdb, cdb_len, scsi3addr);
+                                               cdb, cdb_len, scsi3addr,
+                                               phys_disk);
        else
                return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
-                                               cdb, cdb_len, scsi3addr);
+                                               cdb, cdb_len, scsi3addr,
+                                               phys_disk);
 }
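The atomic_inc_return() check above, together with the atomic_dec() calls added to the ioaccel1/ioaccel2 error paths and the completion handler, forms a per-disk in-flight counter: every successful increment must be paired with exactly one decrement, whether the command completes, is rejected as ineligible, or fails to map. A generic sketch of the pairing with hypothetical names:

#include <linux/atomic.h>

struct fake_disk {
        atomic_t inflight;
        int depth;
};

/* Hypothetical submit path: reserve a slot, give it back on any early
 * exit; the completion handler performs the matching atomic_dec().
 */
static int try_fast_path(struct fake_disk *disk)
{
        if (atomic_inc_return(&disk->inflight) > disk->depth) {
                atomic_dec(&disk->inflight);
                return -EBUSY;          /* fall back to the normal RAID path */
        }
        /* ... build and post the accelerated command here ... */
        return 0;
}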
 
 static void raid_map_helper(struct raid_map_data *map,
@@ -3543,21 +3642,22 @@ static void raid_map_helper(struct raid_map_data *map,
 {
        if (offload_to_mirror == 0)  {
                /* use physical disk in the first mirrored group. */
-               *map_index %= map->data_disks_per_row;
+               *map_index %= le16_to_cpu(map->data_disks_per_row);
                return;
        }
        do {
                /* determine mirror group that *map_index indicates */
-               *current_group = *map_index / map->data_disks_per_row;
+               *current_group = *map_index /
+                       le16_to_cpu(map->data_disks_per_row);
                if (offload_to_mirror == *current_group)
                        continue;
-               if (*current_group < (map->layout_map_count - 1)) {
+               if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
                        /* select map index from next group */
-                       *map_index += map->data_disks_per_row;
+                       *map_index += le16_to_cpu(map->data_disks_per_row);
                        (*current_group)++;
                } else {
                        /* select map index from first group */
-                       *map_index %= map->data_disks_per_row;
+                       *map_index %= le16_to_cpu(map->data_disks_per_row);
                        *current_group = 0;
                }
        } while (offload_to_mirror != *current_group);
@@ -3595,13 +3695,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
        u32 disk_block_cnt;
        u8 cdb[16];
        u8 cdb_len;
+       u16 strip_size;
 #if BITS_PER_LONG == 32
        u64 tmpdiv;
 #endif
        int offload_to_mirror;
 
-       BUG_ON(!(dev->offload_config && dev->offload_enabled));
-
        /* check for valid opcode, get LBA and block count */
        switch (cmd->cmnd[0]) {
        case WRITE_6:
@@ -3668,11 +3767,14 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
                return IO_ACCEL_INELIGIBLE;
 
        /* check for invalid block or wraparound */
-       if (last_block >= map->volume_blk_cnt || last_block < first_block)
+       if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
+               last_block < first_block)
                return IO_ACCEL_INELIGIBLE;
 
        /* calculate stripe information for the request */
-       blocks_per_row = map->data_disks_per_row * map->strip_size;
+       blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
+                               le16_to_cpu(map->strip_size);
+       strip_size = le16_to_cpu(map->strip_size);
 #if BITS_PER_LONG == 32
        tmpdiv = first_block;
        (void) do_div(tmpdiv, blocks_per_row);
@@ -3683,18 +3785,18 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
        first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
        last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
        tmpdiv = first_row_offset;
-       (void) do_div(tmpdiv,  map->strip_size);
+       (void) do_div(tmpdiv, strip_size);
        first_column = tmpdiv;
        tmpdiv = last_row_offset;
-       (void) do_div(tmpdiv, map->strip_size);
+       (void) do_div(tmpdiv, strip_size);
        last_column = tmpdiv;
 #else
        first_row = first_block / blocks_per_row;
        last_row = last_block / blocks_per_row;
        first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
        last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
-       first_column = first_row_offset / map->strip_size;
-       last_column = last_row_offset / map->strip_size;
+       first_column = first_row_offset / strip_size;
+       last_column = last_row_offset / strip_size;
 #endif
 
        /* if this isn't a single row/column then give to the controller */
@@ -3702,10 +3804,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
                return IO_ACCEL_INELIGIBLE;
 
        /* proceeding with driver mapping */
-       total_disks_per_row = map->data_disks_per_row +
-                               map->metadata_disks_per_row;
+       total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
+                               le16_to_cpu(map->metadata_disks_per_row);
        map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
-                               map->row_cnt;
+                               le16_to_cpu(map->row_cnt);
        map_index = (map_row * total_disks_per_row) + first_column;
 
        switch (dev->raid_level) {
@@ -3716,23 +3818,24 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
                 * (2-drive R1 and R10 with even # of drives.)
                 * Appropriate for SSDs, not optimal for HDDs
                 */
-               BUG_ON(map->layout_map_count != 2);
+               BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
                if (dev->offload_to_mirror)
-                       map_index += map->data_disks_per_row;
+                       map_index += le16_to_cpu(map->data_disks_per_row);
                dev->offload_to_mirror = !dev->offload_to_mirror;
                break;
        case HPSA_RAID_ADM:
                /* Handles N-way mirrors  (R1-ADM)
                 * and R10 with # of drives divisible by 3.
                 */
-               BUG_ON(map->layout_map_count != 3);
+               BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
 
                offload_to_mirror = dev->offload_to_mirror;
                raid_map_helper(map, offload_to_mirror,
                                &map_index, &current_group);
                /* set mirror group to use next time */
                offload_to_mirror =
-                       (offload_to_mirror >= map->layout_map_count - 1)
+                       (offload_to_mirror >=
+                       le16_to_cpu(map->layout_map_count) - 1)
                        ? 0 : offload_to_mirror + 1;
                dev->offload_to_mirror = offload_to_mirror;
                /* Avoid direct use of dev->offload_to_mirror within this
@@ -3742,14 +3845,16 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
                break;
        case HPSA_RAID_5:
        case HPSA_RAID_6:
-               if (map->layout_map_count <= 1)
+               if (le16_to_cpu(map->layout_map_count) <= 1)
                        break;
 
                /* Verify first and last block are in same RAID group */
                r5or6_blocks_per_row =
-                       map->strip_size * map->data_disks_per_row;
+                       le16_to_cpu(map->strip_size) *
+                       le16_to_cpu(map->data_disks_per_row);
                BUG_ON(r5or6_blocks_per_row == 0);
-               stripesize = r5or6_blocks_per_row * map->layout_map_count;
+               stripesize = r5or6_blocks_per_row *
+                       le16_to_cpu(map->layout_map_count);
 #if BITS_PER_LONG == 32
                tmpdiv = first_block;
                first_group = do_div(tmpdiv, stripesize);
@@ -3812,28 +3917,35 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
                                                r5or6_blocks_per_row);
 
                first_column = r5or6_first_column =
-                       r5or6_first_row_offset / map->strip_size;
+                       r5or6_first_row_offset / le16_to_cpu(map->strip_size);
                r5or6_last_column =
-                       r5or6_last_row_offset / map->strip_size;
+                       r5or6_last_row_offset / le16_to_cpu(map->strip_size);
 #endif
                if (r5or6_first_column != r5or6_last_column)
                        return IO_ACCEL_INELIGIBLE;
 
                /* Request is eligible */
                map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
-                       map->row_cnt;
+                       le16_to_cpu(map->row_cnt);
 
                map_index = (first_group *
-                       (map->row_cnt * total_disks_per_row)) +
+                       (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
                        (map_row * total_disks_per_row) + first_column;
                break;
        default:
                return IO_ACCEL_INELIGIBLE;
        }
 
+       if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
+               return IO_ACCEL_INELIGIBLE;
+
+       c->phys_disk = dev->phys_disk[map_index];
+
        disk_handle = dd[map_index].ioaccel_handle;
-       disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
-                       (first_row_offset - (first_column * map->strip_size));
+       disk_block = le64_to_cpu(map->disk_starting_blk) +
+                       first_row * le16_to_cpu(map->strip_size) +
+                       (first_row_offset - first_column *
+                       le16_to_cpu(map->strip_size));
        disk_block_cnt = block_cnt;
 
        /* handle differing logical/physical block sizes */
@@ -3876,78 +3988,21 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
                cdb_len = 10;
        }
        return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
-                                               dev->scsi3addr);
+                                               dev->scsi3addr,
+                                               dev->phys_disk[map_index]);
 }
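
The raid-map arithmetic above now converts every 16- and 64-bit field with
le16_to_cpu()/le64_to_cpu() before using it, because the controller supplies the
map in little-endian byte order regardless of host endianness.  A minimal
userspace sketch of the same discipline, using the glibc <endian.h> helpers; the
struct and the field values are assumptions for illustration, not the driver's
raid_map_data layout:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical on-the-wire layout: every field stored little-endian */
struct raid_map_example {
        uint16_t data_disks_per_row;    /* __le16 in the real driver */
        uint16_t strip_size;            /* __le16 in the real driver */
};

int main(void)
{
        /* pretend these bytes arrived from the controller */
        struct raid_map_example m = {
                .data_disks_per_row = htole16(4),
                .strip_size         = htole16(128),
        };

        /* convert to CPU order before doing any arithmetic on the values */
        uint32_t blocks_per_row =
                (uint32_t)le16toh(m.data_disks_per_row) * le16toh(m.strip_size);

        printf("blocks_per_row = %u\n", (unsigned)blocks_per_row);
        return 0;
}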
 
-/*
- * Running in struct Scsi_Host->host_lock less mode using LLD internal
- * struct ctlr_info *h->lock w/ spin_lock_irqsave() protection.
- */
-static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+/* Submit commands down the "normal" RAID stack path */
+static int hpsa_ciss_submit(struct ctlr_info *h,
+       struct CommandList *c, struct scsi_cmnd *cmd,
+       unsigned char scsi3addr[])
 {
-       struct ctlr_info *h;
-       struct hpsa_scsi_dev_t *dev;
-       unsigned char scsi3addr[8];
-       struct CommandList *c;
-       int rc = 0;
-
-       /* Get the ptr to our adapter structure out of cmd->host. */
-       h = sdev_to_hba(cmd->device);
-       dev = cmd->device->hostdata;
-       if (!dev) {
-               cmd->result = DID_NO_CONNECT << 16;
-               cmd->scsi_done(cmd);
-               return 0;
-       }
-       memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
-
-       if (unlikely(lockup_detected(h))) {
-               cmd->result = DID_ERROR << 16;
-               cmd->scsi_done(cmd);
-               return 0;
-       }
-       c = cmd_alloc(h);
-       if (c == NULL) {                        /* trouble... */
-               dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
-               return SCSI_MLQUEUE_HOST_BUSY;
-       }
-
-       /* Fill in the command list header */
-       /* save c in case we have to abort it  */
        cmd->host_scribble = (unsigned char *) c;
-
        c->cmd_type = CMD_SCSI;
        c->scsi_cmd = cmd;
-
-       /* Call alternate submit routine for I/O accelerated commands.
-        * Retries always go down the normal I/O path.
-        */
-       if (likely(cmd->retries == 0 &&
-               cmd->request->cmd_type == REQ_TYPE_FS &&
-               h->acciopath_status)) {
-               if (dev->offload_enabled) {
-                       rc = hpsa_scsi_ioaccel_raid_map(h, c);
-                       if (rc == 0)
-                               return 0; /* Sent on ioaccel path */
-                       if (rc < 0) {   /* scsi_dma_map failed. */
-                               cmd_free(h, c);
-                               return SCSI_MLQUEUE_HOST_BUSY;
-                       }
-               } else if (dev->ioaccel_handle) {
-                       rc = hpsa_scsi_ioaccel_direct_map(h, c);
-                       if (rc == 0)
-                               return 0; /* Sent on direct map path */
-                       if (rc < 0) {   /* scsi_dma_map failed. */
-                               cmd_free(h, c);
-                               return SCSI_MLQUEUE_HOST_BUSY;
-                       }
-               }
-       }
-
        c->Header.ReplyQueue = 0;  /* unused in simple mode */
        memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
-       c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) |
-                                       DIRECT_LOOKUP_BIT);
+       c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
 
        /* Fill in the request block... */
 
@@ -4003,66 +4058,167 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
        return 0;
 }
 
-static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
-{
-       unsigned long flags;
-
-       /*
-        * Don't let rescans be initiated on a controller known
-        * to be locked up.  If the controller locks up *during*
-        * a rescan, that thread is probably hosed, but at least
-        * we can prevent new rescan threads from piling up on a
-        * locked up controller.
-        */
-       if (unlikely(lockup_detected(h))) {
-               spin_lock_irqsave(&h->scan_lock, flags);
-               h->scan_finished = 1;
-               wake_up_all(&h->scan_wait_queue);
-               spin_unlock_irqrestore(&h->scan_lock, flags);
-               return 1;
-       }
-       return 0;
-}
-
-static void hpsa_scan_start(struct Scsi_Host *sh)
+static void hpsa_command_resubmit_worker(struct work_struct *work)
 {
-       struct ctlr_info *h = shost_to_hba(sh);
-       unsigned long flags;
+       struct scsi_cmnd *cmd;
+       struct hpsa_scsi_dev_t *dev;
+       struct CommandList *c =
+                       container_of(work, struct CommandList, work);
 
-       if (do_not_scan_if_controller_locked_up(h))
+       cmd = c->scsi_cmd;
+       dev = cmd->device->hostdata;
+       if (!dev) {
+               cmd->result = DID_NO_CONNECT << 16;
+               cmd->scsi_done(cmd);
                return;
-
-       /* wait until any scan already in progress is finished. */
-       while (1) {
-               spin_lock_irqsave(&h->scan_lock, flags);
-               if (h->scan_finished)
-                       break;
-               spin_unlock_irqrestore(&h->scan_lock, flags);
-               wait_event(h->scan_wait_queue, h->scan_finished);
-               /* Note: We don't need to worry about a race between this
-                * thread and driver unload because the midlayer will
-                * have incremented the reference count, so unload won't
-                * happen if we're in here.
+       }
+       if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
+               /*
+                * If we get here, it means dma mapping failed. Try
+                * again via scsi mid layer, which will then get
+                * SCSI_MLQUEUE_HOST_BUSY.
                 */
+               cmd->result = DID_IMM_RETRY << 16;
+               cmd->scsi_done(cmd);
        }
-       h->scan_finished = 0; /* mark scan as in progress */
-       spin_unlock_irqrestore(&h->scan_lock, flags);
-
-       if (do_not_scan_if_controller_locked_up(h))
-               return;
-
-       hpsa_update_scsi_devices(h, h->scsi_host->host_no);
-
-       spin_lock_irqsave(&h->scan_lock, flags);
-       h->scan_finished = 1; /* mark scan as finished. */
-       wake_up_all(&h->scan_wait_queue);
-       spin_unlock_irqrestore(&h->scan_lock, flags);
 }
 
-static int hpsa_scan_finished(struct Scsi_Host *sh,
-       unsigned long elapsed_time)
+/* Running in struct Scsi_Host->host_lock less mode */
+static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 {
-       struct ctlr_info *h = shost_to_hba(sh);
+       struct ctlr_info *h;
+       struct hpsa_scsi_dev_t *dev;
+       unsigned char scsi3addr[8];
+       struct CommandList *c;
+       int rc = 0;
+
+       /* Get the ptr to our adapter structure out of cmd->host. */
+       h = sdev_to_hba(cmd->device);
+       dev = cmd->device->hostdata;
+       if (!dev) {
+               cmd->result = DID_NO_CONNECT << 16;
+               cmd->scsi_done(cmd);
+               return 0;
+       }
+       memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
+
+       if (unlikely(lockup_detected(h))) {
+               cmd->result = DID_ERROR << 16;
+               cmd->scsi_done(cmd);
+               return 0;
+       }
+       c = cmd_alloc(h);
+       if (c == NULL) {                        /* trouble... */
+               dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+       if (unlikely(lockup_detected(h))) {
+               cmd->result = DID_ERROR << 16;
+               cmd_free(h, c);
+               cmd->scsi_done(cmd);
+               return 0;
+       }
+
+       /*
+        * Call alternate submit routine for I/O accelerated commands.
+        * Retries always go down the normal I/O path.
+        */
+       if (likely(cmd->retries == 0 &&
+               cmd->request->cmd_type == REQ_TYPE_FS &&
+               h->acciopath_status)) {
+
+               cmd->host_scribble = (unsigned char *) c;
+               c->cmd_type = CMD_SCSI;
+               c->scsi_cmd = cmd;
+
+               if (dev->offload_enabled) {
+                       rc = hpsa_scsi_ioaccel_raid_map(h, c);
+                       if (rc == 0)
+                               return 0; /* Sent on ioaccel path */
+                       if (rc < 0) {   /* scsi_dma_map failed. */
+                               cmd_free(h, c);
+                               return SCSI_MLQUEUE_HOST_BUSY;
+                       }
+               } else if (dev->ioaccel_handle) {
+                       rc = hpsa_scsi_ioaccel_direct_map(h, c);
+                       if (rc == 0)
+                               return 0; /* Sent on direct map path */
+                       if (rc < 0) {   /* scsi_dma_map failed. */
+                               cmd_free(h, c);
+                               return SCSI_MLQUEUE_HOST_BUSY;
+                       }
+               }
+       }
+       return hpsa_ciss_submit(h, c, cmd, scsi3addr);
+}
+
+static void hpsa_scan_complete(struct ctlr_info *h)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&h->scan_lock, flags);
+       h->scan_finished = 1;
+       wake_up_all(&h->scan_wait_queue);
+       spin_unlock_irqrestore(&h->scan_lock, flags);
+}
+
+static void hpsa_scan_start(struct Scsi_Host *sh)
+{
+       struct ctlr_info *h = shost_to_hba(sh);
+       unsigned long flags;
+
+       /*
+        * Don't let rescans be initiated on a controller known to be locked
+        * up.  If the controller locks up *during* a rescan, that thread is
+        * probably hosed, but at least we can prevent new rescan threads from
+        * piling up on a locked up controller.
+        */
+       if (unlikely(lockup_detected(h)))
+               return hpsa_scan_complete(h);
+
+       /* wait until any scan already in progress is finished. */
+       while (1) {
+               spin_lock_irqsave(&h->scan_lock, flags);
+               if (h->scan_finished)
+                       break;
+               spin_unlock_irqrestore(&h->scan_lock, flags);
+               wait_event(h->scan_wait_queue, h->scan_finished);
+               /* Note: We don't need to worry about a race between this
+                * thread and driver unload because the midlayer will
+                * have incremented the reference count, so unload won't
+                * happen if we're in here.
+                */
+       }
+       h->scan_finished = 0; /* mark scan as in progress */
+       spin_unlock_irqrestore(&h->scan_lock, flags);
+
+       if (unlikely(lockup_detected(h)))
+               return hpsa_scan_complete(h);
+
+       hpsa_update_scsi_devices(h, h->scsi_host->host_no);
+
+       hpsa_scan_complete(h);
+}
+
+static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+       struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
+
+       if (!logical_drive)
+               return -ENODEV;
+
+       if (qdepth < 1)
+               qdepth = 1;
+       else if (qdepth > logical_drive->queue_depth)
+               qdepth = logical_drive->queue_depth;
+
+       return scsi_change_queue_depth(sdev, qdepth);
+}
+
+static int hpsa_scan_finished(struct Scsi_Host *sh,
+       unsigned long elapsed_time)
+{
+       struct ctlr_info *h = shost_to_hba(sh);
        unsigned long flags;
        int finished;
 
@@ -4096,11 +4252,11 @@ static int hpsa_register_scsi(struct ctlr_info *h)
        sh->max_cmd_len = MAX_COMMAND_SIZE;
        sh->max_lun = HPSA_MAX_LUN;
        sh->max_id = HPSA_MAX_LUN;
-       sh->can_queue = h->nr_cmds;
-       if (h->hba_mode_enabled)
-               sh->cmd_per_lun = 7;
-       else
-               sh->cmd_per_lun = h->nr_cmds;
+       sh->can_queue = h->nr_cmds -
+                       HPSA_CMDS_RESERVED_FOR_ABORTS -
+                       HPSA_CMDS_RESERVED_FOR_DRIVER -
+                       HPSA_MAX_CONCURRENT_PASSTHRUS;
+       sh->cmd_per_lun = sh->can_queue;
        sh->sg_tablesize = h->maxsgentries;
        h->scsi_host = sh;
        sh->hostdata[0] = (unsigned long) h;
@@ -4131,7 +4287,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
        int waittime = 1; /* seconds */
        struct CommandList *c;
 
-       c = cmd_special_alloc(h);
+       c = cmd_alloc(h);
        if (!c) {
                dev_warn(&h->pdev->dev, "out of memory in "
                        "wait_for_device_to_become_ready.\n");
@@ -4177,7 +4333,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
        else
                dev_warn(&h->pdev->dev, "device is ready.\n");
 
-       cmd_special_free(h, c);
+       cmd_free(h, c);
        return rc;
 }
 
@@ -4194,6 +4350,10 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
        h = sdev_to_hba(scsicmd->device);
        if (h == NULL) /* paranoia */
                return FAILED;
+
+       if (lockup_detected(h))
+               return FAILED;
+
        dev = scsicmd->device->hostdata;
        if (!dev) {
                dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
@@ -4227,13 +4387,15 @@ static void swizzle_abort_tag(u8 *tag)
 }
 
 static void hpsa_get_tag(struct ctlr_info *h,
-       struct CommandList *c, u32 *taglower, u32 *tagupper)
+       struct CommandList *c, __le32 *taglower, __le32 *tagupper)
 {
+       u64 tag;
        if (c->cmd_type == CMD_IOACCEL1) {
                struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
                        &h->ioaccel_cmd_pool[c->cmdindex];
-               *tagupper = (u32) (cm1->tag >> 32);
-               *taglower = (u32) (cm1->tag & 0x0ffffffffULL);
+               tag = le64_to_cpu(cm1->tag);
+               *tagupper = cpu_to_le32(tag >> 32);
+               *taglower = cpu_to_le32(tag);
                return;
        }
        if (c->cmd_type == CMD_IOACCEL2) {
@@ -4244,8 +4406,9 @@ static void hpsa_get_tag(struct ctlr_info *h,
                *taglower = cm2->Tag;
                return;
        }
-       *tagupper = (u32) (c->Header.tag >> 32);
-       *taglower = (u32) (c->Header.tag & 0x0ffffffffULL);
+       tag = le64_to_cpu(c->Header.tag);
+       *tagupper = cpu_to_le32(tag >> 32);
+       *taglower = cpu_to_le32(tag);
 }
 
 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
@@ -4254,11 +4417,11 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
        int rc = IO_OK;
        struct CommandList *c;
        struct ErrorInfo *ei;
-       u32 tagupper, taglower;
+       __le32 tagupper, taglower;
 
-       c = cmd_special_alloc(h);
+       c = cmd_alloc(h);
        if (c == NULL) {        /* trouble... */
-               dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+               dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                return -ENOMEM;
        }
 
@@ -4287,62 +4450,12 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
                rc = -1;
                break;
        }
-       cmd_special_free(h, c);
+       cmd_free(h, c);
        dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
                __func__, tagupper, taglower);
        return rc;
 }
 
-/*
- * hpsa_find_cmd_in_queue
- *
- * Used to determine whether a command (find) is still present
- * in queue_head.   Optionally excludes the last element of queue_head.
- *
- * This is used to avoid unnecessary aborts.  Commands in h->reqQ have
- * not yet been submitted, and so can be aborted by the driver without
- * sending an abort to the hardware.
- *
- * Returns pointer to command if found in queue, NULL otherwise.
- */
-static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
-                       struct scsi_cmnd *find, struct list_head *queue_head)
-{
-       unsigned long flags;
-       struct CommandList *c = NULL;   /* ptr into cmpQ */
-
-       if (!find)
-               return NULL;
-       spin_lock_irqsave(&h->lock, flags);
-       list_for_each_entry(c, queue_head, list) {
-               if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
-                       continue;
-               if (c->scsi_cmd == find) {
-                       spin_unlock_irqrestore(&h->lock, flags);
-                       return c;
-               }
-       }
-       spin_unlock_irqrestore(&h->lock, flags);
-       return NULL;
-}
-
-static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
-                                       u8 *tag, struct list_head *queue_head)
-{
-       unsigned long flags;
-       struct CommandList *c;
-
-       spin_lock_irqsave(&h->lock, flags);
-       list_for_each_entry(c, queue_head, list) {
-               if (memcmp(&c->Header.tag, tag, 8) != 0)
-                       continue;
-               spin_unlock_irqrestore(&h->lock, flags);
-               return c;
-       }
-       spin_unlock_irqrestore(&h->lock, flags);
-       return NULL;
-}
-
 /* ioaccel2 path firmware cannot handle abort task requests.
  * Change abort requests to physical target reset, and send to the
  * address of the physical disk used for the ioaccel 2 command.
@@ -4360,7 +4473,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
        unsigned char *psa = &phys_scsi3addr[0];
 
        /* Get a pointer to the hpsa logical device. */
-       scmd = (struct scsi_cmnd *) abort->scsi_cmd;
+       scmd = abort->scsi_cmd;
        dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
        if (dev == NULL) {
                dev_warn(&h->pdev->dev,
@@ -4429,10 +4542,6 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
        unsigned char *scsi3addr, struct CommandList *abort)
 {
-       u8 swizzled_tag[8];
-       struct CommandList *c;
-       int rc = 0, rc2 = 0;
-
        /* ioaccelerator mode 2 commands should be aborted via the
         * accelerated path, since RAID path is unaware of these commands,
         * but underlying firmware can't handle abort TMF.
@@ -4441,27 +4550,8 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
        if (abort->cmd_type == CMD_IOACCEL2)
                return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
 
-       /* we do not expect to find the swizzled tag in our queue, but
-        * check anyway just to be sure the assumptions which make this
-        * the case haven't become wrong.
-        */
-       memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
-       swizzle_abort_tag(swizzled_tag);
-       c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
-       if (c != NULL) {
-               dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
-               return hpsa_send_abort(h, scsi3addr, abort, 0);
-       }
-       rc = hpsa_send_abort(h, scsi3addr, abort, 0);
-
-       /* if the command is still in our queue, we can't conclude that it was
-        * aborted (it might have just completed normally) but in any case
-        * we don't need to try to abort it another way.
-        */
-       c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
-       if (c)
-               rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
-       return rc && rc2;
+       return hpsa_send_abort(h, scsi3addr, abort, 0) &&
+                       hpsa_send_abort(h, scsi3addr, abort, 1);
 }
 
 /* Send an abort for the specified command.
@@ -4475,11 +4565,11 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
        struct ctlr_info *h;
        struct hpsa_scsi_dev_t *dev;
        struct CommandList *abort; /* pointer to command to be aborted */
-       struct CommandList *found;
        struct scsi_cmnd *as;   /* ptr to scsi cmd inside aborted command. */
        char msg[256];          /* For debug messaging. */
        int ml = 0;
-       u32 tagupper, taglower;
+       __le32 tagupper, taglower;
+       int refcount;
 
        /* Find the controller of the command to be aborted */
        h = sdev_to_hba(sc->device);
@@ -4487,6 +4577,9 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
                        "ABORT REQUEST FAILED, Controller lookup failed.\n"))
                return FAILED;
 
+       if (lockup_detected(h))
+               return FAILED;
+
        /* Check that controller supports some kind of task abort */
        if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
                !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
@@ -4508,41 +4601,23 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
        /* Get SCSI command to be aborted */
        abort = (struct CommandList *) sc->host_scribble;
        if (abort == NULL) {
-               dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
-                               msg);
-               return FAILED;
+               /* This can happen if the command already completed. */
+               return SUCCESS;
+       }
+       refcount = atomic_inc_return(&abort->refcount);
+       if (refcount == 1) { /* Command is done already. */
+               cmd_free(h, abort);
+               return SUCCESS;
        }
        hpsa_get_tag(h, abort, &taglower, &tagupper);
        ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
-       as  = (struct scsi_cmnd *) abort->scsi_cmd;
+       as  = abort->scsi_cmd;
        if (as != NULL)
                ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
                        as->cmnd[0], as->serial_number);
        dev_dbg(&h->pdev->dev, "%s\n", msg);
        dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
                h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
-
-       /* Search reqQ to See if command is queued but not submitted,
-        * if so, complete the command with aborted status and remove
-        * it from the reqQ.
-        */
-       found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
-       if (found) {
-               found->err_info->CommandStatus = CMD_ABORTED;
-               finish_cmd(found);
-               dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
-                               msg);
-               return SUCCESS;
-       }
-
-       /* not in reqQ, if also not in cmpQ, must have already completed */
-       found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
-       if (!found)  {
-               dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
-                               msg);
-               return SUCCESS;
-       }
-
        /*
         * Command is in flight, or possibly already completed
         * by the firmware (but not to the scsi mid layer) but we can't
@@ -4554,6 +4629,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
                dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
                        h->scsi_host->host_no,
                        dev->bus, dev->target, dev->lun);
+               cmd_free(h, abort);
                return FAILED;
        }
        dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
@@ -4565,32 +4641,38 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
         */
 #define ABORT_COMPLETE_WAIT_SECS 30
        for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
-               found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
-               if (!found)
+               refcount = atomic_read(&abort->refcount);
+               if (refcount < 2) {
+                       cmd_free(h, abort);
                        return SUCCESS;
-               msleep(100);
+               } else {
+                       msleep(100);
+               }
        }
        dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
                msg, ABORT_COMPLETE_WAIT_SECS);
+       cmd_free(h, abort);
        return FAILED;
 }
 
-
 /*
  * For operations that cannot sleep, a command block is allocated at init,
  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
  * which ones are free or in use.  Lock must be held when calling this.
  * cmd_free() is the complement.
  */
+
 static struct CommandList *cmd_alloc(struct ctlr_info *h)
 {
        struct CommandList *c;
        int i;
        union u64bit temp64;
        dma_addr_t cmd_dma_handle, err_dma_handle;
-       int loopcount;
+       int refcount;
+       unsigned long offset;
 
-       /* There is some *extremely* small but non-zero chance that that
+       /*
+        * There is some *extremely* small but non-zero chance that
         * multiple threads could get in here, and one thread could
         * be scanning through the list of bits looking for a free
         * one, but the free ones are always behind him, and other
@@ -4601,24 +4683,30 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
         * infrequently as to be indistinguishable from never.
         */
 
-       loopcount = 0;
-       do {
-               i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
-               if (i == h->nr_cmds)
-                       i = 0;
-               loopcount++;
-       } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
-                 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 &&
-               loopcount < 10);
-
-       /* Thread got starved?  We do not expect this to ever happen. */
-       if (loopcount >= 10)
-               return NULL;
-
-       c = h->cmd_pool + i;
-       memset(c, 0, sizeof(*c));
-       cmd_dma_handle = h->cmd_pool_dhandle
-           + i * sizeof(*c);
+       offset = h->last_allocation; /* benignly racy */
+       for (;;) {
+               i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
+               if (unlikely(i == h->nr_cmds)) {
+                       offset = 0;
+                       continue;
+               }
+               c = h->cmd_pool + i;
+               refcount = atomic_inc_return(&c->refcount);
+               if (unlikely(refcount > 1)) {
+                       cmd_free(h, c); /* already in use */
+                       offset = (i + 1) % h->nr_cmds;
+                       continue;
+               }
+               set_bit(i & (BITS_PER_LONG - 1),
+                       h->cmd_pool_bits + (i / BITS_PER_LONG));
+               break; /* it's ours now. */
+       }
+       h->last_allocation = i; /* benignly racy */
+
+       /* Zero out all of the CommandList except the last field, refcount */
+       memset(c, 0, offsetof(struct CommandList, refcount));
+       c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
+       cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
        c->err_info = h->errinfo_pool + i;
        memset(c->err_info, 0, sizeof(*c->err_info));
        err_dma_handle = h->errinfo_pool_dhandle
@@ -4626,45 +4714,10 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 
        c->cmdindex = i;
 
-       INIT_LIST_HEAD(&c->list);
        c->busaddr = (u32) cmd_dma_handle;
        temp64.val = (u64) err_dma_handle;
-       c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
-       c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
-
-       c->h = h;
-       return c;
-}
-
-/* For operations that can wait for kmalloc to possibly sleep,
- * this routine can be called. Lock need not be held to call
- * cmd_special_alloc. cmd_special_free() is the complement.
- */
-static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
-{
-       struct CommandList *c;
-       dma_addr_t cmd_dma_handle, err_dma_handle;
-
-       c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
-       if (c == NULL)
-               return NULL;
-
-       c->cmd_type = CMD_SCSI;
-       c->cmdindex = -1;
-
-       c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info),
-                                           &err_dma_handle);
-
-       if (c->err_info == NULL) {
-               pci_free_consistent(h->pdev,
-                       sizeof(*c), c, cmd_dma_handle);
-               return NULL;
-       }
-
-       INIT_LIST_HEAD(&c->list);
-       c->busaddr = (u32) cmd_dma_handle;
-       c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
-       c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
+       c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
+       c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
 
        c->h = h;
        return c;
@@ -4672,20 +4725,13 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
 
 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
 {
-       int i;
-
-       i = c - h->cmd_pool;
-       clear_bit(i & (BITS_PER_LONG - 1),
-                 h->cmd_pool_bits + (i / BITS_PER_LONG));
-}
+       if (atomic_dec_and_test(&c->refcount)) {
+               int i;
 
-static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
-{
-       pci_free_consistent(h->pdev, sizeof(*c->err_info),
-                           c->err_info,
-                           (dma_addr_t) le64_to_cpu(c->ErrDesc.Addr));
-       pci_free_consistent(h->pdev, sizeof(*c),
-                           c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
+               i = c - h->cmd_pool;
+               clear_bit(i & (BITS_PER_LONG - 1),
+                         h->cmd_pool_bits + (i / BITS_PER_LONG));
+       }
 }
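
The reworked cmd_alloc()/cmd_free() above replace the bitmap-only allocator (and
the separate cmd_special_alloc()/cmd_special_free() DMA allocations) with a
per-command reference count: a slot belongs to whoever makes its refcount go
from 0 to 1; the bitmap still speeds up the search, but the refcount is what
resolves races and lets the abort handler hold an extra reference.  A minimal
userspace sketch of that claim-and-release pattern, with the bitmap omitted; the
names and sizes are assumptions, not driver code:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define NR_SLOTS 16

struct slot {
        atomic_int refcount;            /* 0 = free, >= 1 = in use */
};

static struct slot pool[NR_SLOTS];
static size_t last_allocation;          /* benignly racy scan hint */

static struct slot *slot_alloc(void)
{
        size_t i, offset = last_allocation;

        for (;;) {
                i = offset % NR_SLOTS;
                /* we own the slot only if we made the 0 -> 1 transition */
                if (atomic_fetch_add(&pool[i].refcount, 1) == 0)
                        break;
                atomic_fetch_sub(&pool[i].refcount, 1);  /* already in use */
                offset++;
        }
        last_allocation = i;
        return &pool[i];
}

static void slot_free(struct slot *s)
{
        atomic_fetch_sub(&s->refcount, 1);
}

int main(void)
{
        struct slot *a = slot_alloc(), *b = slot_alloc();

        printf("claimed slots %td and %td\n", a - pool, b - pool);
        slot_free(a);
        slot_free(b);
        return 0;
}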
 
 #ifdef CONFIG_COMPAT
@@ -4866,7 +4912,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                        memset(buff, 0, iocommand.buf_size);
                }
        }
-       c = cmd_special_alloc(h);
+       c = cmd_alloc(h);
        if (c == NULL) {
                rc = -ENOMEM;
                goto out_kfree;
@@ -4883,8 +4929,6 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                c->Header.SGTotal = cpu_to_le16(0);
        }
        memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
-       /* use the kernel address the cmd block for tag */
-       c->Header.tag = c->busaddr;
 
        /* Fill in Request block */
        memcpy(&c->Request, &iocommand.Request,
@@ -4925,7 +4969,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                }
        }
 out:
-       cmd_special_free(h, c);
+       cmd_free(h, c);
 out_kfree:
        kfree(buff);
        return rc;
@@ -4940,7 +4984,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        u64 temp64;
        BYTE sg_used = 0;
        int status = 0;
-       int i;
        u32 left;
        u32 sz;
        BYTE __user *data_ptr;
@@ -5004,7 +5047,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                data_ptr += sz;
                sg_used++;
        }
-       c = cmd_special_alloc(h);
+       c = cmd_alloc(h);
        if (c == NULL) {
                status = -ENOMEM;
                goto cleanup1;
@@ -5014,7 +5057,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        c->Header.SGList = (u8) sg_used;
        c->Header.SGTotal = cpu_to_le16(sg_used);
        memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
-       c->Header.tag = c->busaddr;
        memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
        if (ioc->buf_size > 0) {
                int i;
@@ -5047,6 +5089,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                goto cleanup0;
        }
        if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
+               int i;
+
                /* Copy the data out of the buffer we created */
                BYTE __user *ptr = ioc->buf;
                for (i = 0; i < sg_used; i++) {
@@ -5059,9 +5103,11 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        }
        status = 0;
 cleanup0:
-       cmd_special_free(h, c);
+       cmd_free(h, c);
 cleanup1:
        if (buff) {
+               int i;
+
                for (i = 0; i < sg_used; i++)
                        kfree(buff[i]);
                kfree(buff);
@@ -5079,35 +5125,6 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
                (void) check_for_unit_attention(h, c);
 }
 
-static int increment_passthru_count(struct ctlr_info *h)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&h->passthru_count_lock, flags);
-       if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
-               spin_unlock_irqrestore(&h->passthru_count_lock, flags);
-               return -1;
-       }
-       h->passthru_count++;
-       spin_unlock_irqrestore(&h->passthru_count_lock, flags);
-       return 0;
-}
-
-static void decrement_passthru_count(struct ctlr_info *h)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&h->passthru_count_lock, flags);
-       if (h->passthru_count <= 0) {
-               spin_unlock_irqrestore(&h->passthru_count_lock, flags);
-               /* not expecting to get here. */
-               dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
-               return;
-       }
-       h->passthru_count--;
-       spin_unlock_irqrestore(&h->passthru_count_lock, flags);
-}
-
 /*
  * ioctl
  */
@@ -5130,16 +5147,16 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
        case CCISS_GETDRIVVER:
                return hpsa_getdrivver_ioctl(h, argp);
        case CCISS_PASSTHRU:
-               if (increment_passthru_count(h))
+               if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
                        return -EAGAIN;
                rc = hpsa_passthru_ioctl(h, argp);
-               decrement_passthru_count(h);
+               atomic_inc(&h->passthru_cmds_avail);
                return rc;
        case CCISS_BIG_PASSTHRU:
-               if (increment_passthru_count(h))
+               if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
                        return -EAGAIN;
                rc = hpsa_big_passthru_ioctl(h, argp);
-               decrement_passthru_count(h);
+               atomic_inc(&h->passthru_cmds_avail);
                return rc;
        default:
                return -ENOTTY;
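
The ioctl path above now bounds concurrent passthru commands with an atomic
counter, atomic_dec_if_positive() to take a slot and atomic_inc() to return it,
instead of the spinlock-protected helpers that were removed.  A userspace sketch
of the same gate; the compare-and-swap loop stands in for the kernel's
atomic_dec_if_positive(), and the limit of 3 is an arbitrary example value:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int cmds_avail = 3;       /* example limit, not the driver's */

/* decrement only if the result stays >= 0; true on success */
static bool try_take(atomic_int *v)
{
        int old = atomic_load(v);

        while (old > 0) {
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        return true;
                /* on failure, old was reloaded; loop and re-check it */
        }
        return false;
}

static void put_back(atomic_int *v)
{
        atomic_fetch_add(v, 1);
}

int main(void)
{
        if (!try_take(&cmds_avail)) {
                puts("busy: would return -EAGAIN");
                return 0;
        }
        puts("issuing passthru command");
        put_back(&cmds_avail);
        return 0;
}
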
@@ -5173,7 +5190,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 {
        int pci_dir = XFER_NONE;
        struct CommandList *a; /* for commands to be aborted */
-       u32 tupper, tlower;
 
        c->cmd_type = CMD_IOCTL_PEND;
        c->Header.ReplyQueue = 0;
@@ -5184,7 +5200,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
                c->Header.SGList = 0;
                c->Header.SGTotal = cpu_to_le16(0);
        }
-       c->Header.tag = c->busaddr;
        memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
 
        if (cmd_type == TYPE_CMD) {
@@ -5256,6 +5271,16 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
                        c->Request.CDB[7] = (size >> 16) & 0xFF;
                        c->Request.CDB[8] = (size >> 8) & 0xFF;
                        break;
+               case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+                       c->Request.CDBLen = 10;
+                       c->Request.type_attr_dir =
+                               TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+                       c->Request.Timeout = 0;
+                       c->Request.CDB[0] = BMIC_READ;
+                       c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
+                       c->Request.CDB[7] = (size >> 16) & 0xFF;
+                       c->Request.CDB[8] = (size >> 8) & 0xFF;
+                       break;
                default:
                        dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
                        BUG();
@@ -5281,10 +5306,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
                        break;
                case  HPSA_ABORT_MSG:
                        a = buff;       /* point to command to be aborted */
-                       dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx",
+                       dev_dbg(&h->pdev->dev,
+                               "Abort Tag:0x%016llx request Tag:0x%016llx",
                                a->Header.tag, c->Header.tag);
-                       tlower = (u32) (a->Header.tag >> 32);
-                       tupper = (u32) (a->Header.tag & 0x0ffffffffULL);
                        c->Request.CDBLen = 16;
                        c->Request.type_attr_dir =
                                        TYPE_ATTR_DIR(cmd_type,
@@ -5295,14 +5319,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
                        c->Request.CDB[2] = 0x00; /* reserved */
                        c->Request.CDB[3] = 0x00; /* reserved */
                        /* Tag to abort goes in CDB[4]-CDB[11] */
-                       c->Request.CDB[4] = tlower & 0xFF;
-                       c->Request.CDB[5] = (tlower >> 8) & 0xFF;
-                       c->Request.CDB[6] = (tlower >> 16) & 0xFF;
-                       c->Request.CDB[7] = (tlower >> 24) & 0xFF;
-                       c->Request.CDB[8] = tupper & 0xFF;
-                       c->Request.CDB[9] = (tupper >> 8) & 0xFF;
-                       c->Request.CDB[10] = (tupper >> 16) & 0xFF;
-                       c->Request.CDB[11] = (tupper >> 24) & 0xFF;
+                       memcpy(&c->Request.CDB[4], &a->Header.tag,
+                               sizeof(a->Header.tag));
                        c->Request.CDB[12] = 0x00; /* reserved */
                        c->Request.CDB[13] = 0x00; /* reserved */
                        c->Request.CDB[14] = 0x00; /* reserved */
@@ -5349,47 +5367,6 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
        return page_remapped ? (page_remapped + page_offs) : NULL;
 }
 
-/* Takes cmds off the submission queue and sends them to the hardware,
- * then puts them on the queue of cmds waiting for completion.
- * Assumes h->lock is held
- */
-static void start_io(struct ctlr_info *h, unsigned long *flags)
-{
-       struct CommandList *c;
-
-       while (!list_empty(&h->reqQ)) {
-               c = list_entry(h->reqQ.next, struct CommandList, list);
-               /* can't do anything if fifo is full */
-               if ((h->access.fifo_full(h))) {
-                       h->fifo_recently_full = 1;
-                       dev_warn(&h->pdev->dev, "fifo full\n");
-                       break;
-               }
-               h->fifo_recently_full = 0;
-
-               /* Get the first entry from the Request Q */
-               removeQ(c);
-               h->Qdepth--;
-
-               /* Put job onto the completed Q */
-               addQ(&h->cmpQ, c);
-               atomic_inc(&h->commands_outstanding);
-               spin_unlock_irqrestore(&h->lock, *flags);
-               /* Tell the controller execute command */
-               h->access.submit_command(h, c);
-               spin_lock_irqsave(&h->lock, *flags);
-       }
-}
-
-static void lock_and_start_io(struct ctlr_info *h)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&h->lock, flags);
-       start_io(h, &flags);
-       spin_unlock_irqrestore(&h->lock, flags);
-}
-
 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
 {
        return h->access.command_completed(h, q);
@@ -5418,53 +5395,12 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
 
 static inline void finish_cmd(struct CommandList *c)
 {
-       unsigned long flags;
-       int io_may_be_stalled = 0;
-       struct ctlr_info *h = c->h;
-       int count;
-
-       spin_lock_irqsave(&h->lock, flags);
-       removeQ(c);
-
-       /*
-        * Check for possibly stalled i/o.
-        *
-        * If a fifo_full condition is encountered, requests will back up
-        * in h->reqQ.  This queue is only emptied out by start_io which is
-        * only called when a new i/o request comes in.  If no i/o's are
-        * forthcoming, the i/o's in h->reqQ can get stuck.  So we call
-        * start_io from here if we detect such a danger.
-        *
-        * Normally, we shouldn't hit this case, but pounding on the
-        * CCISS_PASSTHRU ioctl can provoke it.  Only call start_io if
-        * commands_outstanding is low.  We want to avoid calling
-        * start_io from in here as much as possible, and esp. don't
-        * want to get in a cycle where we call start_io every time
-        * through here.
-        */
-       count = atomic_read(&h->commands_outstanding);
-       spin_unlock_irqrestore(&h->lock, flags);
-       if (unlikely(h->fifo_recently_full) && count < 5)
-               io_may_be_stalled = 1;
-
        dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
        if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
                        || c->cmd_type == CMD_IOACCEL2))
                complete_scsi_command(c);
        else if (c->cmd_type == CMD_IOCTL_PEND)
                complete(c->waiting);
-       if (unlikely(io_may_be_stalled))
-               lock_and_start_io(h);
-}
-
-static inline u32 hpsa_tag_contains_index(u32 tag)
-{
-       return tag & DIRECT_LOOKUP_BIT;
-}
-
-static inline u32 hpsa_tag_to_index(u32 tag)
-{
-       return tag >> DIRECT_LOOKUP_SHIFT;
 }
 
 
@@ -5484,34 +5420,13 @@ static inline void process_indexed_cmd(struct ctlr_info *h,
        u32 tag_index;
        struct CommandList *c;
 
-       tag_index = hpsa_tag_to_index(raw_tag);
+       tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
        if (!bad_tag(h, tag_index, raw_tag)) {
                c = h->cmd_pool + tag_index;
                finish_cmd(c);
        }
 }
 
-/* process completion of a non-indexed command */
-static inline void process_nonindexed_cmd(struct ctlr_info *h,
-       u32 raw_tag)
-{
-       u32 tag;
-       struct CommandList *c = NULL;
-       unsigned long flags;
-
-       tag = hpsa_tag_discard_error_bits(h, raw_tag);
-       spin_lock_irqsave(&h->lock, flags);
-       list_for_each_entry(c, &h->cmpQ, list) {
-               if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
-                       spin_unlock_irqrestore(&h->lock, flags);
-                       finish_cmd(c);
-                       return;
-               }
-       }
-       spin_unlock_irqrestore(&h->lock, flags);
-       bad_tag(h, h->nr_cmds + 1, raw_tag);
-}
-
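
Since every command now comes from the indexed pool and carries its index in the
tag (see cmd_alloc() above), completion needs only the shift in
process_indexed_cmd(), and the non-indexed search removed just above has no
remaining callers.  A tiny userspace round-trip of that encoding; the shift
value is an assumption for the example, not the driver's DIRECT_LOOKUP_SHIFT:

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_LOOKUP_SHIFT 3          /* assumed value, illustration only */

int main(void)
{
        uint32_t cmdindex = 42;

        /* what cmd_alloc() stores (low 32 bits of Header.tag) */
        uint32_t raw_tag = cmdindex << EXAMPLE_LOOKUP_SHIFT;

        /* what process_indexed_cmd() recovers on completion */
        uint32_t tag_index = raw_tag >> EXAMPLE_LOOKUP_SHIFT;

        assert(tag_index == cmdindex);
        return 0;
}
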
 /* Some controllers, like p400, will give us one interrupt
  * after a soft reset, even if we turned interrupts off.
  * Only need to check for this in the hpsa_xxx_discard_completions
@@ -5589,10 +5504,7 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
        while (interrupt_pending(h)) {
                raw_tag = get_next_completion(h, q);
                while (raw_tag != FIFO_EMPTY) {
-                       if (likely(hpsa_tag_contains_index(raw_tag)))
-                               process_indexed_cmd(h, raw_tag);
-                       else
-                               process_nonindexed_cmd(h, raw_tag);
+                       process_indexed_cmd(h, raw_tag);
                        raw_tag = next_command(h, q);
                }
        }
@@ -5608,10 +5520,7 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
        h->last_intr_timestamp = get_jiffies_64();
        raw_tag = get_next_completion(h, q);
        while (raw_tag != FIFO_EMPTY) {
-               if (likely(hpsa_tag_contains_index(raw_tag)))
-                       process_indexed_cmd(h, raw_tag);
-               else
-                       process_nonindexed_cmd(h, raw_tag);
+               process_indexed_cmd(h, raw_tag);
                raw_tag = next_command(h, q);
        }
        return IRQ_HANDLED;
@@ -5633,7 +5542,8 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
        static const size_t cmd_sz = sizeof(*cmd) +
                                        sizeof(cmd->ErrorDescriptor);
        dma_addr_t paddr64;
-       uint32_t paddr32, tag;
+       __le32 paddr32;
+       u32 tag;
        void __iomem *vaddr;
        int i, err;
 
@@ -5648,7 +5558,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
                iounmap(vaddr);
-               return -ENOMEM;
+               return err;
        }
 
        cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
@@ -5661,12 +5571,12 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
         * although there's no guarantee, we assume that the address is at
         * least 4-byte aligned (most likely, it's page-aligned).
         */
-       paddr32 = paddr64;
+       paddr32 = cpu_to_le32(paddr64);
 
        cmd->CommandHeader.ReplyQueue = 0;
        cmd->CommandHeader.SGList = 0;
        cmd->CommandHeader.SGTotal = cpu_to_le16(0);
-       cmd->CommandHeader.tag = paddr32;
+       cmd->CommandHeader.tag = cpu_to_le64(paddr64);
        memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
 
        cmd->Request.CDBLen = 16;
@@ -5677,14 +5587,14 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
        cmd->Request.CDB[1] = type;
        memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
        cmd->ErrorDescriptor.Addr =
-                       cpu_to_le64((paddr32 + sizeof(*cmd)));
+                       cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
        cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
 
-       writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
+       writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
 
        for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
                tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
-               if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
+               if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
                        break;
                msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
        }
@@ -5718,8 +5628,6 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
        void __iomem *vaddr, u32 use_doorbell)
 {
-       u16 pmcsr;
-       int pos;
 
        if (use_doorbell) {
                /* For everything after the P600, the PCI power state method
@@ -5745,26 +5653,21 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
                 * this causes a secondary PCI reset which will reset the
                 * controller." */
 
-               pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
-               if (pos == 0) {
-                       dev_err(&pdev->dev,
-                               "hpsa_reset_controller: "
-                               "PCI PM not supported\n");
-                       return -ENODEV;
-               }
+               int rc = 0;
+
                dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+
                /* enter the D3hot power management state */
-               pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
-               pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-               pmcsr |= PCI_D3hot;
-               pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+               rc = pci_set_power_state(pdev, PCI_D3hot);
+               if (rc)
+                       return rc;
 
                msleep(500);
 
                /* enter the D0 power management state */
-               pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-               pmcsr |= PCI_D0;
-               pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+               rc = pci_set_power_state(pdev, PCI_D0);
+               if (rc)
+                       return rc;
 
                /*
                 * The P600 requires a small delay when changing states.
@@ -5858,8 +5761,12 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
         */
 
        rc = hpsa_lookup_board_id(pdev, &board_id);
-       if (rc < 0 || !ctlr_is_resettable(board_id)) {
-               dev_warn(&pdev->dev, "Not resetting device.\n");
+       if (rc < 0) {
+               dev_warn(&pdev->dev, "Board ID not found\n");
+               return rc;
+       }
+       if (!ctlr_is_resettable(board_id)) {
+               dev_warn(&pdev->dev, "Controller not resettable\n");
                return -ENODEV;
        }
 
@@ -5892,7 +5799,7 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
        }
        rc = write_driver_ver_to_cfgtable(cfgtable);
        if (rc)
-               goto unmap_vaddr;
+               goto unmap_cfgtable;
 
        /* If reset via doorbell register is supported, use that.
         * There are two such methods.  Favor the newest method.
@@ -5904,8 +5811,8 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
        } else {
                use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
                if (use_doorbell) {
-                       dev_warn(&pdev->dev, "Soft reset not supported. "
-                               "Firmware update is required.\n");
+                       dev_warn(&pdev->dev,
+                               "Soft reset not supported. Firmware update is required.\n");
                        rc = -ENOTSUPP; /* try soft reset */
                        goto unmap_cfgtable;
                }
@@ -5925,8 +5832,7 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
        rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
        if (rc) {
                dev_warn(&pdev->dev,
-                       "failed waiting for board to become ready "
-                       "after hard reset\n");
+                       "Failed waiting for board to become ready after hard reset\n");
                goto unmap_cfgtable;
        }
 
@@ -5977,7 +5883,7 @@ static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
               readl(&(tb->HostWrite.CoalIntDelay)));
        dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
               readl(&(tb->HostWrite.CoalIntCount)));
-       dev_info(dev, "   Max outstanding commands = 0x%d\n",
+       dev_info(dev, "   Max outstanding commands = %d\n",
               readl(&(tb->CmdsOutMax)));
        dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
        for (i = 0; i < 16; i++)
@@ -6025,7 +5931,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
 }
 
 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
- * controllers that are capable. If not, we use IO-APIC mode.
+ * controllers that are capable. If not, we use legacy INTx mode.
  */
 
 static void hpsa_interrupt_mode(struct ctlr_info *h)
@@ -6044,7 +5950,7 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
            (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
                goto default_int_mode;
        if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
-               dev_info(&h->pdev->dev, "MSIX\n");
+               dev_info(&h->pdev->dev, "MSI-X capable controller\n");
                h->msix_vector = MAX_REPLY_QUEUES;
                if (h->msix_vector > num_online_cpus())
                        h->msix_vector = num_online_cpus();
@@ -6065,7 +5971,7 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
        }
 single_msi_mode:
        if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
-               dev_info(&h->pdev->dev, "MSI\n");
+               dev_info(&h->pdev->dev, "MSI capable controller\n");
                if (!pci_enable_msi(h->pdev))
                        h->msi_vector = 1;
                else
@@ -6172,8 +6078,10 @@ static int hpsa_find_cfgtables(struct ctlr_info *h)
                return rc;
        h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
                       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
-       if (!h->cfgtable)
+       if (!h->cfgtable) {
+               dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
                return -ENOMEM;
+       }
        rc = write_driver_ver_to_cfgtable(h->cfgtable);
        if (rc)
                return rc;
@@ -6204,6 +6112,15 @@ static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
        }
 }
 
+/* If the controller reports that the total max sg entries is greater than 512,
+ * then we know that chained SG blocks work.  (Original smart arrays did not
+ * support chained SG blocks and would return zero for max sg entries.)
+ */
+static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
+{
+       return h->maxsgentries > 512;
+}
+
 /* Interrogate the hardware for some limits:
  * max commands, max SG elements without chaining, and with chaining,
  * SG chain block size, etc.
@@ -6211,21 +6128,23 @@ static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
 static void hpsa_find_board_params(struct ctlr_info *h)
 {
        hpsa_get_max_perf_mode_cmds(h);
-       h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
+       h->nr_cmds = h->max_commands;
        h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
        h->fw_support = readl(&(h->cfgtable->misc_fw_support));
-       /*
-        * Limit in-command s/g elements to 32 save dma'able memory.
-        * Howvever spec says if 0, use 31
-        */
-       h->max_cmd_sg_entries = 31;
-       if (h->maxsgentries > 512) {
+       if (hpsa_supports_chained_sg_blocks(h)) {
+               /* Limit in-command s/g elements to 32 to save dma'able memory. */
                h->max_cmd_sg_entries = 32;
                h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
                h->maxsgentries--; /* save one for chain pointer */
        } else {
-               h->chainsize = 0;
+               /*
+                * Original smart arrays supported at most 31 s/g entries
+                * embedded inline in the command (trying to use more
+                * would lock up the controller)
+                */
+               h->max_cmd_sg_entries = 31;
                h->maxsgentries = 31; /* default to traditional values */
+               h->chainsize = 0;
        }
 
        /* Find out what task management functions are supported and cache */
@@ -6239,7 +6158,7 @@ static void hpsa_find_board_params(struct ctlr_info *h)
 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
 {
        if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
-               dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
+               dev_err(&h->pdev->dev, "not a valid CISS config table\n");
                return false;
        }
        return true;
@@ -6272,24 +6191,27 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
        writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
 }
 
-static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
+static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
 {
        int i;
        u32 doorbell_value;
        unsigned long flags;
        /* wait until the clear_event_notify bit 6 is cleared by controller. */
-       for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+       for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
                spin_lock_irqsave(&h->lock, flags);
                doorbell_value = readl(h->vaddr + SA5_DOORBELL);
                spin_unlock_irqrestore(&h->lock, flags);
                if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
-                       break;
+                       goto done;
                /* delay and try again */
-               msleep(20);
+               msleep(CLEAR_EVENT_WAIT_INTERVAL);
        }
+       return -ENODEV;
+done:
+       return 0;
 }
 
-static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
+static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
 {
        int i;
        u32 doorbell_value;
@@ -6299,17 +6221,21 @@ static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
         * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
         * as we enter this code.)
         */
-       for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+       for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
                spin_lock_irqsave(&h->lock, flags);
                doorbell_value = readl(h->vaddr + SA5_DOORBELL);
                spin_unlock_irqrestore(&h->lock, flags);
                if (!(doorbell_value & CFGTBL_ChangeReq))
-                       break;
+                       goto done;
                /* delay and try again */
-               usleep_range(10000, 20000);
+               msleep(MODE_CHANGE_WAIT_INTERVAL);
        }
+       return -ENODEV;
+done:
+       return 0;
 }
 
+/* return -ENODEV or other reason on error, 0 on success */
 static int hpsa_enter_simple_mode(struct ctlr_info *h)
 {
        u32 trans_support;
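Both wait routines above now share the same bounded-poll shape and report -ENODEV rather than giving up silently; a generic sketch of that shape (names and parameters are illustrative, and the driver additionally takes h->lock around the register read):

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll an MMIO doorbell until the given bit clears or the retry budget is
 * exhausted; 0 on success, -ENODEV on timeout.
 */
static int example_wait_doorbell_clear(void __iomem *doorbell, u32 bit,
				       int max_tries, unsigned int interval_ms)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (!(readl(doorbell) & bit))
			return 0;
		msleep(interval_ms);
	}
	return -ENODEV;
}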
@@ -6324,14 +6250,15 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h)
        writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
        writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
-       hpsa_wait_for_mode_change_ack(h);
+       if (hpsa_wait_for_mode_change_ack(h))
+               goto error;
        print_cfg_table(&h->pdev->dev, h->cfgtable);
        if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
                goto error;
        h->transMethod = CFGTBL_Trans_Simple;
        return 0;
 error:
-       dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
+       dev_err(&h->pdev->dev, "failed to enter simple mode\n");
        return -ENODEV;
 }
 
@@ -6341,7 +6268,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
 
        prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
        if (prod_index < 0)
-               return -ENODEV;
+               return prod_index;
        h->product_name = products[prod_index].product_name;
        h->access = *(products[prod_index].access);
 
@@ -6422,6 +6349,7 @@ static void hpsa_hba_inquiry(struct ctlr_info *h)
 static int hpsa_init_reset_devices(struct pci_dev *pdev)
 {
        int rc, i;
+       void __iomem *vaddr;
 
        if (!reset_devices)
                return 0;
@@ -6445,6 +6373,14 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
 
        pci_set_master(pdev);
 
+       vaddr = pci_ioremap_bar(pdev, 0);
+       if (vaddr == NULL) {
+               rc = -ENOMEM;
+               goto out_disable;
+       }
+       writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+       iounmap(vaddr);
+
        /* Reset the controller with a PCI power-cycle or via doorbell */
        rc = hpsa_kdump_hard_reset_controller(pdev);
 
@@ -6453,14 +6389,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
         * "performant mode".  Or, it might be 640x, which can't reset
         * due to concerns about shared bbwc between 6402/6404 pair.
         */
-       if (rc) {
-               if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
-                       rc = -ENODEV;
+       if (rc)
                goto out_disable;
-       }
 
        /* Now try to get the controller to respond to a no-op */
-       dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
+       dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
        for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
                if (hpsa_noop(pdev) == 0)
                        break;
@@ -6490,9 +6423,12 @@ static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
            || (h->cmd_pool == NULL)
            || (h->errinfo_pool == NULL)) {
                dev_err(&h->pdev->dev, "out of memory in %s", __func__);
-               return -ENOMEM;
+               goto clean_up;
        }
        return 0;
+clean_up:
+       hpsa_free_cmd_pool(h);
+       return -ENOMEM;
 }
 
 static void hpsa_free_cmd_pool(struct ctlr_info *h)
@@ -6519,16 +6455,38 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
 
 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
 {
-       int i, cpu, rc;
+       int i, cpu;
 
        cpu = cpumask_first(cpu_online_mask);
        for (i = 0; i < h->msix_vector; i++) {
-               rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
+               irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
                cpu = cpumask_next(cpu, cpu_online_mask);
        }
 }
 
-static int hpsa_request_irq(struct ctlr_info *h,
+/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
+static void hpsa_free_irqs(struct ctlr_info *h)
+{
+       int i;
+
+       if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+               /* Single reply queue, only one irq to free */
+               i = h->intr_mode;
+               irq_set_affinity_hint(h->intr[i], NULL);
+               free_irq(h->intr[i], &h->q[i]);
+               return;
+       }
+
+       for (i = 0; i < h->msix_vector; i++) {
+               irq_set_affinity_hint(h->intr[i], NULL);
+               free_irq(h->intr[i], &h->q[i]);
+       }
+       for (; i < MAX_REPLY_QUEUES; i++)
+               h->q[i] = 0;
+}
+
+/* returns 0 on success; cleans up and returns -Enn on error */
+static int hpsa_request_irqs(struct ctlr_info *h,
        irqreturn_t (*msixhandler)(int, void *),
        irqreturn_t (*intxhandler)(int, void *))
 {
@@ -6543,10 +6501,25 @@ static int hpsa_request_irq(struct ctlr_info *h,
 
        if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
                /* If performant mode and MSI-X, use multiple reply queues */
-               for (i = 0; i < h->msix_vector; i++)
+               for (i = 0; i < h->msix_vector; i++) {
                        rc = request_irq(h->intr[i], msixhandler,
                                        0, h->devname,
                                        &h->q[i]);
+                       if (rc) {
+                               int j;
+
+                               dev_err(&h->pdev->dev,
+                                       "failed to get irq %d for %s\n",
+                                      h->intr[i], h->devname);
+                               for (j = 0; j < i; j++) {
+                                       free_irq(h->intr[j], &h->q[j]);
+                                       h->q[j] = 0;
+                               }
+                               for (; j < MAX_REPLY_QUEUES; j++)
+                                       h->q[j] = 0;
+                               return rc;
+                       }
+               }
                hpsa_irq_affinity_hints(h);
        } else {
                /* Use single reply pool */
@@ -6592,27 +6565,9 @@ static int hpsa_kdump_soft_reset(struct ctlr_info *h)
        return 0;
 }
 
-static void free_irqs(struct ctlr_info *h)
-{
-       int i;
-
-       if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
-               /* Single reply queue, only one irq to free */
-               i = h->intr_mode;
-               irq_set_affinity_hint(h->intr[i], NULL);
-               free_irq(h->intr[i], &h->q[i]);
-               return;
-       }
-
-       for (i = 0; i < h->msix_vector; i++) {
-               irq_set_affinity_hint(h->intr[i], NULL);
-               free_irq(h->intr[i], &h->q[i]);
-       }
-}
-
 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
 {
-       free_irqs(h);
+       hpsa_free_irqs(h);
 #ifdef CONFIG_PCI_MSI
        if (h->msix_vector) {
                if (h->pdev->msix_enabled)
@@ -6658,16 +6613,20 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 }
 
 /* Called when controller lockup detected. */
-static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
+static void fail_all_outstanding_cmds(struct ctlr_info *h)
 {
-       struct CommandList *c = NULL;
+       int i, refcount;
+       struct CommandList *c;
 
-       assert_spin_locked(&h->lock);
-       /* Mark all outstanding commands as failed and complete them. */
-       while (!list_empty(list)) {
-               c = list_entry(list->next, struct CommandList, list);
-               c->err_info->CommandStatus = CMD_HARDWARE_ERR;
-               finish_cmd(c);
+       flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
+       for (i = 0; i < h->nr_cmds; i++) {
+               c = h->cmd_pool + i;
+               refcount = atomic_inc_return(&c->refcount);
+               if (refcount > 1) {
+                       c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+                       finish_cmd(c);
+               }
+               cmd_free(h, c);
        }
 }
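fail_all_outstanding_cmds() (and hpsa_drain_accel_commands() later in this patch) walk the static command pool instead of the old reqQ/cmpQ lists; a small model of the refcount convention they rely on, with assumed names:

#include <linux/atomic.h>
#include <linux/types.h>

/* Model only: a pool slot is free at refcount 0 and allocation takes it
 * 0 -> 1, so a walker that bumps the count and sees a value greater than 1
 * knows the command is currently owned (outstanding).  The walker must then
 * drop its own reference, which is what the cmd_free() calls above do.
 */
struct example_cmd {
	atomic_t refcount;
};

static bool example_cmd_is_outstanding(struct example_cmd *c)
{
	bool outstanding = atomic_inc_return(&c->refcount) > 1;

	atomic_dec(&c->refcount);	/* drop the walker's reference */
	return outstanding;
}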
 
@@ -6704,10 +6663,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
        dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
                        lockup_detected);
        pci_disable_device(h->pdev);
-       spin_lock_irqsave(&h->lock, flags);
-       fail_all_cmds_on_list(h, &h->cmpQ);
-       fail_all_cmds_on_list(h, &h->reqQ);
-       spin_unlock_irqrestore(&h->lock, flags);
+       fail_all_outstanding_cmds(h);
 }
 
 static void detect_controller_lockup(struct ctlr_info *h)
@@ -6750,8 +6706,8 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
        int i;
        char *event_type;
 
-       /* Clear the driver-requested rescan flag */
-       h->drv_req_rescan = 0;
+       if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
+               return;
 
        /* Ask the controller to clear the events we're handling. */
        if ((h->transMethod & (CFGTBL_Trans_io_accel1
@@ -6798,9 +6754,6 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
  */
 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
 {
-       if (h->drv_req_rescan)
-               return 1;
-
        if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
                return 0;
 
@@ -6834,34 +6787,60 @@ static int hpsa_offline_devices_ready(struct ctlr_info *h)
        return 0;
 }
 
-
-static void hpsa_monitor_ctlr_worker(struct work_struct *work)
+static void hpsa_rescan_ctlr_worker(struct work_struct *work)
 {
        unsigned long flags;
        struct ctlr_info *h = container_of(to_delayed_work(work),
-                                       struct ctlr_info, monitor_ctlr_work);
-       detect_controller_lockup(h);
-       if (lockup_detected(h))
+                                       struct ctlr_info, rescan_ctlr_work);
+
+
+       if (h->remove_in_progress)
                return;
 
        if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
                scsi_host_get(h->scsi_host);
-               h->drv_req_rescan = 0;
                hpsa_ack_ctlr_events(h);
                hpsa_scan_start(h->scsi_host);
                scsi_host_put(h->scsi_host);
        }
-
        spin_lock_irqsave(&h->lock, flags);
-       if (h->remove_in_progress) {
-               spin_unlock_irqrestore(&h->lock, flags);
+       if (!h->remove_in_progress)
+               queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
+                               h->heartbeat_sample_interval);
+       spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static void hpsa_monitor_ctlr_worker(struct work_struct *work)
+{
+       unsigned long flags;
+       struct ctlr_info *h = container_of(to_delayed_work(work),
+                                       struct ctlr_info, monitor_ctlr_work);
+
+       detect_controller_lockup(h);
+       if (lockup_detected(h))
                return;
-       }
-       schedule_delayed_work(&h->monitor_ctlr_work,
+
+       spin_lock_irqsave(&h->lock, flags);
+       if (!h->remove_in_progress)
+               schedule_delayed_work(&h->monitor_ctlr_work,
                                h->heartbeat_sample_interval);
        spin_unlock_irqrestore(&h->lock, flags);
 }
 
+static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
+                                               char *name)
+{
+       struct workqueue_struct *wq = NULL;
+       char wq_name[20];
+
+       snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
+       wq = alloc_ordered_workqueue(wq_name, 0);
+       if (!wq)
+               dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
+
+       return wq;
+}
+
 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        int dac, rc;
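hpsa_create_controller_wq() above gives each controller its own ordered (single-threaded) workqueue for the rescan and resubmit work added below; a self-contained sketch of that create/arm/tear-down pattern, using assumed structure and function names:

#include <linux/workqueue.h>
#include <linux/errno.h>

struct example_ctlr {
	int ctlr;
	struct workqueue_struct *rescan_wq;
	struct delayed_work rescan_work;
};

/* Create the per-controller queue and arm the periodic work item. */
static int example_start_rescan_worker(struct example_ctlr *h,
				       work_func_t fn, unsigned long interval)
{
	h->rescan_wq = alloc_ordered_workqueue("rescan_%d_example", 0, h->ctlr);
	if (!h->rescan_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&h->rescan_work, fn);
	queue_delayed_work(h->rescan_wq, &h->rescan_work, interval);
	return 0;
}

/* Mirrors the hpsa_remove_one() ordering: cancel the work first, then
 * destroy the queue.
 */
static void example_stop_rescan_worker(struct example_ctlr *h)
{
	cancel_delayed_work_sync(&h->rescan_work);
	destroy_workqueue(h->rescan_wq);
}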
@@ -6898,13 +6877,23 @@ reinit_after_soft_reset:
 
        h->pdev = pdev;
        h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
-       INIT_LIST_HEAD(&h->cmpQ);
-       INIT_LIST_HEAD(&h->reqQ);
        INIT_LIST_HEAD(&h->offline_device_list);
        spin_lock_init(&h->lock);
        spin_lock_init(&h->offline_device_lock);
        spin_lock_init(&h->scan_lock);
-       spin_lock_init(&h->passthru_count_lock);
+       atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
+
+       h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
+       if (!h->rescan_ctlr_wq) {
+               rc = -ENOMEM;
+               goto clean1;
+       }
+
+       h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
+       if (!h->resubmit_wq) {
+               rc = -ENOMEM;
+               goto clean1;
+       }
 
        /* Allocate and clear per-cpu variable lockup_detected */
        h->lockup_detected = alloc_percpu(u32);
@@ -6939,13 +6928,14 @@ reinit_after_soft_reset:
        /* make sure the board interrupts are off */
        h->access.set_intr_mask(h, HPSA_INTR_OFF);
 
-       if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
+       if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
                goto clean2;
        dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
               h->devname, pdev->device,
               h->intr[h->intr_mode], dac ? "" : " not");
-       if (hpsa_allocate_cmd_pool(h))
-               goto clean4;
+       rc = hpsa_allocate_cmd_pool(h);
+       if (rc)
+               goto clean2_and_free_irqs;
        if (hpsa_allocate_sg_chain_blocks(h))
                goto clean4;
        init_waitqueue_head(&h->scan_wait_queue);
@@ -6974,12 +6964,12 @@ reinit_after_soft_reset:
                spin_lock_irqsave(&h->lock, flags);
                h->access.set_intr_mask(h, HPSA_INTR_OFF);
                spin_unlock_irqrestore(&h->lock, flags);
-               free_irqs(h);
-               rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
+               hpsa_free_irqs(h);
+               rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
                                        hpsa_intx_discard_completions);
                if (rc) {
-                       dev_warn(&h->pdev->dev, "Failed to request_irq after "
-                               "soft reset.\n");
+                       dev_warn(&h->pdev->dev,
+                               "Failed to request_irq after soft reset.\n");
                        goto clean4;
                }
 
@@ -7016,7 +7006,6 @@ reinit_after_soft_reset:
                /* Enable Accelerated IO path at driver layer */
                h->acciopath_status = 1;
 
-       h->drv_req_rescan = 0;
 
        /* Turn the interrupts on so we can service requests */
        h->access.set_intr_mask(h, HPSA_INTR_ON);
@@ -7029,14 +7018,22 @@ reinit_after_soft_reset:
        INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
        schedule_delayed_work(&h->monitor_ctlr_work,
                                h->heartbeat_sample_interval);
+       INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
+       queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
+                               h->heartbeat_sample_interval);
        return 0;
 
 clean4:
        hpsa_free_sg_chain_blocks(h);
        hpsa_free_cmd_pool(h);
-       free_irqs(h);
+clean2_and_free_irqs:
+       hpsa_free_irqs(h);
 clean2:
 clean1:
+       if (h->resubmit_wq)
+               destroy_workqueue(h->resubmit_wq);
+       if (h->rescan_ctlr_wq)
+               destroy_workqueue(h->rescan_ctlr_wq);
        if (h->lockup_detected)
                free_percpu(h->lockup_detected);
        kfree(h);
@@ -7055,9 +7052,9 @@ static void hpsa_flush_cache(struct ctlr_info *h)
        if (!flush_buf)
                return;
 
-       c = cmd_special_alloc(h);
+       c = cmd_alloc(h);
        if (!c) {
-               dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+               dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                goto out_of_memory;
        }
        if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
@@ -7069,7 +7066,7 @@ static void hpsa_flush_cache(struct ctlr_info *h)
 out:
                dev_warn(&h->pdev->dev,
                        "error flushing cache on controller\n");
-       cmd_special_free(h, c);
+       cmd_free(h, c);
 out_of_memory:
        kfree(flush_buf);
 }
@@ -7110,9 +7107,11 @@ static void hpsa_remove_one(struct pci_dev *pdev)
        /* Get rid of any controller monitoring work items */
        spin_lock_irqsave(&h->lock, flags);
        h->remove_in_progress = 1;
-       cancel_delayed_work(&h->monitor_ctlr_work);
        spin_unlock_irqrestore(&h->lock, flags);
-
+       cancel_delayed_work_sync(&h->monitor_ctlr_work);
+       cancel_delayed_work_sync(&h->rescan_ctlr_work);
+       destroy_workqueue(h->rescan_ctlr_wq);
+       destroy_workqueue(h->resubmit_wq);
        hpsa_unregister_scsi(h);        /* unhook from SCSI subsystem */
        hpsa_shutdown(pdev);
        iounmap(h->vaddr);
@@ -7172,7 +7171,7 @@ static struct pci_driver hpsa_pci_driver = {
  * bits of the command address.
  */
 static void  calc_bucket_map(int bucket[], int num_buckets,
-       int nsgs, int min_blocks, int *bucket_map)
+       int nsgs, int min_blocks, u32 *bucket_map)
 {
        int i, j, b, size;
 
@@ -7193,7 +7192,8 @@ static void  calc_bucket_map(int bucket[], int num_buckets,
        }
 }
 
-static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
+/* return -ENODEV or other reason on error, 0 on success */
+static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 {
        int i;
        unsigned long register_value;
@@ -7285,12 +7285,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
                }
        }
        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
-       hpsa_wait_for_mode_change_ack(h);
+       if (hpsa_wait_for_mode_change_ack(h)) {
+               dev_err(&h->pdev->dev,
+                       "performant mode problem - doorbell timeout\n");
+               return -ENODEV;
+       }
        register_value = readl(&(h->cfgtable->TransportActive));
        if (!(register_value & CFGTBL_Trans_Performant)) {
-               dev_warn(&h->pdev->dev, "unable to get board into"
-                                       " performant mode\n");
-               return;
+               dev_err(&h->pdev->dev,
+                       "performant mode problem - transport not active\n");
+               return -ENODEV;
        }
        /* Change the access methods to the performant access methods */
        h->access = access;
@@ -7298,7 +7302,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 
        if (!((trans_support & CFGTBL_Trans_io_accel1) ||
                (trans_support & CFGTBL_Trans_io_accel2)))
-               return;
+               return 0;
 
        if (trans_support & CFGTBL_Trans_io_accel1) {
                /* Set up I/O accelerator mode */
@@ -7328,12 +7332,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
                                        (i * sizeof(struct ErrorInfo)));
                        cp->err_info_len = sizeof(struct ErrorInfo);
                        cp->sgl_offset = IOACCEL1_SGLOFFSET;
-                       cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
+                       cp->host_context_flags =
+                               cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
                        cp->timeout_sec = 0;
                        cp->ReplyQueue = 0;
                        cp->tag =
-                               cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) |
-                                               DIRECT_LOOKUP_BIT);
+                               cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
                        cp->host_addr =
                                cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
                                        (i * sizeof(struct io_accel1_cmd)));
@@ -7362,7 +7366,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
                        writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
        }
        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
-       hpsa_wait_for_mode_change_ack(h);
+       if (hpsa_wait_for_mode_change_ack(h)) {
+               dev_err(&h->pdev->dev,
+                       "performant mode problem - enabling ioaccel mode\n");
+               return -ENODEV;
+       }
+       return 0;
 }
 
 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
@@ -7508,17 +7517,18 @@ static int is_accelerated_cmd(struct CommandList *c)
 static void hpsa_drain_accel_commands(struct ctlr_info *h)
 {
        struct CommandList *c = NULL;
-       unsigned long flags;
-       int accel_cmds_out;
+       int i, accel_cmds_out;
+       int refcount;
 
-       do { /* wait for all outstanding commands to drain out */
+       do { /* wait for all outstanding ioaccel commands to drain out */
                accel_cmds_out = 0;
-               spin_lock_irqsave(&h->lock, flags);
-               list_for_each_entry(c, &h->cmpQ, list)
-                       accel_cmds_out += is_accelerated_cmd(c);
-               list_for_each_entry(c, &h->reqQ, list)
-                       accel_cmds_out += is_accelerated_cmd(c);
-               spin_unlock_irqrestore(&h->lock, flags);
+               for (i = 0; i < h->nr_cmds; i++) {
+                       c = h->cmd_pool + i;
+                       refcount = atomic_inc_return(&c->refcount);
+                       if (refcount > 1) /* Command is allocated */
+                               accel_cmds_out += is_accelerated_cmd(c);
+                       cmd_free(h, c);
+               }
                if (accel_cmds_out <= 0)
                        break;
                msleep(100);
index 8e06d9e..6577130 100644
@@ -32,7 +32,6 @@ struct access_method {
        void (*submit_command)(struct ctlr_info *h,
                struct CommandList *c);
        void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
-       unsigned long (*fifo_full)(struct ctlr_info *h);
        bool (*intr_pending)(struct ctlr_info *h);
        unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
 };
@@ -47,6 +46,11 @@ struct hpsa_scsi_dev_t {
        unsigned char model[16];        /* bytes 16-31 of inquiry data */
        unsigned char raid_level;       /* from inquiry page 0xC1 */
        unsigned char volume_offline;   /* discovered via TUR or VPD */
+       u16 queue_depth;                /* max queue_depth for this device */
+       atomic_t ioaccel_cmds_out;      /* Only used for physical devices;
+                                        * counts commands sent to a physical
+                                        * device via the "ioaccel" path.
+                                        */
        u32 ioaccel_handle;
        int offload_config;             /* I/O accel RAID offload configured */
        int offload_enabled;            /* I/O accel RAID offload enabled */
@@ -55,6 +59,15 @@ struct hpsa_scsi_dev_t {
                                         */
        struct raid_map_data raid_map;  /* I/O accelerator RAID map */
 
+       /*
+        * Pointers from logical drive map indices to the phys drives that
+        * Pointers from logical drive map indices to the phys drives that
+        * make up those logical drives.  Note that multiple logical drives
+        * may share physical drives.  For instance, you can have 3 logical
+        * drives all built from the same 5 physical disks.  We need these
+        * pointers to count i/o's out to physical devices in order to honor
+        * physical device queue depth limits.
+       struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
 };
 
 struct reply_queue_buffer {
@@ -115,9 +128,12 @@ struct ctlr_info {
        void __iomem *vaddr;
        unsigned long paddr;
        int     nr_cmds; /* Number of commands allowed on this controller */
+#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
+#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
        struct CfgTable __iomem *cfgtable;
        int     interrupts_enabled;
        int     max_commands;
+       int last_allocation;
        atomic_t commands_outstanding;
 #      define PERF_MODE_INT    0
 #      define DOORBELL_INT     1
@@ -131,8 +147,6 @@ struct ctlr_info {
        char hba_mode_enabled;
 
        /* queue and queue Info */
-       struct list_head reqQ;
-       struct list_head cmpQ;
        unsigned int Qdepth;
        unsigned int maxSG;
        spinlock_t lock;
@@ -168,9 +182,8 @@ struct ctlr_info {
        unsigned long transMethod;
 
        /* cap concurrent passthrus at some reasonable maximum */
-#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
-       spinlock_t passthru_count_lock; /* protects passthru_count */
-       int passthru_count;
+#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
+       atomic_t passthru_cmds_avail;
 
        /*
         * Performant mode completion buffers
@@ -194,8 +207,8 @@ struct ctlr_info {
        atomic_t firmware_flash_in_progress;
        u32 __percpu *lockup_detected;
        struct delayed_work monitor_ctlr_work;
+       struct delayed_work rescan_ctlr_work;
        int remove_in_progress;
-       u32 fifo_recently_full;
        /* Address of h->q[x] is passed to intr handler to know which queue */
        u8 q[MAX_REPLY_QUEUES];
        u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
@@ -237,8 +250,9 @@ struct ctlr_info {
        spinlock_t offline_device_lock;
        struct list_head offline_device_list;
        int     acciopath_status;
-       int     drv_req_rescan; /* flag for driver to request rescan event */
        int     raid_offload_debug;
+       struct workqueue_struct *resubmit_wq;
+       struct workqueue_struct *rescan_ctlr_wq;
 };
 
 struct offline_device_entry {
@@ -297,6 +311,8 @@ struct offline_device_entry {
  */
 #define SA5_DOORBELL   0x20
 #define SA5_REQUEST_PORT_OFFSET        0x40
+#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
+#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
 #define SA5_REPLY_INTR_MASK_OFFSET     0x34
 #define SA5_REPLY_PORT_OFFSET          0x44
 #define SA5_INTR_STATUS                0x30
@@ -353,10 +369,7 @@ static void SA5_submit_command_no_read(struct ctlr_info *h,
 static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
        struct CommandList *c)
 {
-       if (c->cmd_type == CMD_IOACCEL2)
-               writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
-       else
-               writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+       writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
 }
 
 /*
@@ -398,19 +411,19 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
        unsigned long register_value = FIFO_EMPTY;
 
        /* msi auto clears the interrupt pending bit. */
-       if (!(h->msi_vector || h->msix_vector)) {
+       if (unlikely(!(h->msi_vector || h->msix_vector))) {
                /* flush the controller write of the reply queue by reading
                 * outbound doorbell status register.
                 */
-               register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+               (void) readl(h->vaddr + SA5_OUTDB_STATUS);
                writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
                /* Do a read in order to flush the write to the controller
                 * (as per spec.)
                 */
-               register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+               (void) readl(h->vaddr + SA5_OUTDB_STATUS);
        }
 
-       if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+       if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
                register_value = rq->head[rq->current_entry];
                rq->current_entry++;
                atomic_dec(&h->commands_outstanding);
@@ -425,14 +438,6 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
        return register_value;
 }
 
-/*
- *  Returns true if fifo is full.
- *
- */
-static unsigned long SA5_fifo_full(struct ctlr_info *h)
-{
-       return atomic_read(&h->commands_outstanding) >= h->max_commands;
-}
 /*
  *   returns value read from hardware.
  *     returns FIFO_EMPTY if there is nothing to read
@@ -473,9 +478,6 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
        if (!register_value)
                return false;
 
-       if (h->msi_vector || h->msix_vector)
-               return true;
-
        /* Read outbound doorbell to flush */
        register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
        return register_value & SA5_OUTDB_STATUS_PERF_BIT;
@@ -525,7 +527,6 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 static struct access_method SA5_access = {
        SA5_submit_command,
        SA5_intr_mask,
-       SA5_fifo_full,
        SA5_intr_pending,
        SA5_completed,
 };
@@ -533,7 +534,6 @@ static struct access_method SA5_access = {
 static struct access_method SA5_ioaccel_mode1_access = {
        SA5_submit_command,
        SA5_performant_intr_mask,
-       SA5_fifo_full,
        SA5_ioaccel_mode1_intr_pending,
        SA5_ioaccel_mode1_completed,
 };
@@ -541,7 +541,6 @@ static struct access_method SA5_ioaccel_mode1_access = {
 static struct access_method SA5_ioaccel_mode2_access = {
        SA5_submit_command_ioaccel2,
        SA5_performant_intr_mask,
-       SA5_fifo_full,
        SA5_performant_intr_pending,
        SA5_performant_completed,
 };
@@ -549,7 +548,6 @@ static struct access_method SA5_ioaccel_mode2_access = {
 static struct access_method SA5_performant_access = {
        SA5_submit_command,
        SA5_performant_intr_mask,
-       SA5_fifo_full,
        SA5_performant_intr_pending,
        SA5_performant_completed,
 };
@@ -557,7 +555,6 @@ static struct access_method SA5_performant_access = {
 static struct access_method SA5_performant_access_no_read = {
        SA5_submit_command_no_read,
        SA5_performant_intr_mask,
-       SA5_fifo_full,
        SA5_performant_intr_pending,
        SA5_performant_completed,
 };
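Because these access_method tables use positional initializers, dropping ->fifo_full has to touch every instance in lock-step; one possible hardening (not part of this patch, shown only as a sketch reusing the ops above) is to switch the tables to designated initializers:

static struct access_method SA5_access_example = {
	.submit_command		= SA5_submit_command,
	.set_intr_mask		= SA5_intr_mask,
	.intr_pending		= SA5_intr_pending,
	.command_completed	= SA5_completed,
};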
index cb988c4..3a621c7 100644
@@ -206,27 +206,27 @@ struct raid_map_disk_data {
 };
 
 struct raid_map_data {
-       u32   structure_size;           /* Size of entire structure in bytes */
-       u32   volume_blk_size;          /* bytes / block in the volume */
-       u64   volume_blk_cnt;           /* logical blocks on the volume */
+       __le32   structure_size;        /* Size of entire structure in bytes */
+       __le32   volume_blk_size;       /* bytes / block in the volume */
+       __le64   volume_blk_cnt;        /* logical blocks on the volume */
        u8    phys_blk_shift;           /* Shift factor to convert between
                                         * units of logical blocks and physical
                                         * disk blocks */
        u8    parity_rotation_shift;    /* Shift factor to convert between units
                                         * of logical stripes and physical
                                         * stripes */
-       u16   strip_size;               /* blocks used on each disk / stripe */
-       u64   disk_starting_blk;        /* First disk block used in volume */
-       u64   disk_blk_cnt;             /* disk blocks used by volume / disk */
-       u16   data_disks_per_row;       /* data disk entries / row in the map */
-       u16   metadata_disks_per_row;   /* mirror/parity disk entries / row
+       __le16   strip_size;            /* blocks used on each disk / stripe */
+       __le64   disk_starting_blk;     /* First disk block used in volume */
+       __le64   disk_blk_cnt;          /* disk blocks used by volume / disk */
+       __le16   data_disks_per_row;    /* data disk entries / row in the map */
+       __le16   metadata_disks_per_row;/* mirror/parity disk entries / row
                                         * in the map */
-       u16   row_cnt;                  /* rows in each layout map */
-       u16   layout_map_count;         /* layout maps (1 map per mirror/parity
+       __le16   row_cnt;               /* rows in each layout map */
+       __le16   layout_map_count;      /* layout maps (1 map per mirror/parity
                                         * group) */
-       u16   flags;                    /* Bit 0 set if encryption enabled */
+       __le16   flags;                 /* Bit 0 set if encryption enabled */
 #define RAID_MAP_FLAG_ENCRYPT_ON  0x01
-       u16   dekindex;                 /* Data encryption key index. */
+       __le16   dekindex;              /* Data encryption key index. */
        u8    reserved[16];
        struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
 };
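The raid map (and the command structures below) are controller-defined, little-endian layouts, so the fields become __leNN and sparse can flag missing byte swaps. A brief sketch, with a hypothetical structure, of how such fields are meant to be accessed:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical wire structure: convert at the point of use so big-endian
 * hosts read and write the same byte layout the controller expects.
 */
struct example_wire {
	__le32 volume_blk_size;
};

static u32 example_get_blk_size(const struct example_wire *w)
{
	return le32_to_cpu(w->volume_blk_size);
}

static void example_set_blk_size(struct example_wire *w, u32 bytes)
{
	w->volume_blk_size = cpu_to_le32(bytes);
}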
@@ -240,6 +240,10 @@ struct ReportLUNdata {
 
 struct ext_report_lun_entry {
        u8 lunid[8];
+#define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F)
+#define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6])
+#define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \
+                       GET_BMIC_LEVEL_TWO_TARGET((lunid)))
        u8 wwid[8];
        u8 device_type;
        u8 device_flags;
@@ -268,6 +272,7 @@ struct SenseSubsystem_info {
 #define HPSA_CACHE_FLUSH 0x01  /* C2 was already being used by HPSA */
 #define BMIC_FLASH_FIRMWARE 0xF7
 #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
+#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
 
 /* Command List Structure */
 union SCSI3Addr {
@@ -313,8 +318,8 @@ union LUNAddr {
 struct CommandListHeader {
        u8              ReplyQueue;
        u8              SGList;
-       u16             SGTotal;
-       u64             tag;
+       __le16          SGTotal;
+       __le64          tag;
        union LUNAddr     LUN;
 };
 
@@ -338,14 +343,14 @@ struct RequestBlock {
 };
 
 struct ErrDescriptor {
-       u64 Addr;
-       u32  Len;
+       __le64 Addr;
+       __le32 Len;
 };
 
 struct SGDescriptor {
-       u64 Addr;
-       u32  Len;
-       u32  Ext;
+       __le64 Addr;
+       __le32 Len;
+       __le32 Ext;
 };
 
 union MoreErrInfo {
@@ -375,22 +380,19 @@ struct ErrorInfo {
 #define CMD_IOACCEL1   0x04
 #define CMD_IOACCEL2   0x05
 
-#define DIRECT_LOOKUP_SHIFT 5
-#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_SHIFT 4
 #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
 
 #define HPSA_ERROR_BIT          0x02
 struct ctlr_info; /* defined in hpsa.h */
-/* The size of this structure needs to be divisible by 32
- * on all architectures because low 5 bits of the addresses
+/* The size of this structure needs to be divisible by 128
+ * on all architectures.  The low 4 bits of the addresses
  * are used as follows:
  *
  * bit 0: to device, used to indicate "performant mode" command
  *        from device, indicates error status.
  * bit 1-3: to device, indicates block fetch table entry for
  *          reducing DMA in fetching commands from host memory.
- * bit 4: used to indicate whether tag is "direct lookup" (index),
- *        or a bus address.
  */
 
 #define COMMANDLIST_ALIGNMENT 128
@@ -405,9 +407,21 @@ struct CommandList {
        struct ctlr_info           *h;
        int                        cmd_type;
        long                       cmdindex;
-       struct list_head list;
        struct completion *waiting;
-       void   *scsi_cmd;
+       struct scsi_cmnd *scsi_cmd;
+       struct work_struct work;
+
+       /*
+        * For commands using either of the two "ioaccel" paths to
+        * bypass the RAID stack and go directly to the physical disk,
+        * phys_disk is a pointer to the hpsa_scsi_dev_t to which the
+        * i/o is destined.  We need to store that here because the command
+        * may potentially encounter TASK SET FULL and need to be resubmitted.
+        * For "normal" i/o's not using the "ioaccel" paths, phys_disk is
+        * not used.
+        */
+       struct hpsa_scsi_dev_t *phys_disk;
+       atomic_t refcount; /* Must be last to avoid memset in cmd_alloc */
 } __aligned(COMMANDLIST_ALIGNMENT);
 
 /* Max S/G elements in I/O accelerator command */
@@ -420,7 +434,7 @@ struct CommandList {
  */
 #define IOACCEL1_COMMANDLIST_ALIGNMENT 128
 struct io_accel1_cmd {
-       u16 dev_handle;                 /* 0x00 - 0x01 */
+       __le16 dev_handle;              /* 0x00 - 0x01 */
        u8  reserved1;                  /* 0x02 */
        u8  function;                   /* 0x03 */
        u8  reserved2[8];               /* 0x04 - 0x0B */
@@ -430,20 +444,20 @@ struct io_accel1_cmd {
        u8  reserved4;                  /* 0x13 */
        u8  sgl_offset;                 /* 0x14 */
        u8  reserved5[7];               /* 0x15 - 0x1B */
-       u32 transfer_len;               /* 0x1C - 0x1F */
+       __le32 transfer_len;            /* 0x1C - 0x1F */
        u8  reserved6[4];               /* 0x20 - 0x23 */
-       u16 io_flags;                   /* 0x24 - 0x25 */
+       __le16 io_flags;                /* 0x24 - 0x25 */
        u8  reserved7[14];              /* 0x26 - 0x33 */
        u8  LUN[8];                     /* 0x34 - 0x3B */
-       u32 control;                    /* 0x3C - 0x3F */
+       __le32 control;                 /* 0x3C - 0x3F */
        u8  CDB[16];                    /* 0x40 - 0x4F */
        u8  reserved8[16];              /* 0x50 - 0x5F */
-       u16 host_context_flags;         /* 0x60 - 0x61 */
-       u16 timeout_sec;                /* 0x62 - 0x63 */
+       __le16 host_context_flags;      /* 0x60 - 0x61 */
+       __le16 timeout_sec;             /* 0x62 - 0x63 */
        u8  ReplyQueue;                 /* 0x64 */
        u8  reserved9[3];               /* 0x65 - 0x67 */
-       u64 tag;                        /* 0x68 - 0x6F */
-       u64 host_addr;                  /* 0x70 - 0x77 */
+       __le64 tag;                     /* 0x68 - 0x6F */
+       __le64 host_addr;               /* 0x70 - 0x77 */
        u8  CISS_LUN[8];                /* 0x78 - 0x7F */
        struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
 } __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
@@ -470,8 +484,8 @@ struct io_accel1_cmd {
 #define IOACCEL1_BUSADDR_CMDTYPE        0x00000060
 
 struct ioaccel2_sg_element {
-       u64 address;
-       u32 length;
+       __le64 address;
+       __le32 length;
        u8 reserved[3];
        u8 chain_indicator;
 #define IOACCEL2_CHAIN 0x80
@@ -526,20 +540,20 @@ struct io_accel2_cmd {
                                             /*     0=off, 1=on */
        u8  reply_queue;                /* Reply Queue ID */
        u8  reserved1;                  /* Reserved */
-       u32 scsi_nexus;                 /* Device Handle */
-       u32 Tag;                        /* cciss tag, lower 4 bytes only */
-       u32 tweak_lower;                /* Encryption tweak, lower 4 bytes */
+       __le32 scsi_nexus;              /* Device Handle */
+       __le32 Tag;                     /* cciss tag, lower 4 bytes only */
+       __le32 tweak_lower;             /* Encryption tweak, lower 4 bytes */
        u8  cdb[16];                    /* SCSI Command Descriptor Block */
        u8  cciss_lun[8];               /* 8 byte SCSI address */
-       u32 data_len;                   /* Total bytes to transfer */
+       __le32 data_len;                /* Total bytes to transfer */
        u8  cmd_priority_task_attr;     /* priority and task attrs */
 #define IOACCEL2_PRIORITY_MASK 0x78
 #define IOACCEL2_ATTR_MASK 0x07
        u8  sg_count;                   /* Number of sg elements */
-       u16 dekindex;                   /* Data encryption key index */
-       u64 err_ptr;                    /* Error Pointer */
-       u32 err_len;                    /* Error Length*/
-       u32 tweak_upper;                /* Encryption tweak, upper 4 bytes */
+       __le16 dekindex;                /* Data encryption key index */
+       __le64 err_ptr;                 /* Error Pointer */
+       __le32 err_len;                 /* Error Length*/
+       __le32 tweak_upper;             /* Encryption tweak, upper 4 bytes */
        struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
        struct io_accel2_scsi_response error_data;
 } __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
@@ -563,18 +577,18 @@ struct hpsa_tmf_struct {
        u8 reserved1;           /* byte 3 Reserved */
        u32 it_nexus;           /* SCSI I-T Nexus */
        u8 lun_id[8];           /* LUN ID for TMF request */
-       u64 tag;                /* cciss tag associated w/ request */
-       u64 abort_tag;          /* cciss tag of SCSI cmd or task to abort */
-       u64 error_ptr;          /* Error Pointer */
-       u32 error_len;          /* Error Length */
+       __le64 tag;             /* cciss tag associated w/ request */
+       __le64 abort_tag;       /* cciss tag of SCSI cmd or TMF to abort */
+       __le64 error_ptr;               /* Error Pointer */
+       __le32 error_len;               /* Error Length */
 };
 
 /* Configuration Table Structure */
 struct HostWrite {
-       u32 TransportRequest;
-       u32 command_pool_addr_hi;
-       u32 CoalIntDelay;
-       u32 CoalIntCount;
+       __le32          TransportRequest;
+       __le32          command_pool_addr_hi;
+       __le32          CoalIntDelay;
+       __le32          CoalIntCount;
 };
 
 #define SIMPLE_MODE     0x02
@@ -585,54 +599,54 @@ struct HostWrite {
 #define DRIVER_SUPPORT_UA_ENABLE        0x00000001
 
 struct CfgTable {
-       u8            Signature[4];
-       u32             SpecValence;
-       u32           TransportSupport;
-       u32           TransportActive;
-       struct          HostWrite HostWrite;
-       u32           CmdsOutMax;
-       u32           BusTypes;
-       u32           TransMethodOffset;
-       u8            ServerName[16];
-       u32           HeartBeat;
-       u32           driver_support;
-#define                        ENABLE_SCSI_PREFETCH 0x100
-#define                        ENABLE_UNIT_ATTN 0x01
-       u32             MaxScatterGatherElements;
-       u32             MaxLogicalUnits;
-       u32             MaxPhysicalDevices;
-       u32             MaxPhysicalDrivesPerLogicalUnit;
-       u32             MaxPerformantModeCommands;
-       u32             MaxBlockFetch;
-       u32             PowerConservationSupport;
-       u32             PowerConservationEnable;
-       u32             TMFSupportFlags;
+       u8              Signature[4];
+       __le32          SpecValence;
+       __le32          TransportSupport;
+       __le32          TransportActive;
+       struct HostWrite HostWrite;
+       __le32          CmdsOutMax;
+       __le32          BusTypes;
+       __le32          TransMethodOffset;
+       u8              ServerName[16];
+       __le32          HeartBeat;
+       __le32          driver_support;
+#define                        ENABLE_SCSI_PREFETCH            0x100
+#define                        ENABLE_UNIT_ATTN                0x01
+       __le32          MaxScatterGatherElements;
+       __le32          MaxLogicalUnits;
+       __le32          MaxPhysicalDevices;
+       __le32          MaxPhysicalDrivesPerLogicalUnit;
+       __le32          MaxPerformantModeCommands;
+       __le32          MaxBlockFetch;
+       __le32          PowerConservationSupport;
+       __le32          PowerConservationEnable;
+       __le32          TMFSupportFlags;
        u8              TMFTagMask[8];
        u8              reserved[0x78 - 0x70];
-       u32             misc_fw_support; /* offset 0x78 */
-#define                        MISC_FW_DOORBELL_RESET (0x02)
-#define                        MISC_FW_DOORBELL_RESET2 (0x010)
-#define                        MISC_FW_RAID_OFFLOAD_BASIC (0x020)
-#define                        MISC_FW_EVENT_NOTIFY (0x080)
+       __le32          misc_fw_support;                /* offset 0x78 */
+#define                        MISC_FW_DOORBELL_RESET          0x02
+#define                        MISC_FW_DOORBELL_RESET2         0x010
+#define                        MISC_FW_RAID_OFFLOAD_BASIC      0x020
+#define                        MISC_FW_EVENT_NOTIFY            0x080
        u8              driver_version[32];
-       u32             max_cached_write_size;
-       u8              driver_scratchpad[16];
-       u32             max_error_info_length;
-       u32             io_accel_max_embedded_sg_count;
-       u32             io_accel_request_size_offset;
-       u32             event_notify;
-#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
-#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
-       u32             clear_event_notify;
+       __le32          max_cached_write_size;
+       u8              driver_scratchpad[16];
+       __le32          max_error_info_length;
+       __le32          io_accel_max_embedded_sg_count;
+       __le32          io_accel_request_size_offset;
+       __le32          event_notify;
+#define                HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
+#define                HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
+       __le32          clear_event_notify;
 };
 
 #define NUM_BLOCKFETCH_ENTRIES 8
 struct TransTable_struct {
-       u32            BlockFetch[NUM_BLOCKFETCH_ENTRIES];
-       u32            RepQSize;
-       u32            RepQCount;
-       u32            RepQCtrAddrLow32;
-       u32            RepQCtrAddrHigh32;
+       __le32          BlockFetch[NUM_BLOCKFETCH_ENTRIES];
+       __le32          RepQSize;
+       __le32          RepQCount;
+       __le32          RepQCtrAddrLow32;
+       __le32          RepQCtrAddrHigh32;
 #define MAX_REPLY_QUEUES 64
        struct vals32  RepQAddr[MAX_REPLY_QUEUES];
 };
@@ -644,5 +658,137 @@ struct hpsa_pci_info {
        u32             board_id;
 };
 
+struct bmic_identify_physical_device {
+       u8    scsi_bus;          /* SCSI Bus number on controller */
+       u8    scsi_id;           /* SCSI ID on this bus */
+       __le16 block_size;           /* sector size in bytes */
+       __le32 total_blocks;         /* number of sectors on drive */
+       __le32 reserved_blocks;   /* controller reserved (RIS) */
+       u8    model[40];         /* Physical Drive Model */
+       u8    serial_number[40]; /* Drive Serial Number */
+       u8    firmware_revision[8]; /* drive firmware revision */
+       u8    scsi_inquiry_bits; /* inquiry byte 7 bits */
+       u8    compaq_drive_stamp; /* 0 means drive not stamped */
+       u8    last_failure_reason;
+#define BMIC_LAST_FAILURE_TOO_SMALL_IN_LOAD_CONFIG             0x01
+#define BMIC_LAST_FAILURE_ERROR_ERASING_RIS                    0x02
+#define BMIC_LAST_FAILURE_ERROR_SAVING_RIS                     0x03
+#define BMIC_LAST_FAILURE_FAIL_DRIVE_COMMAND                   0x04
+#define BMIC_LAST_FAILURE_MARK_BAD_FAILED                      0x05
+#define BMIC_LAST_FAILURE_MARK_BAD_FAILED_IN_FINISH_REMAP      0x06
+#define BMIC_LAST_FAILURE_TIMEOUT                              0x07
+#define BMIC_LAST_FAILURE_AUTOSENSE_FAILED                     0x08
+#define BMIC_LAST_FAILURE_MEDIUM_ERROR_1                       0x09
+#define BMIC_LAST_FAILURE_MEDIUM_ERROR_2                       0x0a
+#define BMIC_LAST_FAILURE_NOT_READY_BAD_SENSE                  0x0b
+#define BMIC_LAST_FAILURE_NOT_READY                            0x0c
+#define BMIC_LAST_FAILURE_HARDWARE_ERROR                       0x0d
+#define BMIC_LAST_FAILURE_ABORTED_COMMAND                      0x0e
+#define BMIC_LAST_FAILURE_WRITE_PROTECTED                      0x0f
+#define BMIC_LAST_FAILURE_SPIN_UP_FAILURE_IN_RECOVER           0x10
+#define BMIC_LAST_FAILURE_REBUILD_WRITE_ERROR                  0x11
+#define BMIC_LAST_FAILURE_TOO_SMALL_IN_HOT_PLUG                        0x12
+#define BMIC_LAST_FAILURE_BUS_RESET_RECOVERY_ABORTED           0x13
+#define BMIC_LAST_FAILURE_REMOVED_IN_HOT_PLUG                  0x14
+#define BMIC_LAST_FAILURE_INIT_REQUEST_SENSE_FAILED            0x15
+#define BMIC_LAST_FAILURE_INIT_START_UNIT_FAILED               0x16
+#define BMIC_LAST_FAILURE_INQUIRY_FAILED                       0x17
+#define BMIC_LAST_FAILURE_NON_DISK_DEVICE                      0x18
+#define BMIC_LAST_FAILURE_READ_CAPACITY_FAILED                 0x19
+#define BMIC_LAST_FAILURE_INVALID_BLOCK_SIZE                   0x1a
+#define BMIC_LAST_FAILURE_HOT_PLUG_REQUEST_SENSE_FAILED                0x1b
+#define BMIC_LAST_FAILURE_HOT_PLUG_START_UNIT_FAILED           0x1c
+#define BMIC_LAST_FAILURE_WRITE_ERROR_AFTER_REMAP              0x1d
+#define BMIC_LAST_FAILURE_INIT_RESET_RECOVERY_ABORTED          0x1e
+#define BMIC_LAST_FAILURE_DEFERRED_WRITE_ERROR                 0x1f
+#define BMIC_LAST_FAILURE_MISSING_IN_SAVE_RIS                  0x20
+#define BMIC_LAST_FAILURE_WRONG_REPLACE                                0x21
+#define BMIC_LAST_FAILURE_GDP_VPD_INQUIRY_FAILED               0x22
+#define BMIC_LAST_FAILURE_GDP_MODE_SENSE_FAILED                        0x23
+#define BMIC_LAST_FAILURE_DRIVE_NOT_IN_48BIT_MODE              0x24
+#define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_HOT_PLUG           0x25
+#define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_LOAD_CFG           0x26
+#define BMIC_LAST_FAILURE_PROTOCOL_ADAPTER_FAILED              0x27
+#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_EMPTY                  0x28
+#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_OCCUPIED               0x29
+#define BMIC_LAST_FAILURE_FAULTY_ID_INVALID_BAY                        0x2a
+#define BMIC_LAST_FAILURE_WRITE_RETRIES_FAILED                 0x2b
+
+#define BMIC_LAST_FAILURE_SMART_ERROR_REPORTED                 0x37
+#define BMIC_LAST_FAILURE_PHY_RESET_FAILED                     0x38
+#define BMIC_LAST_FAILURE_ONLY_ONE_CTLR_CAN_SEE_DRIVE          0x40
+#define BMIC_LAST_FAILURE_KC_VOLUME_FAILED                     0x41
+#define BMIC_LAST_FAILURE_UNEXPECTED_REPLACEMENT               0x42
+#define BMIC_LAST_FAILURE_OFFLINE_ERASE                                0x80
+#define BMIC_LAST_FAILURE_OFFLINE_TOO_SMALL                    0x81
+#define BMIC_LAST_FAILURE_OFFLINE_DRIVE_TYPE_MIX               0x82
+#define BMIC_LAST_FAILURE_OFFLINE_ERASE_COMPLETE               0x83
+
+       u8     flags;
+       u8     more_flags;
+       u8     scsi_lun;          /* SCSI LUN for phys drive */
+       u8     yet_more_flags;
+       u8     even_more_flags;
+       __le32 spi_speed_rules;/* SPI Speed data:Ultra disable diagnose */
+       u8     phys_connector[2];         /* connector number on controller */
+       u8     phys_box_on_bus;  /* phys enclosure this drive resides in */
+       u8     phys_bay_in_box;  /* phys drv bay this drive resides in */
+       __le32 rpm;              /* Drive rotational speed in rpm */
+       u8     device_type;       /* type of drive */
+       u8     sata_version;     /* only valid when drive_type is SATA */
+       __le64 big_total_block_count;
+       __le64 ris_starting_lba;
+       __le32 ris_size;
+       u8     wwid[20];
+       u8     controller_phy_map[32];
+       __le16 phy_count;
+       u8     phy_connected_dev_type[256];
+       u8     phy_to_drive_bay_num[256];
+       __le16 phy_to_attached_dev_index[256];
+       u8     box_index;
+       u8     reserved;
+       __le16 extra_physical_drive_flags;
+#define BMIC_PHYS_DRIVE_SUPPORTS_GAS_GAUGE(idphydrv) \
+       (idphydrv->extra_physical_drive_flags & (1 << 10))
+       u8     negotiated_link_rate[256];
+       u8     phy_to_phy_map[256];
+       u8     redundant_path_present_map;
+       u8     redundant_path_failure_map;
+       u8     active_path_number;
+       __le16 alternate_paths_phys_connector[8];
+       u8     alternate_paths_phys_box_on_port[8];
+       u8     multi_lun_device_lun_count;
+       u8     minimum_good_fw_revision[8];
+       u8     unique_inquiry_bytes[20];
+       u8     current_temperature_degreesC;
+       u8     temperature_threshold_degreesC;
+       u8     max_temperature_degreesC;
+       u8     logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */
+       __le16 current_queue_depth_limit;
+       u8     switch_name[10];
+       __le16 switch_port;
+       u8     alternate_paths_switch_name[40];
+       u8     alternate_paths_switch_port[8];
+       __le16 power_on_hours; /* valid only if gas gauge supported */
+       __le16 percent_endurance_used; /* valid only if gas gauge supported. */
+#define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \
+       ((idphydrv->percent_endurance_used & 0x80) || \
+        (idphydrv->percent_endurance_used > 10000))
+       u8     drive_authentication;
+#define BMIC_PHYS_DRIVE_AUTHENTICATED(idphydrv) \
+       (idphydrv->drive_authentication == 0x80)
+       u8     smart_carrier_authentication;
+#define BMIC_SMART_CARRIER_AUTHENTICATION_SUPPORTED(idphydrv) \
+       (idphydrv->smart_carrier_authentication != 0x0)
+#define BMIC_SMART_CARRIER_AUTHENTICATED(idphydrv) \
+       (idphydrv->smart_carrier_authentication == 0x01)
+       u8     smart_carrier_app_fw_version;
+       u8     smart_carrier_bootloader_fw_version;
+       u8     encryption_key_name[64];
+       __le32 misc_drive_flags;
+       __le16 dek_index;
+       u8     padding[112];
+};
+
 #pragma pack()
 #endif /* HPSA_CMD_H */
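
The gas-gauge, wear-out and authentication macros above are plain bit tests on the identify-physical-device buffer. A standalone sketch of the same tests, using a pared-down stand-in structure (the stub type and the sample values are assumptions for illustration, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for the BMIC identify-physical-device fields used here. */
struct id_phys_stub {
	uint16_t extra_physical_drive_flags;
	uint16_t percent_endurance_used;
	uint8_t  drive_authentication;
};

/* Same tests as the BMIC_* macros above, spelled out on the stub type. */
static int supports_gas_gauge(const struct id_phys_stub *id)
{
	return id->extra_physical_drive_flags & (1 << 10);
}

static int ssd_worn_out(const struct id_phys_stub *id)
{
	return (id->percent_endurance_used & 0x80) ||
	       (id->percent_endurance_used > 10000);
}

int main(void)
{
	struct id_phys_stub id = {
		.extra_physical_drive_flags = 1 << 10,	/* gas gauge present */
		.percent_endurance_used = 10500,	/* past end of life */
		.drive_authentication = 0x80,		/* authenticated */
	};

	if (supports_gas_gauge(&id) && ssd_worn_out(&id))
		printf("SSD wear-out reported\n");
	if (id.drive_authentication == 0x80)
		printf("drive authenticated\n");
	return 0;
}
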
index ddf0694..3882d9f 100644 (file)
@@ -2226,36 +2226,36 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
 
        if (hd->proc & PR_INFO) {
                seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
-               seq_printf(m, "\nsync_xfer[] =       ");
+               seq_puts(m, "\nsync_xfer[] =       ");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%02x", hd->sync_xfer[x]);
-               seq_printf(m, "\nsync_stat[] =       ");
+               seq_puts(m, "\nsync_stat[] =       ");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%02x", hd->sync_stat[x]);
        }
 #ifdef PROC_STATISTICS
        if (hd->proc & PR_STATISTICS) {
-               seq_printf(m, "\ncommands issued:    ");
+               seq_puts(m, "\ncommands issued:    ");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
-               seq_printf(m, "\ndisconnects allowed:");
+               seq_puts(m, "\ndisconnects allowed:");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
-               seq_printf(m, "\ndisconnects done:   ");
+               seq_puts(m, "\ndisconnects done:   ");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
                seq_printf(m, "\ninterrupts:      \t%ld", hd->int_cnt);
        }
 #endif
        if (hd->proc & PR_CONNECTED) {
-               seq_printf(m, "\nconnected:     ");
+               seq_puts(m, "\nconnected:     ");
                if (hd->connected) {
                        cmd = (Scsi_Cmnd *) hd->connected;
                        seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
                }
        }
        if (hd->proc & PR_INPUTQ) {
-               seq_printf(m, "\ninput_Q:       ");
+               seq_puts(m, "\ninput_Q:       ");
                cmd = (Scsi_Cmnd *) hd->input_Q;
                while (cmd) {
                        seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
@@ -2263,7 +2263,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
                }
        }
        if (hd->proc & PR_DISCQ) {
-               seq_printf(m, "\ndisconnected_Q:");
+               seq_puts(m, "\ndisconnected_Q:");
                cmd = (Scsi_Cmnd *) hd->disconnected_Q;
                while (cmd) {
                        seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
@@ -2273,7 +2273,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
        if (hd->proc & PR_TEST) {
                ;               /* insert your own custom function here */
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
        spin_unlock_irqrestore(instance->host_lock, flags);
 #endif                         /* PROC_INTERFACE */
        return 0;
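
The in2000 conversions above follow the mechanical rule applied across this series: seq_printf() stays only where a format specifier is expanded, literal strings become seq_puts(), and a single character becomes seq_putc(). A minimal kernel-style sketch of the pattern, with a hypothetical show routine and stats structure:

#include <linux/seq_file.h>

struct demo_stats {			/* hypothetical per-host statistics */
	long cmd_cnt[7];
};

static int demo_show(struct seq_file *m, void *v)
{
	struct demo_stats *st = m->private;
	int i;

	seq_puts(m, "\ncommands issued:    ");		/* literal: seq_puts() */
	for (i = 0; i < 7; i++)
		seq_printf(m, "\t%ld", st->cmd_cnt[i]);	/* formatted: seq_printf() */
	seq_putc(m, '\n');				/* one character: seq_putc() */
	return 0;
}
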
index e5c2843..7542f11 100644 (file)
@@ -2038,15 +2038,14 @@ ips_host_info(ips_ha_t *ha, struct seq_file *m)
 {
        METHOD_TRACE("ips_host_info", 1);
 
-       seq_printf(m, "\nIBM ServeRAID General Information:\n\n");
+       seq_puts(m, "\nIBM ServeRAID General Information:\n\n");
 
        if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
            (le16_to_cpu(ha->nvram->adapter_type) != 0))
                seq_printf(m, "\tController Type                   : %s\n",
                          ips_adapter_name[ha->ad_type - 1]);
        else
-               seq_printf(m,
-                         "\tController Type                   : Unknown\n");
+               seq_puts(m, "\tController Type                   : Unknown\n");
 
        if (ha->io_addr)
                seq_printf(m,
@@ -2138,7 +2137,7 @@ ips_host_info(ips_ha_t *ha, struct seq_file *m)
        seq_printf(m, "\tCurrent Active PT Commands        : %d\n",
                  ha->num_ioctl);
 
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        return 0;
 }
index 4c25485..c66088d 100644 (file)
@@ -2225,6 +2225,15 @@ lpfc_adisc_done(struct lpfc_vport *vport)
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            !(vport->fc_flag & FC_RSCN_MODE) &&
            (phba->sli_rev < LPFC_SLI_REV4)) {
+               /* The ADISCs are complete.  Doesn't matter if they
+                * succeeded or failed because the ADISC completion
+                * routine guarantees to call the state machine and
+                * the RPI is either unregistered (failed ADISC response)
+                * or the RPI is still valid and the node is marked
+                * mapped for a target.  The exchanges should be in the
+                * correct state. This code is specific to SLI3.
+                */
+               lpfc_issue_clear_la(phba, vport);
                lpfc_issue_reg_vpi(phba, vport);
                return;
        }
index 2485255..bc7b34c 100644 (file)
@@ -2240,7 +2240,7 @@ proc_show_battery(struct seq_file *m, void *v)
                goto free_pdev;
 
        if( mega_adapinq(adapter, dma_handle) != 0 ) {
-               seq_printf(m, "Adapter inquiry failed.\n");
+               seq_puts(m, "Adapter inquiry failed.\n");
                printk(KERN_WARNING "megaraid: inquiry failed.\n");
                goto free_inquiry;
        }
index 0d44d91..14e5c7c 100644 (file)
@@ -35,7 +35,7 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "06.805.06.01-rc1"
+#define MEGASAS_VERSION                                "06.806.08.00-rc1"
 
 /*
  * Device IDs
@@ -969,7 +969,20 @@ struct megasas_ctrl_info {
 
        struct {
 #if defined(__BIG_ENDIAN_BITFIELD)
-               u32     reserved:25;
+               u32     reserved:12;
+               u32     discardCacheDuringLDDelete:1;
+               u32     supportSecurityonJBOD:1;
+               u32     supportCacheBypassModes:1;
+               u32     supportDisableSESMonitoring:1;
+               u32     supportForceFlash:1;
+               u32     supportNVDRAM:1;
+               u32     supportDrvActivityLEDSetting:1;
+               u32     supportAllowedOpsforDrvRemoval:1;
+               u32     supportHOQRebuild:1;
+               u32     supportForceTo512e:1;
+               u32     supportNVCacheErase:1;
+               u32     supportDebugQueue:1;
+               u32     supportSwZone:1;
                u32     supportCrashDump:1;
                u32     supportMaxExtLDs:1;
                u32     supportT10RebuildAssist:1;
@@ -981,9 +994,22 @@ struct megasas_ctrl_info {
                u32     supportThermalPollInterval:1;
                u32     supportDisableImmediateIO:1;
                u32     supportT10RebuildAssist:1;
-               u32     supportMaxExtLDs:1;
-               u32     supportCrashDump:1;
-               u32     reserved:25;
+               u32     supportMaxExtLDs:1;
+               u32     supportCrashDump:1;
+               u32     supportSwZone:1;
+               u32     supportDebugQueue:1;
+               u32     supportNVCacheErase:1;
+               u32     supportForceTo512e:1;
+               u32     supportHOQRebuild:1;
+               u32     supportAllowedOpsforDrvRemoval:1;
+               u32     supportDrvActivityLEDSetting:1;
+               u32     supportNVDRAM:1;
+               u32     supportForceFlash:1;
+               u32     supportDisableSESMonitoring:1;
+               u32     supportCacheBypassModes:1;
+               u32     supportSecurityonJBOD:1;
+               u32     discardCacheDuringLDDelete:1;
+               u32     reserved:12;
 #endif
        } adapterOperations3;
 
@@ -1022,6 +1048,13 @@ enum MR_MFI_MPT_PTHR_FLAGS {
        MFI_MPT_ATTACHED = 2,
 };
 
+enum MR_SCSI_CMD_TYPE {
+       READ_WRITE_LDIO = 0,
+       NON_READ_WRITE_LDIO = 1,
+       READ_WRITE_SYSPDIO = 2,
+       NON_READ_WRITE_SYSPDIO = 3,
+};
+
 /* Frame Type */
 #define IO_FRAME                               0
 #define PTHRU_FRAME                            1
@@ -1049,6 +1082,8 @@ enum MR_MFI_MPT_PTHR_FLAGS {
  */
 #define MEGASAS_INT_CMDS                       32
 #define MEGASAS_SKINNY_INT_CMDS                        5
+#define MEGASAS_FUSION_INTERNAL_CMDS           5
+#define MEGASAS_FUSION_IOCTL_CMDS              3
 
 #define MEGASAS_MAX_MSIX_QUEUES                        128
 /*
@@ -1194,19 +1229,23 @@ union megasas_sgl_frame {
 typedef union _MFI_CAPABILITIES {
        struct {
 #if   defined(__BIG_ENDIAN_BITFIELD)
-               u32     reserved:27;
+               u32     reserved:25;
+               u32     security_protocol_cmds_fw:1;
+               u32     support_core_affinity:1;
                u32     support_ndrive_r1_lb:1;
                u32     support_max_255lds:1;
-               u32     reserved1:1;
+               u32     support_fastpath_wb:1;
                u32     support_additional_msix:1;
                u32     support_fp_remote_lun:1;
 #else
                u32     support_fp_remote_lun:1;
                u32     support_additional_msix:1;
-               u32     reserved1:1;
+               u32     support_fastpath_wb:1;
                u32     support_max_255lds:1;
                u32     support_ndrive_r1_lb:1;
-               u32     reserved:27;
+               u32     support_core_affinity:1;
+               u32     security_protocol_cmds_fw:1;
+               u32     reserved:25;
 #endif
        } mfi_capabilities;
        u32     reg;
@@ -1638,20 +1677,20 @@ struct megasas_instance {
        u32 crash_dump_fw_support;
        u32 crash_dump_drv_support;
        u32 crash_dump_app_support;
+       u32 secure_jbod_support;
        spinlock_t crashdump_lock;
 
        struct megasas_register_set __iomem *reg_set;
        u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
        struct megasas_pd_list          pd_list[MEGASAS_MAX_PD];
        struct megasas_pd_list          local_pd_list[MEGASAS_MAX_PD];
-       u8     ld_ids[MEGASAS_MAX_LD_IDS];
+       u8 ld_ids[MEGASAS_MAX_LD_IDS];
        s8 init_id;
 
        u16 max_num_sge;
        u16 max_fw_cmds;
-       /* For Fusion its num IOCTL cmds, for others MFI based its
-          max_fw_cmds */
        u16 max_mfi_cmds;
+       u16 max_scsi_cmds;
        u32 max_sectors_per_req;
        struct megasas_aen_event *ev;
 
@@ -1727,7 +1766,7 @@ struct megasas_instance {
        u8 requestorId;
        char PlasmaFW111;
        char mpio;
-       int throttlequeuedepth;
+       u16 throttlequeuedepth;
        u8 mask_interrupts;
        u8 is_imr;
 };
@@ -1946,5 +1985,6 @@ void __megasas_return_cmd(struct megasas_instance *instance,
 
 void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
        struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion);
+int megasas_cmd_type(struct scsi_cmnd *cmd);
 
 #endif                         /*LSI_MEGARAID_SAS_H */
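
The adapterOperations3 and MFI_CAPABILITIES updates above maintain two mirror-image declarations of the same 32-bit word, so that a capability bit lands in the same wire position whatever the host's bit-field order; each new field is added to both halves in opposite order and the reserved width shrinks to match. A reduced sketch of the idiom (the union name and feature bits are invented for illustration; __BIG_ENDIAN_BITFIELD comes from the kernel byteorder headers and is simply undefined in a standalone build):

#include <stdint.h>

/* Mirror-image bit-field idiom: one layout per host bit-field order.
 * The little-endian firmware always sees feature_a in bit 0 and
 * feature_b in bit 1 of the 32-bit capability word.
 */
typedef union demo_capabilities {
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		uint32_t reserved:30;
		uint32_t feature_b:1;
		uint32_t feature_a:1;
#else
		uint32_t feature_a:1;
		uint32_t feature_b:1;
		uint32_t reserved:30;
#endif
	} bits;
	uint32_t reg;	/* raw word, byte-swapped with cpu_to_le32() before use */
} demo_capabilities_t;
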
index ff283d2..890637f 100644 (file)
@@ -78,7 +78,7 @@ static int allow_vf_ioctls;
 module_param(allow_vf_ioctls, int, S_IRUGO);
 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
 
-static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
+static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
 module_param(throttlequeuedepth, int, S_IRUGO);
 MODULE_PARM_DESC(throttlequeuedepth,
        "Adapter queue depth when throttled due to I/O timeout. Default: 16");
@@ -1417,16 +1417,15 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
 }
 
 /**
- * megasas_is_ldio -           Checks if the cmd is for logical drive
+ * megasas_cmd_type -          Checks if the cmd is for logical drive/sysPD
+ *                             and whether it's RW or non RW
  * @scmd:                      SCSI command
  *
- * Called by megasas_queue_command to find out if the command to be queued
- * is a logical drive command
  */
-inline int megasas_is_ldio(struct scsi_cmnd *cmd)
+inline int megasas_cmd_type(struct scsi_cmnd *cmd)
 {
-       if (!MEGASAS_IS_LOGICAL(cmd))
-               return 0;
+       int ret;
+
        switch (cmd->cmnd[0]) {
        case READ_10:
        case WRITE_10:
@@ -1436,10 +1435,14 @@ inline int megasas_is_ldio(struct scsi_cmnd *cmd)
        case WRITE_6:
        case READ_16:
        case WRITE_16:
-               return 1;
+               ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+                       READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
+               break;
        default:
-               return 0;
+               ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+                       NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
        }
+       return ret;
 }
 
  /**
@@ -1471,7 +1474,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
                if(!cmd->scmd)
                        continue;
                printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
-               if (megasas_is_ldio(cmd->scmd)){
+               if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
                        ldio = (struct megasas_io_frame *)cmd->frame;
                        mfi_sgl = &ldio->sgl;
                        sgcount = ldio->sge_count;
@@ -1531,7 +1534,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
        /*
         * Logical drive command
         */
-       if (megasas_is_ldio(scmd))
+       if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
                frame_count = megasas_build_ldio(instance, scmd, cmd);
        else
                frame_count = megasas_build_dcdb(instance, scmd, cmd);
@@ -1689,22 +1692,66 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
        return 0;
 }
 
+/*
+ * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
+ *                                       the adapter has been killed
+ * @instance:                            Adapter soft state
+ *
+ */
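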
+void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
+{
+       int i;
+       struct megasas_cmd *cmd_mfi;
+       struct megasas_cmd_fusion *cmd_fusion;
+       struct fusion_context *fusion = instance->ctrl_context;
+
+       /* Find all outstanding ioctls */
+       if (fusion) {
+               for (i = 0; i < instance->max_fw_cmds; i++) {
+                       cmd_fusion = fusion->cmd_list[i];
+                       if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
+                               cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+                               if (cmd_mfi->sync_cmd &&
+                                       cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+                                       megasas_complete_cmd(instance,
+                                                            cmd_mfi, DID_OK);
+                       }
+               }
+       } else {
+               for (i = 0; i < instance->max_fw_cmds; i++) {
+                       cmd_mfi = instance->cmd_list[i];
+                       if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
+                               MFI_CMD_ABORT)
+                               megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+               }
+       }
+}
+
+
 void megaraid_sas_kill_hba(struct megasas_instance *instance)
 {
+       /* Set critical error to block I/O & ioctls in case caller didn't */
+       instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+       /* Wait 1 second to ensure IO or ioctls in build have posted */
+       msleep(1000);
        if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-           (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
-           (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
-           (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
-           (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
-           (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
-               writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+               (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+               (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+               (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+               (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+               (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+               writel(MFI_STOP_ADP,
+                       &instance->reg_set->doorbell);
                /* Flush */
                readl(&instance->reg_set->doorbell);
                if (instance->mpio && instance->requestorId)
                        memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
        } else {
-               writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
+               writel(MFI_STOP_ADP,
+                       &instance->reg_set->inbound_doorbell);
        }
+       /* Complete outstanding ioctls when adapter is killed */
+       megasas_complete_outstanding_ioctls(instance);
 }
 
  /**
@@ -1717,6 +1764,7 @@ void
 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
 {
        unsigned long flags;
+
        if (instance->flag & MEGASAS_FW_BUSY
            && time_after(jiffies, instance->last_time + 5 * HZ)
            && atomic_read(&instance->fw_outstanding) <
@@ -1724,13 +1772,8 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
 
                spin_lock_irqsave(instance->host->host_lock, flags);
                instance->flag &= ~MEGASAS_FW_BUSY;
-               if (instance->is_imr) {
-                       instance->host->can_queue =
-                               instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
-               } else
-                       instance->host->can_queue =
-                               instance->max_fw_cmds - MEGASAS_INT_CMDS;
 
+               instance->host->can_queue = instance->max_scsi_cmds;
                spin_unlock_irqrestore(instance->host->host_lock, flags);
        }
 }
@@ -3028,10 +3071,9 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
                                        "was tried multiple times during reset."
                                        "Shutting down the HBA\n",
                                        cmd, cmd->scmd, cmd->sync_cmd);
+                               instance->instancet->disable_intr(instance);
+                               atomic_set(&instance->fw_reset_no_pci_access, 1);
                                megaraid_sas_kill_hba(instance);
-
-                               instance->adprecovery =
-                                               MEGASAS_HW_CRITICAL_ERROR;
                                return;
                        }
                }
@@ -3165,8 +3207,8 @@ process_fw_state_change_wq(struct work_struct *work)
                if (megasas_transition_to_ready(instance, 1)) {
                        printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
 
+                       atomic_set(&instance->fw_reset_no_pci_access, 1);
                        megaraid_sas_kill_hba(instance);
-                       instance->adprecovery   = MEGASAS_HW_CRITICAL_ERROR;
                        return ;
                }
 
@@ -3547,7 +3589,6 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
        int i;
        u32 max_cmd;
        u32 sge_sz;
-       u32 sgl_sz;
        u32 total_sz;
        u32 frame_count;
        struct megasas_cmd *cmd;
@@ -3566,24 +3607,23 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
        }
 
        /*
-        * Calculated the number of 64byte frames required for SGL
-        */
-       sgl_sz = sge_sz * instance->max_num_sge;
-       frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
-       frame_count = 15;
-
-       /*
-        * We need one extra frame for the MFI command
+        * For MFI controllers.
+        * max_num_sge = 60
+        * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
+        * Total 960 bytes (15 MFI frames of 64 bytes)
+        *
+        * Fusion adapters require only 3 extra frames.
+        * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
+        * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
+        * Total 192 bytes (3 MFI frames of 64 bytes)
         */
-       frame_count++;
-
+       frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
        total_sz = MEGAMFI_FRAME_SIZE * frame_count;
        /*
         * Use DMA pool facility provided by PCI layer
         */
        instance->frame_dma_pool = pci_pool_create("megasas frame pool",
-                                                  instance->pdev, total_sz, 64,
-                                                  0);
+                                       instance->pdev, total_sz, 256, 0);
 
        if (!instance->frame_dma_pool) {
                printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
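
The new comment replaces the old per-SGE computation with a fixed budget: 60 SGEs of 16 bytes fill 960 bytes, i.e. 15 MFI frames of 64 bytes, while a Fusion adapter's 16 IOCTL SGEs of 12 bytes fit in 3 frames, and one extra frame is added for the MFI command itself. A standalone check of that arithmetic:

#include <stdio.h>

#define MEGAMFI_FRAME_SIZE 64	/* bytes per MFI frame, as in the driver */

static unsigned int frames_for(unsigned int num_sge, unsigned int sge_sz)
{
	unsigned int sgl_sz = num_sge * sge_sz;

	/* round the SGL up to whole frames, then add one frame for the command */
	return (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE + 1;
}

int main(void)
{
	printf("MFI:    %u frames\n", frames_for(60, 16));	/* 15 + 1 = 16 */
	printf("Fusion: %u frames\n", frames_for(16, 12));	/*  3 + 1 =  4 */
	return 0;
}
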
@@ -4631,28 +4671,48 @@ static int megasas_init_fw(struct megasas_instance *instance)
                                instance->crash_dump_h);
                instance->crash_dump_buf = NULL;
        }
+
+       instance->secure_jbod_support =
+               ctrl_info->adapterOperations3.supportSecurityonJBOD;
+       if (instance->secure_jbod_support)
+               dev_info(&instance->pdev->dev, "Firmware supports Secure JBOD\n");
        instance->max_sectors_per_req = instance->max_num_sge *
                                                PAGE_SIZE / 512;
        if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
                instance->max_sectors_per_req = tmp_sectors;
 
-       /* Check for valid throttlequeuedepth module parameter */
-       if (instance->is_imr) {
-               if (throttlequeuedepth > (instance->max_fw_cmds -
-                                         MEGASAS_SKINNY_INT_CMDS))
-                       instance->throttlequeuedepth =
-                               MEGASAS_THROTTLE_QUEUE_DEPTH;
-               else
-                       instance->throttlequeuedepth = throttlequeuedepth;
+       /*
+        * 1. For fusion adapters, 3 commands for IOCTL and 5 commands
+        *    for driver's internal DCMDs.
+        * 2. For MFI skinny adapters, 5 commands for IOCTL + driver's
+        *    internal DCMDs.
+        * 3. For the remaining MFI adapters, 27 commands are reserved for
+        *    IOCTLs and 5 commands for the driver's internal DCMDs.
+        */
+       if (instance->ctrl_context) {
+               instance->max_scsi_cmds = instance->max_fw_cmds -
+                                       (MEGASAS_FUSION_INTERNAL_CMDS +
+                                       MEGASAS_FUSION_IOCTL_CMDS);
+               sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+       } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+               (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+               instance->max_scsi_cmds = instance->max_fw_cmds -
+                                               MEGASAS_SKINNY_INT_CMDS;
+               sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
        } else {
-               if (throttlequeuedepth > (instance->max_fw_cmds -
-                                         MEGASAS_INT_CMDS))
-                       instance->throttlequeuedepth =
-                               MEGASAS_THROTTLE_QUEUE_DEPTH;
-               else
-                       instance->throttlequeuedepth = throttlequeuedepth;
+               instance->max_scsi_cmds = instance->max_fw_cmds -
+                                               MEGASAS_INT_CMDS;
+               sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
        }
 
+       /* Check for valid throttlequeuedepth module parameter */
+       if (throttlequeuedepth &&
+                       throttlequeuedepth <= instance->max_scsi_cmds)
+               instance->throttlequeuedepth = throttlequeuedepth;
+       else
+               instance->throttlequeuedepth =
+                               MEGASAS_THROTTLE_QUEUE_DEPTH;
+
         /*
        * Setup tasklet for cmd completion
        */
@@ -4947,12 +5007,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
         */
        host->irq = instance->pdev->irq;
        host->unique_id = instance->unique_id;
-       if (instance->is_imr) {
-               host->can_queue =
-                       instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
-       } else
-               host->can_queue =
-                       instance->max_fw_cmds - MEGASAS_INT_CMDS;
+       host->can_queue = instance->max_scsi_cmds;
        host->this_id = instance->init_id;
        host->sg_tablesize = instance->max_num_sge;
 
@@ -5130,8 +5185,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
                        ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
                INIT_LIST_HEAD(&fusion->cmd_pool);
                spin_lock_init(&fusion->mpt_pool_lock);
-               memset(fusion->load_balance_info, 0,
-                       sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
        }
        break;
        default: /* For all other supported controllers */
@@ -5215,12 +5268,10 @@ static int megasas_probe_one(struct pci_dev *pdev,
        instance->init_id = MEGASAS_DEFAULT_INIT_ID;
        instance->ctrl_info = NULL;
 
+
        if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-               (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+               (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
                instance->flag_ieee = 1;
-               sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
-       } else
-               sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
 
        megasas_dbg_lvl = 0;
        instance->flag = 0;
@@ -6215,9 +6266,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
                goto out_kfree_ioc;
        }
 
-       /*
-        * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
-        */
        if (down_interruptible(&instance->ioctl_sem)) {
                error = -ERESTARTSYS;
                goto out_kfree_ioc;
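
The rework above derives the host queue depth from a single reservation per adapter class: Fusion keeps 3 IOCTL plus 5 internal DCMD frames, skinny MFI keeps 5, the remaining MFI adapters keep 32, and the throttlequeuedepth module parameter is honoured only when it fits under that ceiling. A sketch of the calculation; the firmware command count and the module-parameter value are assumptions for illustration:

#include <stdio.h>

#define MEGASAS_INT_CMDS		32
#define MEGASAS_SKINNY_INT_CMDS		5
#define MEGASAS_FUSION_INTERNAL_CMDS	5
#define MEGASAS_FUSION_IOCTL_CMDS	3
#define MEGASAS_THROTTLE_QUEUE_DEPTH	16

enum adapter_class { FUSION, SKINNY_MFI, OTHER_MFI };

static unsigned int max_scsi_cmds(enum adapter_class c, unsigned int max_fw_cmds)
{
	switch (c) {
	case FUSION:
		return max_fw_cmds -
		       (MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS);
	case SKINNY_MFI:
		return max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
	default:
		return max_fw_cmds - MEGASAS_INT_CMDS;
	}
}

int main(void)
{
	unsigned int fw_cmds = 1008;	/* assumed firmware value */
	unsigned int scsi_cmds = max_scsi_cmds(FUSION, fw_cmds);
	unsigned int throttle = 900;	/* module parameter under test */

	/* same validation as megasas_init_fw(): keep it only if it fits */
	if (!(throttle && throttle <= scsi_cmds))
		throttle = MEGASAS_THROTTLE_QUEUE_DEPTH;

	printf("can_queue=%u throttled depth=%u\n", scsi_cmds, throttle);
	return 0;
}
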
index 460c6a3..4f72287 100644 (file)
@@ -172,6 +172,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
        struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
        struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
        int i;
+       u16 ld_count;
 
 
        struct MR_DRV_RAID_MAP_ALL *drv_map =
@@ -191,9 +192,10 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
                fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
                        fusion->ld_map[(instance->map_id & 1)];
                pFwRaidMap = &fw_map_old->raidMap;
+               ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
 
 #if VD_EXT_DEBUG
-               for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) {
+               for (i = 0; i < ld_count; i++) {
                        dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
                                "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
                                instance->unique_id, i,
@@ -205,12 +207,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
 
                memset(drv_map, 0, fusion->drv_map_sz);
                pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
-               pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount;
+               pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
                pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
                for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
                        pDrvRaidMap->ldTgtIdToLd[i] =
                                (u8)pFwRaidMap->ldTgtIdToLd[i];
-               for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) {
+               for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
+                       i < MAX_LOGICAL_DRIVES_EXT; i++)
+                       pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
+               for (i = 0; i < ld_count; i++) {
                        pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
 #if VD_EXT_DEBUG
                        dev_dbg(&instance->pdev->dev,
@@ -252,7 +257,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
        struct LD_LOAD_BALANCE_INFO *lbInfo;
        PLD_SPAN_INFO ldSpanInfo;
        struct MR_LD_RAID         *raid;
-       int ldCount, num_lds;
+       u16 ldCount, num_lds;
        u16 ld;
        u32 expected_size;
 
@@ -356,7 +361,7 @@ static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
 
        for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
                ld = MR_TargetIdToLdGet(ldCount, map);
-                       if (ld >= MAX_LOGICAL_DRIVES_EXT)
+                       if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
                                continue;
                raid = MR_LdRaidGet(ld, map);
                dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
@@ -1157,7 +1162,7 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
 
        for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
                ld = MR_TargetIdToLdGet(ldCount, map);
-               if (ld >= MAX_LOGICAL_DRIVES_EXT)
+               if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
                        continue;
                raid = MR_LdRaidGet(ld, map);
                for (element = 0; element < MAX_QUAD_DEPTH; element++) {
index 71557f6..675b5e7 100644 (file)
@@ -63,7 +63,6 @@ extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
 extern void
 megasas_complete_cmd(struct megasas_instance *instance,
                     struct megasas_cmd *cmd, u8 alt_status);
-int megasas_is_ldio(struct scsi_cmnd *cmd);
 int
 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
              int seconds);
@@ -103,6 +102,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
 {
        struct megasas_register_set __iomem *regs;
        regs = instance->reg_set;
+
+       instance->mask_interrupts = 0;
        /* For Thunderbolt/Invader also clear intr on enable */
        writel(~0, &regs->outbound_intr_status);
        readl(&regs->outbound_intr_status);
@@ -111,7 +112,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
 
        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
-       instance->mask_interrupts = 0;
 }
 
 /**
@@ -196,6 +196,7 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
 
        cmd->scmd = NULL;
        cmd->sync_cmd_idx = (u32)ULONG_MAX;
+       memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
        list_add(&cmd->list, (&fusion->cmd_pool)->next);
 
        spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
@@ -689,6 +690,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                = 1;
        init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb
                = 1;
+       init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw
+               = 1;
        /* Convert capability to LE32 */
        cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
 
@@ -698,12 +701,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                cpu_to_le32(lower_32_bits(ioc_init_handle));
        init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
 
-       req_desc.Words = 0;
+       req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
+       req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
        req_desc.MFAIo.RequestFlags =
                (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
-                MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-       cpu_to_le32s((u32 *)&req_desc.MFAIo);
-       req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
+               MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 
        /*
         * disable the intr before firing the init frame
@@ -1017,8 +1019,12 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
         * does not exceed max cmds that the FW can support
         */
        instance->max_fw_cmds = instance->max_fw_cmds-1;
-       /* Only internal cmds (DCMD) need to have MFI frames */
-       instance->max_mfi_cmds = MEGASAS_INT_CMDS;
+
+       /*
+        * Only the driver's internal DCMDs and IOCTL DCMDs need MFI frames.
+        */
+       instance->max_mfi_cmds =
+               MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
 
        max_cmd = instance->max_fw_cmds;
 
@@ -1285,6 +1291,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
 
                        sgl_ptr =
                          (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
+                       memset(sgl_ptr, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
                }
        }
 
@@ -1658,6 +1665,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
        u32 device_id;
        struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
        u16 pd_index = 0;
+       u16 os_timeout_value;
+       u16 timeout_limit;
        struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
        struct fusion_context *fusion = instance->ctrl_context;
        u8                          span, physArm;
@@ -1674,52 +1683,66 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
 
        io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
 
-
-       /* Check if this is a system PD I/O */
        if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
            instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
-               io_request->Function = 0;
                if (fusion->fast_path_io)
                        io_request->DevHandle =
                        local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
-               io_request->RaidContext.timeoutValue =
-                       local_map_ptr->raidMap.fpPdIoTimeoutSec;
-               io_request->RaidContext.regLockFlags = 0;
-               io_request->RaidContext.regLockRowLBA = 0;
-               io_request->RaidContext.regLockLength = 0;
                io_request->RaidContext.RAIDFlags =
-                       MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
-                       MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
-               if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
-                       (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
-                       io_request->IoFlags |= cpu_to_le16(
-                               MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
-               cmd->request_desc->SCSIIO.RequestFlags =
-                       (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
-                        MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-               cmd->request_desc->SCSIIO.DevHandle =
-                       local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+                       MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+                       << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+               cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
                cmd->request_desc->SCSIIO.MSIxIndex =
                        instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
-               /*
-                * If the command is for the tape device, set the
-                * FP timeout to the os layer timeout value.
-                */
-               if (scmd->device->type == TYPE_TAPE) {
-                       if ((scmd->request->timeout / HZ) > 0xFFFF)
-                               io_request->RaidContext.timeoutValue =
-                                       0xFFFF;
-                       else
-                               io_request->RaidContext.timeoutValue =
-                                       scmd->request->timeout / HZ;
+               os_timeout_value = scmd->request->timeout / HZ;
+
+               if (instance->secure_jbod_support &&
+                       (megasas_cmd_type(scmd) == NON_READ_WRITE_SYSPDIO)) {
+                       /* system pd firmware path */
+                       io_request->Function  =
+                               MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+                       cmd->request_desc->SCSIIO.RequestFlags =
+                               (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+                               MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+                       io_request->RaidContext.timeoutValue =
+                               cpu_to_le16(os_timeout_value);
+               } else {
+                       /* system pd Fast Path */
+                       io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+                       io_request->RaidContext.regLockFlags = 0;
+                       io_request->RaidContext.regLockRowLBA = 0;
+                       io_request->RaidContext.regLockLength = 0;
+                       timeout_limit = (scmd->device->type == TYPE_DISK) ?
+                                       255 : 0xFFFF;
+                       io_request->RaidContext.timeoutValue =
+                               cpu_to_le16((os_timeout_value > timeout_limit) ?
+                               timeout_limit : os_timeout_value);
+               if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+                       (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+                       io_request->IoFlags |=
+                       cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+
+                       cmd->request_desc->SCSIIO.RequestFlags =
+                               (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+                               MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
                }
        } else {
                if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
                        goto NonFastPath;
 
+               /*
+                * For older firmware, Driver should not access ldTgtIdToLd
+                * beyond index 127 and for Extended VD firmware, ldTgtIdToLd
+                * should not go beyond 255.
+                */
+
+               if ((!fusion->fast_path_io) ||
+                       (device_id >= instance->fw_supported_vd_count))
+                       goto NonFastPath;
+
                ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
-               if ((ld >= instance->fw_supported_vd_count) ||
-                       (!fusion->fast_path_io))
+
+               if (ld >= instance->fw_supported_vd_count)
                        goto NonFastPath;
 
                raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -1811,7 +1834,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
         */
        io_request->IoFlags = cpu_to_le16(scp->cmd_len);
 
-       if (megasas_is_ldio(scp))
+       if (megasas_cmd_type(scp) == READ_WRITE_LDIO)
                megasas_build_ldio_fusion(instance, scp, cmd);
        else
                megasas_build_dcdb_fusion(instance, scp, cmd);
@@ -2612,7 +2635,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                                instance->host->host_no);
                        megaraid_sas_kill_hba(instance);
                        instance->skip_heartbeat_timer_del = 1;
-                       instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
                        retval = FAILED;
                        goto out;
                }
@@ -2808,8 +2830,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                                dev_info(&instance->pdev->dev,
                                        "Failed from %s %d\n",
                                        __func__, __LINE__);
-                               instance->adprecovery =
-                                       MEGASAS_HW_CRITICAL_ERROR;
                                megaraid_sas_kill_hba(instance);
                                retval = FAILED;
                        }
@@ -2858,7 +2878,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
                       "adapter scsi%d.\n", instance->host->host_no);
                megaraid_sas_kill_hba(instance);
                instance->skip_heartbeat_timer_del = 1;
-               instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
                retval = FAILED;
        } else {
                /* For VF: Restart HB timer if we didn't OCR */
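
In the reworked megasas_build_dcdb_fusion() above, non-read/write commands to a secure-JBOD system PD go through the firmware with the OS timeout passed straight through, while the fast path clamps the timeout to 255 seconds for disks and 0xFFFF for other device types. A standalone sketch of just the clamping rule (the sample timeouts are made up):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the fast-path timeout clamp: disks are limited to 255 s,
 * everything else (e.g. tapes) to the 16-bit maximum of 0xFFFF s.
 */
static uint16_t fastpath_timeout(unsigned int os_timeout_sec, int is_disk)
{
	uint16_t limit = is_disk ? 255 : 0xFFFF;

	return (os_timeout_sec > limit) ? limit : (uint16_t)os_timeout_sec;
}

int main(void)
{
	printf("disk, 30 s   -> %u\n", fastpath_timeout(30, 1));	/* 30   */
	printf("disk, 600 s  -> %u\n", fastpath_timeout(600, 1));	/* 255  */
	printf("tape, 7200 s -> %u\n", fastpath_timeout(7200, 0));	/* 7200 */
	return 0;
}
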
index 5ab7dae..56e6db2 100644 (file)
@@ -306,14 +306,9 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
  * MPT RAID MFA IO Descriptor.
  */
 struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
-#if   defined(__BIG_ENDIAN_BITFIELD)
-       u32     MessageAddress1:24; /* bits 31:8*/
-       u32     RequestFlags:8;
-#else
        u32     RequestFlags:8;
-       u32     MessageAddress1:24; /* bits 31:8*/
-#endif
-       u32     MessageAddress2;      /* bits 61:32 */
+       u32     MessageAddress1:24;
+       u32     MessageAddress2;
 };
 
 /* Default Request Descriptor */
index 088eefa..7fc6f23 100644 (file)
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.32
+ *  mpi2.h Version:  02.00.35
  *
  *  Version History
  *  ---------------
@@ -83,6 +83,9 @@
  *  04-09-13  02.00.30  Bumped MPI2_HEADER_VERSION_UNIT.
  *  04-17-13  02.00.31  Bumped MPI2_HEADER_VERSION_UNIT.
  *  08-19-13  02.00.32  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  12-05-13  02.00.33  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  01-08-14  02.00.34  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  06-13-14  02.00.35  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x20)
+#define MPI2_HEADER_VERSION_UNIT            (0x23)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
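
The header version unit occupies the upper byte of the 16-bit header version word, per the mask and shift above, so bumping MPI2_HEADER_VERSION_UNIT from 0x20 to 0x23 only changes bits 15:8. A standalone check; composing the word as (unit << shift) | dev and the DEV mask are assumptions based on the usual MPI header layout:

#include <stdio.h>
#include <stdint.h>

#define MPI2_HEADER_VERSION_UNIT	(0x23)
#define MPI2_HEADER_VERSION_DEV		(0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK	(0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT	(8)
#define MPI2_HEADER_VERSION_DEV_MASK	(0x00FF)	/* assumed counterpart mask */

int main(void)
{
	uint16_t hdr = (MPI2_HEADER_VERSION_UNIT << MPI2_HEADER_VERSION_UNIT_SHIFT) |
		       MPI2_HEADER_VERSION_DEV;

	printf("header version word: 0x%04x\n", hdr);	/* 0x2300 */
	printf("unit: 0x%02x  dev: 0x%02x\n",
	       (hdr & MPI2_HEADER_VERSION_UNIT_MASK) >> MPI2_HEADER_VERSION_UNIT_SHIFT,
	       hdr & MPI2_HEADER_VERSION_DEV_MASK);
	return 0;
}
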
index 510ef0d..ee8d2d6 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.26
+ *    mpi2_cnfg.h Version:  02.00.29
  *
  *  Version History
  *  ---------------
  *  04-09-13  02.00.25  Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
  *                     Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
  *                     match the specification.
+ *  12-05-13  02.00.27  Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
+ *                     MPI2_CONFIG_PAGE_MAN_7.
+ *                     Added EnclosureLevel and ConnectorName fields to
+ *                     MPI2_CONFIG_PAGE_SAS_DEV_0.
+ *                     Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
+ *                     MPI2_CONFIG_PAGE_SAS_DEV_0.
+ *                     Added EnclosureLevel field to
+ *                     MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ *                     Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
+ *                     MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ *  01-08-14  02.00.28  Added more defines for the BiosOptions field of
+ *                     MPI2_CONFIG_PAGE_BIOS_1.
+ *  06-13-14  02.00.29  Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
+ *                     more defines for the BiosOptions field.
  *  --------------------------------------------------------------------------
  */
 
@@ -706,6 +720,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
 #define MPI2_MANUFACTURING7_PAGEVERSION                 (0x01)
 
 /* defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL         (0x00000008)
 #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER       (0x00000002)
 #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO                (0x00000001)
 
@@ -1224,7 +1239,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
        MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
        U32                     BiosOptions;                /* 0x04 */
        U32                     IOCSettings;                /* 0x08 */
-       U32                     Reserved1;                  /* 0x0C */
+       U8                      SSUTimeout;                 /* 0x0C */
+       U8                      Reserved1;                  /* 0x0D */
+       U16                     Reserved2;                  /* 0x0E */
        U32                     DeviceSettings;             /* 0x10 */
        U16                     NumberOfDevices;            /* 0x14 */
        U16                     UEFIVersion;                /* 0x16 */
@@ -1235,9 +1252,24 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
 } MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1,
   Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t;
 
-#define MPI2_BIOSPAGE1_PAGEVERSION                      (0x05)
+#define MPI2_BIOSPAGE1_PAGEVERSION                      (0x07)
 
 /* values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK                        (0x00003800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL                       (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE                  (0x00000800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID                       (0x00001000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS                       (0x00001800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY                       (0x00002000)
+
+#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS                 (0x00000400)
+
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD       (0x00000300)
+#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD   (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD       (0x00000100)
+#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD    (0x00000200)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD    (0x00000300)
+
 #define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID                  (0x000000F0)
 #define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID                   (0x00000000)
 
@@ -2420,13 +2452,13 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
     U8                                  PortGroups;             /* 0x2C */
     U8                                  DmaGroup;               /* 0x2D */
     U8                                  ControlGroup;           /* 0x2E */
-    U8                                  Reserved1;              /* 0x2F */
-    U32                                 Reserved2;              /* 0x30 */
+       U8                               EnclosureLevel;         /* 0x2F */
+       U8                               ConnectorName[4];       /* 0x30 */
     U32                                 Reserved3;              /* 0x34 */
 } MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
   Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t;
 
-#define MPI2_SASDEVICE0_PAGEVERSION         (0x08)
+#define MPI2_SASDEVICE0_PAGEVERSION         (0x09)
 
 /* values for SAS Device Page 0 AccessStatus field */
 #define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS                  (0x00)
@@ -2464,6 +2496,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
 #define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED           (0x0020)
 #define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED           (0x0010)
 #define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH         (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID             (0x0002)
 #define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT               (0x0001)
 
 
@@ -2732,7 +2765,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0
     U16                                 EnclosureHandle;            /* 0x16 */
     U16                                 NumSlots;                   /* 0x18 */
     U16                                 StartSlot;                  /* 0x1A */
-    U16                                 Reserved2;                  /* 0x1C */
+       U8                               Reserved2;                  /* 0x1C */
+       U8                               EnclosureLevel;             /* 0x1D */
     U16                                 SEPDevHandle;               /* 0x1E */
     U32                                 Reserved3;                  /* 0x20 */
     U32                                 Reserved4;                  /* 0x24 */
@@ -2740,9 +2774,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0
   MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
   Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t;
 
-#define MPI2_SASENCLOSURE0_PAGEVERSION      (0x03)
+#define MPI2_SASENCLOSURE0_PAGEVERSION      (0x04)
 
 /* values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID      (0x0010)
 #define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK              (0x000F)
 #define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN           (0x0000)
 #define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES           (0x0001)
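
The new BiosOptions defines carve a selector field out of bits 13:11 (the PNS_MASK) with five enumerated settings, alongside standalone flags; a consumer masks first, then compares against the enumerated values. A standalone sketch of the decode (the printed labels simply echo the define suffixes, and the sample register value is made up):

#include <stdio.h>
#include <stdint.h>

#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK		(0x00003800)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL	(0x00000000)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE	(0x00000800)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID	(0x00001000)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS	(0x00001800)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY	(0x00002000)

static const char *pns_name(uint32_t bios_options)
{
	switch (bios_options & MPI2_BIOSPAGE1_OPTIONS_PNS_MASK) {
	case MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL:      return "PBDHL";
	case MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE: return "ENCSLOSURE";
	case MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID:      return "LWWID";
	case MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS:      return "PSENS";
	case MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY:      return "ESPHY";
	default:                                    return "reserved";
	}
}

int main(void)
{
	uint32_t bios_options = 0x00001000;	/* made-up page value */

	printf("PNS setting: %s\n", pns_name(bios_options));	/* LWWID */
	return 0;
}
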
index 2c3b0f2..b02de48 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.23
+ *  mpi2_ioc.h Version:  02.00.24
  *
  *  Version History
  *  ---------------
  *                      Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
  *                      Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
  *                      Added Encrypted Hash Extended Image.
+ *  12-05-13  02.00.24  Added MPI25_HASH_IMAGE_TYPE_BIOS.
  *  --------------------------------------------------------------------------
  */
 
@@ -1589,6 +1590,7 @@ Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t;
 /* values for HashImageType */
 #define MPI25_HASH_IMAGE_TYPE_UNUSED            (0x00)
 #define MPI25_HASH_IMAGE_TYPE_FIRMWARE          (0x01)
+#define MPI25_HASH_IMAGE_TYPE_BIOS              (0x02)
 
 /* values for HashAlgorithm */
 #define MPI25_HASH_ALGORITHM_UNUSED             (0x00)
index 9be03ed..659b8ac 100644 (file)
@@ -6,7 +6,7 @@
  *          Title:  MPI diagnostic tool structures and definitions
  *  Creation Date:  March 26, 2007
  *
- *    mpi2_tool.h Version:  02.00.11
+ *    mpi2_tool.h Version:  02.00.12
  *
  *  Version History
  *  ---------------
@@ -29,7 +29,8 @@
  *                      MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
  *  07-26-12  02.00.10  Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
  *                     it uses MPI Chain SGE as well as MPI Simple SGE.
- * 08-19-13  02.00.11  Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ *  08-19-13  02.00.11  Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ *  01-08-14  02.00.12  Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
  *  --------------------------------------------------------------------------
  */
 
@@ -101,6 +102,7 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST
 #define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES      (0x20000000)
 #define MPI2_TOOLBOX_CLEAN_FW_CURRENT               (0x10000000)
 #define MPI2_TOOLBOX_CLEAN_FW_BACKUP                (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC   (0x04000000)
 #define MPI2_TOOLBOX_CLEAN_MEGARAID                 (0x02000000)
 #define MPI2_TOOLBOX_CLEAN_INITIALIZATION           (0x01000000)
 #define MPI2_TOOLBOX_CLEAN_FLASH                    (0x00000004)
index 58e4521..11248de 100644 (file)
@@ -4,7 +4,8 @@
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
  * Copyright (C) 2007-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -639,6 +640,9 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
                if (!ioc->hide_ir_msg)
                        desc = "Log Entry Added";
                break;
+       case MPI2_EVENT_TEMP_THRESHOLD:
+               desc = "Temperature Threshold";
+               break;
        }
 
        if (!desc)
@@ -1296,6 +1300,8 @@ _base_free_irq(struct MPT2SAS_ADAPTER *ioc)
 
        list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
                list_del(&reply_q->list);
+               irq_set_affinity_hint(reply_q->vector, NULL);
+               free_cpumask_var(reply_q->affinity_hint);
                synchronize_irq(reply_q->vector);
                free_irq(reply_q->vector, reply_q);
                kfree(reply_q);
@@ -1325,6 +1331,11 @@ _base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
        reply_q->ioc = ioc;
        reply_q->msix_index = index;
        reply_q->vector = vector;
+
+       if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
+               return -ENOMEM;
+       cpumask_clear(reply_q->affinity_hint);
+
        atomic_set(&reply_q->busy, 0);
        if (ioc->msix_enable)
                snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
@@ -1359,6 +1370,7 @@ static void
 _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
 {
        unsigned int cpu, nr_cpus, nr_msix, index = 0;
+       struct adapter_reply_queue *reply_q;
 
        if (!_base_is_controller_msix_enabled(ioc))
                return;
@@ -1373,20 +1385,30 @@ _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
 
        cpu = cpumask_first(cpu_online_mask);
 
-       do {
+       list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+
                unsigned int i, group = nr_cpus / nr_msix;
 
+               if (cpu >= nr_cpus)
+                       break;
+
                if (index < nr_cpus % nr_msix)
                        group++;
 
                for (i = 0 ; i < group ; i++) {
                        ioc->cpu_msix_table[cpu] = index;
+                       cpumask_or(reply_q->affinity_hint,
+                                  reply_q->affinity_hint, get_cpu_mask(cpu));
                        cpu = cpumask_next(cpu, cpu_online_mask);
                }
 
+               if (irq_set_affinity_hint(reply_q->vector,
+                                          reply_q->affinity_hint))
+                       dinitprintk(ioc, pr_info(MPT2SAS_FMT
+                           "error setting affinity hint for irq vector %d\n",
+                           ioc->name, reply_q->vector));
                index++;
-
-       } while (cpu < nr_cpus);
+       }
 }
 
 /**
@@ -2338,6 +2360,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
        mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
        mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
        mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+       mpt2sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
        _base_display_ioc_capabilities(ioc);
 
        /*
@@ -2355,6 +2378,8 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
        ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
        mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
 
+       if (ioc->iounit_pg8.NumSensors)
+               ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
 }
 
 /**
@@ -2486,9 +2511,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 
        /* command line tunables  for max sgl entries */
        if (max_sgl_entries != -1) {
-               ioc->shost->sg_tablesize = (max_sgl_entries <
-                   MPT2SAS_SG_DEPTH) ? max_sgl_entries :
-                   MPT2SAS_SG_DEPTH;
+               ioc->shost->sg_tablesize =  min_t(unsigned short,
+                            max_sgl_entries, SCSI_MAX_SG_CHAIN_SEGMENTS);
+               if (ioc->shost->sg_tablesize > MPT2SAS_SG_DEPTH)
+                       printk(MPT2SAS_WARN_FMT
+                        "sg_tablesize(%u) is bigger than kernel defined"
+                        " SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
+                         ioc->shost->sg_tablesize, MPT2SAS_SG_DEPTH);
        } else {
                ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
        }
@@ -3236,7 +3265,7 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
-       u8 issue_reset;
+       bool issue_reset = false;
        int rc;
        void *request;
        u16 wait_state_count;
@@ -3300,7 +3329,7 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
                if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
-                       issue_reset = 1;
+                       issue_reset = true;
                goto issue_host_reset;
        }
        if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
@@ -3341,7 +3370,7 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
-       u8 issue_reset;
+       bool issue_reset = false;
        int rc;
        void *request;
        u16 wait_state_count;
@@ -3398,7 +3427,7 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SepRequest_t)/4);
                if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
-                       issue_reset = 1;
+                       issue_reset = true;
                goto issue_host_reset;
        }
        if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
@@ -4594,6 +4623,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
        _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
        _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
        _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
+       _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
        r = _base_make_ioc_operational(ioc, CAN_SLEEP);
        if (r)
                goto out_free_resources;
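The reworked _base_assign_reply_queues() above distributes the online CPUs in blocks across the MSI-X reply queues: each queue gets nr_cpus / nr_msix CPUs, the first nr_cpus % nr_msix queues take one extra so the remainder is spread evenly, and the resulting mask is also handed to irq_set_affinity_hint(). A minimal user-space sketch of just that distribution arithmetic (function name and sample numbers are illustrative, not part of the driver):

#include <stdio.h>

/* Split nr_cpus CPUs across nr_msix reply queues the same way the new
 * assignment loop does: a base group of nr_cpus / nr_msix per queue,
 * plus one extra CPU for the first nr_cpus % nr_msix queues. */
static void show_cpu_msix_map(unsigned int nr_cpus, unsigned int nr_msix)
{
	unsigned int cpu = 0, index;

	for (index = 0; index < nr_msix && cpu < nr_cpus; index++) {
		unsigned int i, group = nr_cpus / nr_msix;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0; i < group; i++) {
			printf("cpu %u -> msix index %u\n", cpu, index);
			cpu++;
		}
	}
}

int main(void)
{
	show_cpu_msix_map(6, 4);	/* 6 CPUs over 4 vectors: 2, 2, 1, 1 */
	return 0;
}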
index 239f169..caff8d1 100644 (file)
@@ -4,7 +4,8 @@
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
  * Copyright (C) 2007-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
 
 /* driver versioning info */
 #define MPT2SAS_DRIVER_NAME            "mpt2sas"
-#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
+#define MPT2SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT2SAS_DESCRIPTION    "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION         "18.100.00.00"
-#define MPT2SAS_MAJOR_VERSION          18
+#define MPT2SAS_DRIVER_VERSION         "20.100.00.00"
+#define MPT2SAS_MAJOR_VERSION          20
 #define MPT2SAS_MINOR_VERSION          100
 #define MPT2SAS_BUILD_VERSION          00
 #define MPT2SAS_RELEASE_VERSION                00
@@ -586,6 +587,7 @@ struct adapter_reply_queue {
        Mpi2ReplyDescriptorsUnion_t *reply_post_free;
        char                    name[MPT_NAME_LENGTH];
        atomic_t                busy;
+       cpumask_var_t           affinity_hint;
        struct list_head        list;
 };
 
@@ -725,6 +727,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
  * @ioc_pg8: static ioc page 8
  * @iounit_pg0: static iounit page 0
  * @iounit_pg1: static iounit page 1
+ * @iounit_pg8: static iounit page 8
  * @sas_hba: sas host object
  * @sas_expander_list: expander object list
  * @sas_node_lock:
@@ -795,6 +798,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
  * @reply_post_host_index: head index in the pool where FW completes IO
  * @delayed_tr_list: target reset link list
  * @delayed_tr_volume_list: volume target reset link list
+ * @temp_sensors_count: flag to carry the number of temperature sensors
  */
 struct MPT2SAS_ADAPTER {
        struct list_head list;
@@ -892,6 +896,7 @@ struct MPT2SAS_ADAPTER {
        Mpi2IOCPage8_t ioc_pg8;
        Mpi2IOUnitPage0_t iounit_pg0;
        Mpi2IOUnitPage1_t iounit_pg1;
+       Mpi2IOUnitPage8_t iounit_pg8;
 
        struct _boot_device req_boot_device;
        struct _boot_device req_alt_boot_device;
@@ -992,6 +997,7 @@ struct MPT2SAS_ADAPTER {
 
        struct list_head delayed_tr_list;
        struct list_head delayed_tr_volume_list;
+       u8              temp_sensors_count;
 
        /* diag buffer support */
        u8              *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
@@ -1120,6 +1126,8 @@ int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2IOUnitPage1_t *config_page);
 int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc,
+       Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page);
 int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
        Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz);
 int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
index c72a2ff..c43815b 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
  * Copyright (C) 2007-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -718,6 +719,42 @@ mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
        return r;
 }
 
+/**
+ * mpt2sas_config_get_iounit_pg8 - obtain iounit page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc,
+       Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page)
+{
+       Mpi2ConfigRequest_t mpi_request;
+       int r;
+
+       memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+       mpi_request.Function = MPI2_FUNCTION_CONFIG;
+       mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+       mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+       mpi_request.Header.PageNumber = 8;
+       mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION;
+       mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+       r = _config_request(ioc, &mpi_request, mpi_reply,
+           MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+       if (r)
+               goto out;
+
+       mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+       r = _config_request(ioc, &mpi_request, mpi_reply,
+           MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+           sizeof(*config_page));
+ out:
+       return r;
+}
+
 /**
  * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8
  * @ioc: per adapter object
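As with the other config-page helpers, the new mpt2sas_config_get_iounit_pg8() issues the request twice: first with MPI2_CONFIG_ACTION_PAGE_HEADER and no buffer to populate the header, then again with MPI2_CONFIG_ACTION_PAGE_READ_CURRENT and the caller's page buffer. A rough stand-alone sketch of that two-step shape, using obviously hypothetical stand-ins for the firmware interface:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the MPI2 config machinery, for illustration only. */
enum cfg_action { PAGE_HEADER, READ_CURRENT };

struct cfg_request {
	int page_number;
	enum cfg_action action;
};

static int send_config_request(const struct cfg_request *req, void *buf, size_t len)
{
	/* A real driver hands this to firmware; here we only report what was asked. */
	printf("action=%d page=%d buf=%p len=%zu\n",
	       req->action, req->page_number, buf, len);
	return 0;
}

static int get_iounit_page8(unsigned char *page, size_t len)
{
	struct cfg_request req;
	int r;

	memset(&req, 0, sizeof(req));
	req.page_number = 8;

	req.action = PAGE_HEADER;	/* step 1: fetch the page header */
	r = send_config_request(&req, NULL, 0);
	if (r)
		return r;

	req.action = READ_CURRENT;	/* step 2: read the current page into the buffer */
	return send_config_request(&req, page, len);
}

int main(void)
{
	unsigned char page[32];

	return get_iounit_page8(page, sizeof(page));
}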
index ca4e563..4e50960 100644 (file)
@@ -4,7 +4,8 @@
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
  * Copyright (C) 2007-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 7f842c8..46b2fc5 100644 (file)
@@ -4,7 +4,8 @@
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
  * Copyright (C) 2007-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index cc57ef3..277120d 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
  * Copyright (C) 2007-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 6a1c036..3f26147 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
  * Copyright (C) 2007-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -2729,9 +2730,18 @@ _scsih_host_reset(struct scsi_cmnd *scmd)
            ioc->name, scmd);
        scsi_print_command(scmd);
 
+       if (ioc->is_driver_loading) {
+               printk(MPT2SAS_INFO_FMT "Blocking the host reset\n",
+                                                         ioc->name);
+               r = FAILED;
+               goto out;
+       }
+
        retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
            FORCE_BIG_HAMMER);
        r = (retval < 0) ? FAILED : SUCCESS;
+
+ out:
        printk(MPT2SAS_INFO_FMT "host reset: %s scmd(%p)\n",
            ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
 
@@ -3646,6 +3656,31 @@ _scsih_check_volume_delete_events(struct MPT2SAS_ADAPTER *ioc,
                    le16_to_cpu(event_data->VolDevHandle));
 }
 
+/**
+ * _scsih_temp_threshold_events - display temperature threshold exceeded events
+ * @ioc: per adapter object
+ * @event_data: the temp threshold event data
+ * Context: interrupt time.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_temp_threshold_events(struct MPT2SAS_ADAPTER *ioc,
+       Mpi2EventDataTemperature_t *event_data)
+{
+       if (ioc->temp_sensors_count >= event_data->SensorNum) {
+               printk(MPT2SAS_ERR_FMT "Temperature Threshold flags %s%s%s%s"
+                 " exceeded for Sensor: %d !!!\n", ioc->name,
+                 ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
+                 ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
+                 ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
+                 ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
+                 event_data->SensorNum);
+               printk(MPT2SAS_ERR_FMT "Current Temp In Celsius: %d\n",
+                       ioc->name, event_data->CurrentTemperature);
+       }
+}
+
 /**
  * _scsih_flush_running_cmds - completing outstanding commands.
  * @ioc: per adapter object
@@ -4509,6 +4544,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
                        scmd->result = DID_TRANSPORT_DISRUPTED << 16;
                        goto out;
                }
+               if (log_info == 0x32010081) {
+                       scmd->result = DID_RESET << 16;
+                       break;
+               }
                scmd->result = DID_SOFT_ERROR << 16;
                break;
        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
@@ -7557,6 +7596,12 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
        case MPI2_EVENT_IR_PHYSICAL_DISK:
                break;
 
+       case MPI2_EVENT_TEMP_THRESHOLD:
+               _scsih_temp_threshold_events(ioc,
+                       (Mpi2EventDataTemperature_t *)
+                       mpi_reply->EventData);
+               break;
+
        default: /* ignore the rest */
                return;
        }
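The new _scsih_temp_threshold_events() handler above reads the low four bits of the event's Status word as per-threshold "exceeded" flags (0 through 3) and logs whichever are set, together with the sensor number and current temperature. A small stand-alone sketch of that bit decoding (field layout taken from the hunk above; names are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Print which of the four threshold flags are set in the low bits of status. */
static void print_temp_flags(uint16_t status, int sensor, int temp_celsius)
{
	int bit;

	printf("Temperature Threshold flags");
	for (bit = 0; bit < 4; bit++)
		if (status & (1u << bit))
			printf(" %d", bit);
	printf(" exceeded for Sensor: %d, current temp %d C\n",
	       sensor, temp_celsius);
}

int main(void)
{
	print_temp_flags(0x5, 1, 63);	/* flags 0 and 2 set on sensor 1 */
	return 0;
}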
index e689bf2..ff2500a 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
  * Copyright (C) 2007-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 1560115..14a781b 100644 (file)
@@ -4,7 +4,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -619,6 +620,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        case MPI2_EVENT_LOG_ENTRY_ADDED:
                desc = "Log Entry Added";
                break;
+       case MPI2_EVENT_TEMP_THRESHOLD:
+               desc = "Temperature Threshold";
+               break;
        }
 
        if (!desc)
@@ -1580,6 +1584,8 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
 
        list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
                list_del(&reply_q->list);
+               irq_set_affinity_hint(reply_q->vector, NULL);
+               free_cpumask_var(reply_q->affinity_hint);
                synchronize_irq(reply_q->vector);
                free_irq(reply_q->vector, reply_q);
                kfree(reply_q);
@@ -1609,6 +1615,11 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
        reply_q->ioc = ioc;
        reply_q->msix_index = index;
        reply_q->vector = vector;
+
+       if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
+               return -ENOMEM;
+       cpumask_clear(reply_q->affinity_hint);
+
        atomic_set(&reply_q->busy, 0);
        if (ioc->msix_enable)
                snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
@@ -1643,6 +1654,7 @@ static void
 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
 {
        unsigned int cpu, nr_cpus, nr_msix, index = 0;
+       struct adapter_reply_queue *reply_q;
 
        if (!_base_is_controller_msix_enabled(ioc))
                return;
@@ -1657,20 +1669,30 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
 
        cpu = cpumask_first(cpu_online_mask);
 
-       do {
+       list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+
                unsigned int i, group = nr_cpus / nr_msix;
 
+               if (cpu >= nr_cpus)
+                       break;
+
                if (index < nr_cpus % nr_msix)
                        group++;
 
                for (i = 0 ; i < group ; i++) {
                        ioc->cpu_msix_table[cpu] = index;
+                       cpumask_or(reply_q->affinity_hint,
+                                  reply_q->affinity_hint, get_cpu_mask(cpu));
                        cpu = cpumask_next(cpu, cpu_online_mask);
                }
 
+               if (irq_set_affinity_hint(reply_q->vector,
+                                          reply_q->affinity_hint))
+                       dinitprintk(ioc, pr_info(MPT3SAS_FMT
+                           "error setting affinity hint for irq vector %d\n",
+                           ioc->name, reply_q->vector));
                index++;
-
-       } while (cpu < nr_cpus);
+       }
 }
 
 /**
@@ -2500,6 +2522,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
        mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
        mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
        mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+       mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
        _base_display_ioc_capabilities(ioc);
 
        /*
@@ -2516,6 +2539,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
                    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
        ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
        mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+
+       if (ioc->iounit_pg8.NumSensors)
+               ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
 }
 
 /**
@@ -2659,8 +2685,14 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc,  int sleep_flag)
 
        if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
                sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
-       else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS)
-               sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS;
+       else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS) {
+               sg_tablesize = min_t(unsigned short, sg_tablesize,
+                                     SCSI_MAX_SG_CHAIN_SEGMENTS);
+               pr_warn(MPT3SAS_FMT
+                "sg_tablesize(%u) is bigger than kernel"
+                " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
+                sg_tablesize, MPT3SAS_MAX_PHYS_SEGMENTS);
+       }
        ioc->shost->sg_tablesize = sg_tablesize;
 
        ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -3419,7 +3451,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
-       u8 issue_reset;
+       bool issue_reset = false;
        int rc;
        void *request;
        u16 wait_state_count;
@@ -3483,7 +3515,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
                if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
-                       issue_reset = 1;
+                       issue_reset = true;
                goto issue_host_reset;
        }
        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -3523,7 +3555,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
-       u8 issue_reset;
+       bool issue_reset = false;
        int rc;
        void *request;
        u16 wait_state_count;
@@ -3581,7 +3613,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SepRequest_t)/4);
                if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
-                       issue_reset = 1;
+                       issue_reset = true;
                goto issue_host_reset;
        }
        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -4720,6 +4752,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
        _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
        _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
+       _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
 
        r = _base_make_ioc_operational(ioc, CAN_SLEEP);
        if (r)
index 40926aa..afa8816 100644 (file)
@@ -4,7 +4,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -68,7 +69,7 @@
 
 /* driver versioning info */
 #define MPT3SAS_DRIVER_NAME            "mpt3sas"
-#define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
+#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION    "LSI MPT Fusion SAS 3.0 Device Driver"
 #define MPT3SAS_DRIVER_VERSION         "04.100.00.00"
 #define MPT3SAS_MAJOR_VERSION          4
@@ -506,6 +507,7 @@ struct adapter_reply_queue {
        Mpi2ReplyDescriptorsUnion_t *reply_post_free;
        char                    name[MPT_NAME_LENGTH];
        atomic_t                busy;
+       cpumask_var_t           affinity_hint;
        struct list_head        list;
 };
 
@@ -659,6 +661,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @ioc_pg8: static ioc page 8
  * @iounit_pg0: static iounit page 0
  * @iounit_pg1: static iounit page 1
+ * @iounit_pg8: static iounit page 8
  * @sas_hba: sas host object
  * @sas_expander_list: expander object list
  * @sas_node_lock:
@@ -728,6 +731,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @reply_post_host_index: head index in the pool where FW completes IO
  * @delayed_tr_list: target reset link list
  * @delayed_tr_volume_list: volume target reset link list
+ * @temp_sensors_count: flag to carry the number of temperature sensors
  */
 struct MPT3SAS_ADAPTER {
        struct list_head list;
@@ -834,6 +838,7 @@ struct MPT3SAS_ADAPTER {
        Mpi2IOCPage8_t ioc_pg8;
        Mpi2IOUnitPage0_t iounit_pg0;
        Mpi2IOUnitPage1_t iounit_pg1;
+       Mpi2IOUnitPage8_t iounit_pg8;
 
        struct _boot_device req_boot_device;
        struct _boot_device req_alt_boot_device;
@@ -934,6 +939,7 @@ struct MPT3SAS_ADAPTER {
 
        struct list_head delayed_tr_list;
        struct list_head delayed_tr_volume_list;
+       u8              temp_sensors_count;
 
        /* diag buffer support */
        u8              *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
@@ -1082,6 +1088,8 @@ int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
        *mpi_reply, Mpi2IOUnitPage1_t *config_page);
 int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
        *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+       *mpi_reply, Mpi2IOUnitPage8_t *config_page);
 int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
        Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
        u16 sz);
index 4472c2a..e45c461 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -870,6 +871,42 @@ mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
        return r;
 }
 
+/**
+ * mpt3sas_config_get_iounit_pg8 - obtain iounit page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc,
+       Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page)
+{
+       Mpi2ConfigRequest_t mpi_request;
+       int r;
+
+       memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+       mpi_request.Function = MPI2_FUNCTION_CONFIG;
+       mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+       mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+       mpi_request.Header.PageNumber = 8;
+       mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION;
+       ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+       r = _config_request(ioc, &mpi_request, mpi_reply,
+           MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+       if (r)
+               goto out;
+
+       mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+       r = _config_request(ioc, &mpi_request, mpi_reply,
+           MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+           sizeof(*config_page));
+ out:
+       return r;
+}
+
 /**
  * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8
  * @ioc: per adapter object
index dca1487..080c8a7 100644 (file)
@@ -4,7 +4,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 5f3d7fd..aee99ce 100644 (file)
@@ -4,7 +4,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 4778e7d..4e8a63f 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 94261ee..5a97e32 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -2392,9 +2393,17 @@ _scsih_host_reset(struct scsi_cmnd *scmd)
            ioc->name, scmd);
        scsi_print_command(scmd);
 
+       if (ioc->is_driver_loading) {
+               pr_info(MPT3SAS_FMT "Blocking the host reset\n",
+                   ioc->name);
+               r = FAILED;
+               goto out;
+       }
+
        retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
            FORCE_BIG_HAMMER);
        r = (retval < 0) ? FAILED : SUCCESS;
+out:
        pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
            ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
 
@@ -3341,6 +3350,31 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
                    le16_to_cpu(event_data->VolDevHandle));
 }
 
+/**
+ * _scsih_temp_threshold_events - display temperature threshold exceeded events
+ * @ioc: per adapter object
+ * @event_data: the temp threshold event data
+ * Context: interrupt time.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
+       Mpi2EventDataTemperature_t *event_data)
+{
+       if (ioc->temp_sensors_count >= event_data->SensorNum) {
+               pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
+                 " exceeded for Sensor: %d !!!\n", ioc->name,
+                 ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
+                 ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
+                 ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
+                 ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
+                 event_data->SensorNum);
+               pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
+                       ioc->name, event_data->CurrentTemperature);
+       }
+}
+
 /**
  * _scsih_flush_running_cmds - completing outstanding commands.
  * @ioc: per adapter object
@@ -7194,6 +7228,12 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
        case MPI2_EVENT_IR_PHYSICAL_DISK:
                break;
 
+       case MPI2_EVENT_TEMP_THRESHOLD:
+               _scsih_temp_threshold_events(ioc,
+                       (Mpi2EventDataTemperature_t *)
+                       mpi_reply->EventData);
+               break;
+
        default: /* ignore the rest */
                return 1;
        }
index 3637ae6..efb98af 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 8a2dd11..b60fd7a 100644 (file)
@@ -4,7 +4,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index f681db5..6586a46 100644 (file)
@@ -5,7 +5,8 @@
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
  * Copyright (C) 2012-2014  LSI Corporation
- *  (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 90abb03..c6077ce 100644 (file)
@@ -1441,8 +1441,6 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
        return IRQ_RETVAL(handled);
 }
 
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ##args)
 
 static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
 {
@@ -1458,64 +1456,63 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
        data = (nsp32_hw_data *)host->hostdata;
        base = host->io_port;
 
-       SPRINTF("NinjaSCSI-32 status\n\n");
-       SPRINTF("Driver version:        %s, $Revision: 1.33 $\n", nsp32_release_version);
-       SPRINTF("SCSI host No.:         %d\n",          hostno);
-       SPRINTF("IRQ:                   %d\n",          host->irq);
-       SPRINTF("IO:                    0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
-       SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
-       SPRINTF("sg_tablesize:          %d\n",          host->sg_tablesize);
-       SPRINTF("Chip revision:         0x%x\n",        (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
+       seq_puts(m, "NinjaSCSI-32 status\n\n");
+       seq_printf(m, "Driver version:        %s, $Revision: 1.33 $\n", nsp32_release_version);
+       seq_printf(m, "SCSI host No.:         %d\n",            hostno);
+       seq_printf(m, "IRQ:                   %d\n",            host->irq);
+       seq_printf(m, "IO:                    0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
+       seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n",   host->base, host->base + data->MmioLength - 1);
+       seq_printf(m, "sg_tablesize:          %d\n",            host->sg_tablesize);
+       seq_printf(m, "Chip revision:         0x%x\n",          (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
 
        mode_reg = nsp32_index_read1(base, CHIP_MODE);
        model    = data->pci_devid->driver_data;
 
 #ifdef CONFIG_PM
-       SPRINTF("Power Management:      %s\n",          (mode_reg & OPTF) ? "yes" : "no");
+       seq_printf(m, "Power Management:      %s\n",          (mode_reg & OPTF) ? "yes" : "no");
 #endif
-       SPRINTF("OEM:                   %ld, %s\n",     (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
+       seq_printf(m, "OEM:                   %ld, %s\n",     (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
 
        spin_lock_irqsave(&(data->Lock), flags);
-       SPRINTF("CurrentSC:             0x%p\n\n",      data->CurrentSC);
+       seq_printf(m, "CurrentSC:             0x%p\n\n",      data->CurrentSC);
        spin_unlock_irqrestore(&(data->Lock), flags);
 
 
-       SPRINTF("SDTR status\n");
+       seq_puts(m, "SDTR status\n");
        for (id = 0; id < ARRAY_SIZE(data->target); id++) {
 
-                SPRINTF("id %d: ", id);
+               seq_printf(m, "id %d: ", id);
 
                if (id == host->this_id) {
-                       SPRINTF("----- NinjaSCSI-32 host adapter\n");
+                       seq_puts(m, "----- NinjaSCSI-32 host adapter\n");
                        continue;
                }
 
                if (data->target[id].sync_flag == SDTR_DONE) {
                        if (data->target[id].period == 0            &&
                            data->target[id].offset == ASYNC_OFFSET ) {
-                               SPRINTF("async");
+                               seq_puts(m, "async");
                        } else {
-                               SPRINTF(" sync");
+                               seq_puts(m, " sync");
                        }
                } else {
-                       SPRINTF(" none");
+                       seq_puts(m, " none");
                }
 
                if (data->target[id].period != 0) {
 
                        speed = 1000000 / (data->target[id].period * 4);
 
-                       SPRINTF(" transfer %d.%dMB/s, offset %d",
+                       seq_printf(m, " transfer %d.%dMB/s, offset %d",
                                speed / 1000,
                                speed % 1000,
                                data->target[id].offset
                                );
                }
-               SPRINTF("\n");
+               seq_putc(m, '\n');
        }
        return 0;
 }
-#undef SPRINTF
 
 
 
index 34aad32..1b6c883 100644 (file)
@@ -1364,9 +1364,6 @@ static const char *nsp_info(struct Scsi_Host *shpnt)
        return data->nspinfo;
 }
 
-#undef SPRINTF
-#define SPRINTF(args...) seq_printf(m, ##args)
-
 static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host)
 {
        int id;
@@ -1378,75 +1375,74 @@ static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host)
        hostno = host->host_no;
        data = (nsp_hw_data *)host->hostdata;
 
-       SPRINTF("NinjaSCSI status\n\n");
-       SPRINTF("Driver version:        $Revision: 1.23 $\n");
-       SPRINTF("SCSI host No.:         %d\n",          hostno);
-       SPRINTF("IRQ:                   %d\n",          host->irq);
-       SPRINTF("IO:                    0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
-       SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
-       SPRINTF("sg_tablesize:          %d\n",          host->sg_tablesize);
+       seq_puts(m, "NinjaSCSI status\n\n"
+               "Driver version:        $Revision: 1.23 $\n");
+       seq_printf(m, "SCSI host No.:         %d\n",          hostno);
+       seq_printf(m, "IRQ:                   %d\n",          host->irq);
+       seq_printf(m, "IO:                    0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
+       seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
+       seq_printf(m, "sg_tablesize:          %d\n",          host->sg_tablesize);
 
-       SPRINTF("burst transfer mode:   ");
+       seq_puts(m, "burst transfer mode:   ");
        switch (nsp_burst_mode) {
        case BURST_IO8:
-               SPRINTF("io8");
+               seq_puts(m, "io8");
                break;
        case BURST_IO32:
-               SPRINTF("io32");
+               seq_puts(m, "io32");
                break;
        case BURST_MEM32:
-               SPRINTF("mem32");
+               seq_puts(m, "mem32");
                break;
        default:
-               SPRINTF("???");
+               seq_puts(m, "???");
                break;
        }
-       SPRINTF("\n");
+       seq_putc(m, '\n');
 
 
        spin_lock_irqsave(&(data->Lock), flags);
-       SPRINTF("CurrentSC:             0x%p\n\n",      data->CurrentSC);
+       seq_printf(m, "CurrentSC:             0x%p\n\n",      data->CurrentSC);
        spin_unlock_irqrestore(&(data->Lock), flags);
 
-       SPRINTF("SDTR status\n");
+       seq_puts(m, "SDTR status\n");
        for(id = 0; id < ARRAY_SIZE(data->Sync); id++) {
 
-               SPRINTF("id %d: ", id);
+               seq_printf(m, "id %d: ", id);
 
                if (id == host->this_id) {
-                       SPRINTF("----- NinjaSCSI-3 host adapter\n");
+                       seq_puts(m, "----- NinjaSCSI-3 host adapter\n");
                        continue;
                }
 
                switch(data->Sync[id].SyncNegotiation) {
                case SYNC_OK:
-                       SPRINTF(" sync");
+                       seq_puts(m, " sync");
                        break;
                case SYNC_NG:
-                       SPRINTF("async");
+                       seq_puts(m, "async");
                        break;
                case SYNC_NOT_YET:
-                       SPRINTF(" none");
+                       seq_puts(m, " none");
                        break;
                default:
-                       SPRINTF("?????");
+                       seq_puts(m, "?????");
                        break;
                }
 
                if (data->Sync[id].SyncPeriod != 0) {
                        speed = 1000000 / (data->Sync[id].SyncPeriod * 4);
 
-                       SPRINTF(" transfer %d.%dMB/s, offset %d",
+                       seq_printf(m, " transfer %d.%dMB/s, offset %d",
                                speed / 1000,
                                speed % 1000,
                                data->Sync[id].SyncOffset
                                );
                }
-               SPRINTF("\n");
+               seq_putc(m, '\n');
        }
        return 0;
 }
-#undef SPRINTF
 
 /*---------------------------------------------------------------*/
 /* error handler                                                 */
index 2ca39b8..15cf074 100644 (file)
@@ -23,10 +23,10 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
 
        mutex_lock(&ha->fce_mutex);
 
-       seq_printf(s, "FCE Trace Buffer\n");
+       seq_puts(s, "FCE Trace Buffer\n");
        seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
        seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
-       seq_printf(s, "FCE Enable Registers\n");
+       seq_puts(s, "FCE Enable Registers\n");
        seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
            ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
            ha->fce_mb[5], ha->fce_mb[6]);
@@ -38,11 +38,11 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
                        seq_printf(s, "\n%llx: ",
                            (unsigned long long)((cnt * 4) + fce_start));
                else
-                       seq_printf(s, " ");
+                       seq_putc(s, ' ');
                seq_printf(s, "%08x", *fce++);
        }
 
-       seq_printf(s, "\nEnd\n");
+       seq_puts(s, "\nEnd\n");
 
        mutex_unlock(&ha->fce_mutex);
 
index 9b38299..c9c3b57 100644 (file)
@@ -531,7 +531,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
         *
         * 3: same as 2
         *
-        * 4: same as 3 plus dump extra junk
+        * 4: same as 3
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
@@ -540,13 +540,6 @@ void scsi_log_send(struct scsi_cmnd *cmd)
                        scmd_printk(KERN_INFO, cmd,
                                    "Send: scmd 0x%p\n", cmd);
                        scsi_print_command(cmd);
-                       if (level > 3) {
-                               printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
-                                      " queuecommand 0x%p\n",
-                                       scsi_sglist(cmd), scsi_bufflen(cmd),
-                                       cmd->device->host->hostt->queuecommand);
-
-                       }
                }
        }
 }
@@ -572,7 +565,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
                                       SCSI_LOG_MLCOMPLETE_BITS);
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
-                       scsi_print_result(cmd, "Done", disposition);
+                       scsi_print_result(cmd, "Done", disposition);
                        scsi_print_command(cmd);
                        if (status_byte(cmd->result) & CHECK_CONDITION)
                                scsi_print_sense(cmd);
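The scsi_log_send()/scsi_log_completion() hunks above key off SCSI_LOG_LEVEL(), which pulls a small per-facility level field out of the packed scsi_logging_level word before deciding how much to print. A stand-alone sketch of that style of extraction (the shift and width below are placeholders, not the kernel's actual layout):

#include <stdio.h>

/* Extract a 'bits'-wide logging level starting at 'shift' from a packed word. */
static unsigned int log_level(unsigned int packed, unsigned int shift, unsigned int bits)
{
	return (packed >> shift) & ((1u << bits) - 1);
}

int main(void)
{
	unsigned int scsi_logging_level = 0x1c0;	/* hypothetical packed value */

	printf("mlqueue level = %u\n", log_level(scsi_logging_level, 6, 3));
	return 0;
}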
index 4aca1b0..1132321 100644 (file)
@@ -80,6 +80,8 @@ static const char *scsi_debug_version_date = "20141022";
 #define INVALID_FIELD_IN_PARAM_LIST 0x26
 #define UA_RESET_ASC 0x29
 #define UA_CHANGED_ASC 0x2a
+#define TARGET_CHANGED_ASC 0x3f
+#define LUNS_CHANGED_ASCQ 0x0e
 #define INSUFF_RES_ASC 0x55
 #define INSUFF_RES_ASCQ 0x3
 #define POWER_ON_RESET_ASCQ 0x0
@@ -91,6 +93,8 @@ static const char *scsi_debug_version_date = "20141022";
 #define THRESHOLD_EXCEEDED 0x5d
 #define LOW_POWER_COND_ON 0x5e
 #define MISCOMPARE_VERIFY_ASC 0x1d
+#define MICROCODE_CHANGED_ASCQ 0x1     /* with TARGET_CHANGED_ASC */
+#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
 
 /* Additional Sense Code Qualifier (ASCQ) */
 #define ACK_NAK_TO 0x3
@@ -180,7 +184,10 @@ static const char *scsi_debug_version_date = "20141022";
 #define SDEBUG_UA_BUS_RESET 1
 #define SDEBUG_UA_MODE_CHANGED 2
 #define SDEBUG_UA_CAPACITY_CHANGED 3
-#define SDEBUG_NUM_UAS 4
+#define SDEBUG_UA_LUNS_CHANGED 4
+#define SDEBUG_UA_MICROCODE_CHANGED 5  /* simulate firmware change */
+#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
+#define SDEBUG_NUM_UAS 7
 
 /* for check_readiness() */
 #define UAS_ONLY 1     /* check for UAs only */
@@ -326,6 +333,7 @@ static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 
 struct opcode_info_t {
        u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff
@@ -480,8 +488,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
        {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
            NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
                   0, 0, 0, 0, 0, 0} },
-       {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */
-           {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+       {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
+           {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
+            0, 0, 0, 0} },                     /* WRITE_BUFFER */
        {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
            write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
                              0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
@@ -782,6 +791,22 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
        /* return -ENOTTY; // correct return but upsets fdisk */
 }
 
+static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
+{
+       struct sdebug_host_info *sdhp;
+       struct sdebug_dev_info *dp;
+
+       spin_lock(&sdebug_host_list_lock);
+       list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+               list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
+                       if ((devip->sdbg_host == dp->sdbg_host) &&
+                           (devip->target == dp->target))
+                               clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
+               }
+       }
+       spin_unlock(&sdebug_host_list_lock);
+}
+
 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
                           struct sdebug_dev_info * devip)
 {
@@ -817,6 +842,36 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
                        if (debug)
                                cp = "capacity data changed";
                        break;
+               case SDEBUG_UA_MICROCODE_CHANGED:
+                       mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+                                TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
+                       if (debug)
+                               cp = "microcode has been changed";
+                       break;
+               case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
+                       mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+                                       TARGET_CHANGED_ASC,
+                                       MICROCODE_CHANGED_WO_RESET_ASCQ);
+                       if (debug)
+                               cp = "microcode has been changed without reset";
+                       break;
+               case SDEBUG_UA_LUNS_CHANGED:
+                       /*
+                        * SPC-3 behavior is to report a UNIT ATTENTION with
+                        * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
+                        * on the target, until a REPORT LUNS command is
+                        * received.  SPC-4 behavior is to report it only once.
+                        * NOTE:  scsi_debug_scsi_level does not use the same
+                        * values as struct scsi_device->scsi_level.
+                        */
+                       if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */
+                               clear_luns_changed_on_target(devip);
+                       mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+                                       TARGET_CHANGED_ASC,
+                                       LUNS_CHANGED_ASCQ);
+                       if (debug)
+                               cp = "reported luns data has changed";
+                       break;
                default:
                        pr_warn("%s: unexpected unit attention code=%d\n",
                                __func__, k);
@@ -3033,6 +3088,55 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
 }
 
+/* Note the mode field is in the same position as the (lower) service action
+ * field. For the Report supported operation codes command, SPC-4 suggests
+ * each mode of this command should be reported separately; for future. */
+static int
+resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+       u8 *cmd = scp->cmnd;
+       struct scsi_device *sdp = scp->device;
+       struct sdebug_dev_info *dp;
+       u8 mode;
+
+       mode = cmd[1] & 0x1f;
+       switch (mode) {
+       case 0x4:       /* download microcode (MC) and activate (ACT) */
+               /* set UAs on this device only */
+               set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+               set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
+               break;
+       case 0x5:       /* download MC, save and ACT */
+               set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
+               break;
+       case 0x6:       /* download MC with offsets and ACT */
+               /* set UAs on most devices (LUs) in this target */
+               list_for_each_entry(dp,
+                                   &devip->sdbg_host->dev_info_list,
+                                   dev_list)
+                       if (dp->target == sdp->id) {
+                               set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
+                               if (devip != dp)
+                                       set_bit(SDEBUG_UA_MICROCODE_CHANGED,
+                                               dp->uas_bm);
+                       }
+               break;
+       case 0x7:       /* download MC with offsets, save, and ACT */
+               /* set UA on all devices (LUs) in this target */
+               list_for_each_entry(dp,
+                                   &devip->sdbg_host->dev_info_list,
+                                   dev_list)
+                       if (dp->target == sdp->id)
+                               set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
+                                       dp->uas_bm);
+               break;
+       default:
+               /* do nothing for this command for other mode values */
+               break;
+       }
+       return 0;
+}
+
 static int
 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 {
@@ -3229,6 +3333,7 @@ static int resp_report_luns(struct scsi_cmnd * scp,
        unsigned char arr[SDEBUG_RLUN_ARR_SZ];
        unsigned char * max_addr;
 
+       clear_luns_changed_on_target(devip);
        alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
        shortish = (alloc_len < 4);
        if (shortish || (select_report > 2)) {
@@ -4369,10 +4474,27 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
                              size_t count)
 {
         int n;
+       bool changed;
 
        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+               changed = (scsi_debug_max_luns != n);
                scsi_debug_max_luns = n;
                sdebug_max_tgts_luns();
+               if (changed && (scsi_debug_scsi_level >= 5)) {  /* >= SPC-3 */
+                       struct sdebug_host_info *sdhp;
+                       struct sdebug_dev_info *dp;
+
+                       spin_lock(&sdebug_host_list_lock);
+                       list_for_each_entry(sdhp, &sdebug_host_list,
+                                           host_list) {
+                               list_for_each_entry(dp, &sdhp->dev_info_list,
+                                                   dev_list) {
+                                       set_bit(SDEBUG_UA_LUNS_CHANGED,
+                                               dp->uas_bm);
+                               }
+                       }
+                       spin_unlock(&sdebug_host_list_lock);
+               }
                return count;
        }
        return -EINVAL;
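In the scsi_debug changes above, the new resp_write_buffer() keys off the WRITE BUFFER mode field (CDB byte 1, bits 4:0): modes 4 and 5 queue microcode-changed unit attentions on the addressed LU only, while modes 6 and 7 fan them out to every LU on the same target, with the "save" variants using the without-reset ASCQ. A rough sketch of that mode-to-action mapping (printfs stand in for the driver's set_bit() calls; names are illustrative):

#include <stdio.h>

/* Map the WRITE BUFFER mode field onto the unit attentions that get queued. */
static void write_buffer_mode(unsigned char cdb_byte1)
{
	switch (cdb_byte1 & 0x1f) {
	case 0x4:	/* download microcode and activate */
		printf("this LU: bus reset + microcode changed\n");
		break;
	case 0x5:	/* download microcode, save and activate */
		printf("this LU: microcode changed without reset\n");
		break;
	case 0x6:	/* download with offsets and activate */
		printf("all LUs on target: bus reset; other LUs also get microcode changed\n");
		break;
	case 0x7:	/* download with offsets, save and activate */
		printf("all LUs on target: microcode changed without reset\n");
		break;
	default:	/* other modes queue no unit attention here */
		printf("mode 0x%x: nothing queued\n", cdb_byte1 & 0x1f);
		break;
	}
}

int main(void)
{
	write_buffer_mode(0x07);
	return 0;
}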
index 8afb016..4cdaffc 100644 (file)
@@ -124,41 +124,37 @@ scmd_eh_abort_handler(struct work_struct *work)
        if (scsi_host_eh_past_deadline(sdev->host)) {
                SCSI_LOG_ERROR_RECOVERY(3,
                        scmd_printk(KERN_INFO, scmd,
-                                   "scmd %p eh timeout, not aborting\n",
-                                   scmd));
+                                   "eh timeout, not aborting\n"));
        } else {
                SCSI_LOG_ERROR_RECOVERY(3,
                        scmd_printk(KERN_INFO, scmd,
-                                   "aborting command %p\n", scmd));
+                                   "aborting command\n"));
                rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
                if (rtn == SUCCESS) {
                        set_host_byte(scmd, DID_TIME_OUT);
                        if (scsi_host_eh_past_deadline(sdev->host)) {
                                SCSI_LOG_ERROR_RECOVERY(3,
                                        scmd_printk(KERN_INFO, scmd,
-                                                   "scmd %p eh timeout, "
-                                                   "not retrying aborted "
-                                                   "command\n", scmd));
+                                                   "eh timeout, not retrying "
+                                                   "aborted command\n"));
                        } else if (!scsi_noretry_cmd(scmd) &&
                            (++scmd->retries <= scmd->allowed)) {
                                SCSI_LOG_ERROR_RECOVERY(3,
                                        scmd_printk(KERN_WARNING, scmd,
-                                                   "scmd %p retry "
-                                                   "aborted command\n", scmd));
+                                                   "retry aborted command\n"));
                                scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
                                return;
                        } else {
                                SCSI_LOG_ERROR_RECOVERY(3,
                                        scmd_printk(KERN_WARNING, scmd,
-                                                   "scmd %p finish "
-                                                   "aborted command\n", scmd));
+                                                   "finish aborted command\n"));
                                scsi_finish_command(scmd);
                                return;
                        }
                } else {
                        SCSI_LOG_ERROR_RECOVERY(3,
                                scmd_printk(KERN_INFO, scmd,
-                                           "scmd %p abort %s\n", scmd,
+                                           "cmd abort %s\n",
                                            (rtn == FAST_IO_FAIL) ?
                                            "not send" : "failed"));
                }
@@ -167,8 +163,7 @@ scmd_eh_abort_handler(struct work_struct *work)
        if (!scsi_eh_scmd_add(scmd, 0)) {
                SCSI_LOG_ERROR_RECOVERY(3,
                        scmd_printk(KERN_WARNING, scmd,
-                                   "scmd %p terminate "
-                                   "aborted command\n", scmd));
+                                   "terminate aborted command\n"));
                set_host_byte(scmd, DID_TIME_OUT);
                scsi_finish_command(scmd);
        }
@@ -194,7 +189,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
                scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
                SCSI_LOG_ERROR_RECOVERY(3,
                        scmd_printk(KERN_INFO, scmd,
-                                   "scmd %p previous abort failed\n", scmd));
+                                   "previous abort failed\n"));
                BUG_ON(delayed_work_pending(&scmd->abort_work));
                return FAILED;
        }
@@ -208,8 +203,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
                spin_unlock_irqrestore(shost->host_lock, flags);
                SCSI_LOG_ERROR_RECOVERY(3,
                        scmd_printk(KERN_INFO, scmd,
-                                   "scmd %p not aborting, host in recovery\n",
-                                   scmd));
+                                   "not aborting, host in recovery\n"));
                return FAILED;
        }
 
@@ -219,8 +213,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
 
        scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
        SCSI_LOG_ERROR_RECOVERY(3,
-               scmd_printk(KERN_INFO, scmd,
-                           "scmd %p abort scheduled\n", scmd));
+               scmd_printk(KERN_INFO, scmd, "abort scheduled\n"));
        queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
        return SUCCESS;
 }
@@ -737,8 +730,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
        struct completion *eh_action;
 
        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
-                       "%s scmd: %p result: %x\n",
-                       __func__, scmd, scmd->result));
+                       "%s result: %x\n", __func__, scmd->result));
 
        eh_action = scmd->device->host->eh_action;
        if (eh_action)
@@ -868,6 +860,7 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
 
 /**
  * scsi_try_to_abort_cmd - Ask host to abort a SCSI command
+ * @hostt:     SCSI driver host template
  * @scmd:      SCSI cmd used to send a target reset
  *
  * Return value:
@@ -1052,8 +1045,8 @@ retry:
        scsi_log_completion(scmd, rtn);
 
        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
-                       "%s: scmd: %p, timeleft: %ld\n",
-                       __func__, scmd, timeleft));
+                       "%s timeleft: %ld\n",
+                       __func__, timeleft));
 
        /*
         * If there is time left scsi_eh_done got called, and we will examine
@@ -1192,8 +1185,7 @@ int scsi_eh_get_sense(struct list_head *work_q,
                        continue;
 
                SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
-                       "sense requested for %p result %x\n",
-                       scmd, scmd->result));
+                       "sense requested, result %x\n", scmd->result));
                SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));
 
                rtn = scsi_decide_disposition(scmd);
@@ -1235,7 +1227,7 @@ retry_tur:
                                scmd->device->eh_timeout, 0);
 
        SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
-               "%s: scmd %p rtn %x\n", __func__, scmd, rtn));
+               "%s return: %x\n", __func__, rtn));
 
        switch (rtn) {
        case NEEDS_RETRY:
@@ -2092,8 +2084,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
                    (++scmd->retries <= scmd->allowed)) {
                        SCSI_LOG_ERROR_RECOVERY(3,
                                scmd_printk(KERN_INFO, scmd,
-                                            "%s: flush retry cmd: %p\n",
-                                            current->comm, scmd));
+                                            "%s: flush retry cmd\n",
+                                            current->comm));
                                scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
                } else {
                        /*
@@ -2105,8 +2097,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
                                scmd->result |= (DRIVER_TIMEOUT << 24);
                        SCSI_LOG_ERROR_RECOVERY(3,
                                scmd_printk(KERN_INFO, scmd,
-                                            "%s: flush finish cmd: %p\n",
-                                            current->comm, scmd));
+                                            "%s: flush finish cmd\n",
+                                            current->comm));
                        scsi_finish_command(scmd);
                }
        }
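
The scsi_error.c hunks above stop embedding the raw scmd pointer in log messages: the identifying information (disk name and request tag) is now emitted by the common header built in the new scsi_logging.c further down. A minimal sketch of a call site under that convention; the function name is hypothetical, only scmd_printk() and its arguments are taken from the patch:

#include <linux/printk.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/*
 * Hypothetical driver error path: the "[disk] tag#N" prefix is added by
 * scmd_printk() itself, so the message no longer needs to print the
 * command pointer to stay correlatable with a request.
 */
static void example_report_retry(struct scsi_cmnd *scmd)
{
	scmd_printk(KERN_WARNING, scmd,
		    "retry aborted command, result 0x%x\n", scmd->result);
}
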
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
new file mode 100644 (file)
index 0000000..bd70339
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+ * scsi_logging.c
+ *
+ * Copyright (C) 2014 SUSE Linux Products GmbH
+ * Copyright (C) 2014 Hannes Reinecke <hare@suse.de>
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
+
+#define SCSI_LOG_SPOOLSIZE 4096
+
+#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
+#warning SCSI logging bitmask too large
+#endif
+
+struct scsi_log_buf {
+       char buffer[SCSI_LOG_SPOOLSIZE];
+       unsigned long map;
+};
+
+static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
+
+static char *scsi_log_reserve_buffer(size_t *len)
+{
+       struct scsi_log_buf *buf;
+       unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
+       unsigned long idx = 0;
+
+       preempt_disable();
+       buf = this_cpu_ptr(&scsi_format_log);
+       idx = find_first_zero_bit(&buf->map, map_bits);
+       if (likely(idx < map_bits)) {
+               while (test_and_set_bit(idx, &buf->map)) {
+                       idx = find_next_zero_bit(&buf->map, map_bits, idx);
+                       if (idx >= map_bits)
+                               break;
+               }
+       }
+       if (WARN_ON(idx >= map_bits)) {
+               preempt_enable();
+               return NULL;
+       }
+       *len = SCSI_LOG_BUFSIZE;
+       return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+}
+
+static void scsi_log_release_buffer(char *bufptr)
+{
+       struct scsi_log_buf *buf;
+       unsigned long idx;
+       int ret;
+
+       buf = this_cpu_ptr(&scsi_format_log);
+       if (bufptr >= buf->buffer &&
+           bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
+               idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
+               ret = test_and_clear_bit(idx, &buf->map);
+               WARN_ON(!ret);
+       }
+       preempt_enable();
+}
+
+static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+{
+       return scmd->request->rq_disk ?
+               scmd->request->rq_disk->disk_name : NULL;
+}
+
+static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
+                                const char *name, int tag)
+{
+       size_t off = 0;
+
+       if (name)
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "[%s] ", name);
+
+       if (WARN_ON(off >= logbuf_len))
+               return off;
+
+       if (tag >= 0)
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "tag#%d ", tag);
+       return off;
+}
+
+void sdev_prefix_printk(const char *level, const struct scsi_device *sdev,
+                       const char *name, const char *fmt, ...)
+{
+       va_list args;
+       char *logbuf;
+       size_t off = 0, logbuf_len;
+
+       if (!sdev)
+               return;
+
+       logbuf = scsi_log_reserve_buffer(&logbuf_len);
+       if (!logbuf)
+               return;
+
+       if (name)
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "[%s] ", name);
+       if (!WARN_ON(off >= logbuf_len)) {
+               va_start(args, fmt);
+               off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
+               va_end(args);
+       }
+       dev_printk(level, &sdev->sdev_gendev, "%s", logbuf);
+       scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(sdev_prefix_printk);
+
+void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
+               const char *fmt, ...)
+{
+       va_list args;
+       char *logbuf;
+       size_t off = 0, logbuf_len;
+
+       if (!scmd || !scmd->cmnd)
+               return;
+
+       logbuf = scsi_log_reserve_buffer(&logbuf_len);
+       if (!logbuf)
+               return;
+       off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
+                                scmd->request->tag);
+       if (off < logbuf_len) {
+               va_start(args, fmt);
+               off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
+               va_end(args);
+       }
+       dev_printk(level, &scmd->device->sdev_gendev, "%s", logbuf);
+       scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(scmd_printk);
+
+static size_t scsi_format_opcode_name(char *buffer, size_t buf_len,
+                                     const unsigned char *cdbp)
+{
+       int sa, cdb0;
+       const char *cdb_name = NULL, *sa_name = NULL;
+       size_t off;
+
+       cdb0 = cdbp[0];
+       if (cdb0 == VARIABLE_LENGTH_CMD) {
+               int len = scsi_varlen_cdb_length(cdbp);
+
+               if (len < 10) {
+                       off = scnprintf(buffer, buf_len,
+                                       "short variable length command, len=%d",
+                                       len);
+                       return off;
+               }
+               sa = (cdbp[8] << 8) + cdbp[9];
+       } else
+               sa = cdbp[1] & 0x1f;
+
+       if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) {
+               if (cdb_name)
+                       off = scnprintf(buffer, buf_len, "%s", cdb_name);
+               else {
+                       off = scnprintf(buffer, buf_len, "opcode=0x%x", cdb0);
+                       if (WARN_ON(off >= buf_len))
+                               return off;
+                       if (cdb0 >= VENDOR_SPECIFIC_CDB)
+                               off += scnprintf(buffer + off, buf_len - off,
+                                                " (vendor)");
+                       else if (cdb0 >= 0x60 && cdb0 < 0x7e)
+                               off += scnprintf(buffer + off, buf_len - off,
+                                                " (reserved)");
+               }
+       } else {
+               if (sa_name)
+                       off = scnprintf(buffer, buf_len, "%s", sa_name);
+               else if (cdb_name)
+                       off = scnprintf(buffer, buf_len, "%s, sa=0x%x",
+                                       cdb_name, sa);
+               else
+                       off = scnprintf(buffer, buf_len,
+                                       "opcode=0x%x, sa=0x%x", cdb0, sa);
+       }
+       WARN_ON(off >= buf_len);
+       return off;
+}
+
+size_t __scsi_format_command(char *logbuf, size_t logbuf_len,
+                            const unsigned char *cdb, size_t cdb_len)
+{
+       int len, k;
+       size_t off;
+
+       off = scsi_format_opcode_name(logbuf, logbuf_len, cdb);
+       if (off >= logbuf_len)
+               return off;
+       len = scsi_command_size(cdb);
+       if (cdb_len < len)
+               len = cdb_len;
+       /* print out all bytes in cdb */
+       for (k = 0; k < len; ++k) {
+               if (off > logbuf_len - 3)
+                       break;
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                " %02x", cdb[k]);
+       }
+       return off;
+}
+EXPORT_SYMBOL(__scsi_format_command);
+
+void scsi_print_command(struct scsi_cmnd *cmd)
+{
+       int k;
+       char *logbuf;
+       size_t off, logbuf_len;
+
+       if (!cmd->cmnd)
+               return;
+
+       logbuf = scsi_log_reserve_buffer(&logbuf_len);
+       if (!logbuf)
+               return;
+
+       off = sdev_format_header(logbuf, logbuf_len,
+                                scmd_name(cmd), cmd->request->tag);
+       if (off >= logbuf_len)
+               goto out_printk;
+       off += scnprintf(logbuf + off, logbuf_len - off, "CDB: ");
+       if (WARN_ON(off >= logbuf_len))
+               goto out_printk;
+
+       off += scsi_format_opcode_name(logbuf + off, logbuf_len - off,
+                                      cmd->cmnd);
+       if (off >= logbuf_len)
+               goto out_printk;
+
+       /* print out all bytes in cdb */
+       if (cmd->cmd_len > 16) {
+               /* Print opcode in one line and use separate lines for CDB */
+               off += scnprintf(logbuf + off, logbuf_len - off, "\n");
+               dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+               scsi_log_release_buffer(logbuf);
+               for (k = 0; k < cmd->cmd_len; k += 16) {
+                       size_t linelen = min(cmd->cmd_len - k, 16);
+
+                       logbuf = scsi_log_reserve_buffer(&logbuf_len);
+                       if (!logbuf)
+                               break;
+                       off = sdev_format_header(logbuf, logbuf_len,
+                                                scmd_name(cmd),
+                                                cmd->request->tag);
+                       if (!WARN_ON(off > logbuf_len - 58)) {
+                               off += scnprintf(logbuf + off, logbuf_len - off,
+                                                "CDB[%02x]: ", k);
+                               hex_dump_to_buffer(&cmd->cmnd[k], linelen,
+                                                  16, 1, logbuf + off,
+                                                  logbuf_len - off, false);
+                       }
+                       dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s",
+                                  logbuf);
+                       scsi_log_release_buffer(logbuf);
+               }
+               return;
+       }
+       if (!WARN_ON(off > logbuf_len - 49)) {
+               off += scnprintf(logbuf + off, logbuf_len - off, " ");
+               hex_dump_to_buffer(cmd->cmnd, cmd->cmd_len, 16, 1,
+                                  logbuf + off, logbuf_len - off,
+                                  false);
+       }
+out_printk:
+       dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+       scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(scsi_print_command);
+
+static size_t
+scsi_format_extd_sense(char *buffer, size_t buf_len,
+                      unsigned char asc, unsigned char ascq)
+{
+       size_t off = 0;
+       const char *extd_sense_fmt = NULL;
+       const char *extd_sense_str = scsi_extd_sense_format(asc, ascq,
+                                                           &extd_sense_fmt);
+
+       if (extd_sense_str) {
+               off = scnprintf(buffer, buf_len, "Add. Sense: %s",
+                               extd_sense_str);
+               if (extd_sense_fmt)
+                       off += scnprintf(buffer + off, buf_len - off,
+                                        "(%s%x)", extd_sense_fmt, ascq);
+       } else {
+               if (asc >= 0x80)
+                       off = scnprintf(buffer, buf_len, "<<vendor>>");
+               off += scnprintf(buffer + off, buf_len - off,
+                                "ASC=0x%x ", asc);
+               if (ascq >= 0x80)
+                       off += scnprintf(buffer + off, buf_len - off,
+                                        "<<vendor>>");
+               off += scnprintf(buffer + off, buf_len - off,
+                                "ASCQ=0x%x ", ascq);
+       }
+       return off;
+}
+
+static size_t
+scsi_format_sense_hdr(char *buffer, size_t buf_len,
+                     const struct scsi_sense_hdr *sshdr)
+{
+       const char *sense_txt;
+       size_t off;
+
+       off = scnprintf(buffer, buf_len, "Sense Key : ");
+       sense_txt = scsi_sense_key_string(sshdr->sense_key);
+       if (sense_txt)
+               off += scnprintf(buffer + off, buf_len - off,
+                                "%s ", sense_txt);
+       else
+               off += scnprintf(buffer + off, buf_len - off,
+                                "0x%x ", sshdr->sense_key);
+       off += scnprintf(buffer + off, buf_len - off,
+               scsi_sense_is_deferred(sshdr) ? "[deferred] " : "[current] ");
+
+       if (sshdr->response_code >= 0x72)
+               off += scnprintf(buffer + off, buf_len - off, "[descriptor] ");
+       return off;
+}
+
+static void
+scsi_log_dump_sense(const struct scsi_device *sdev, const char *name, int tag,
+                   const unsigned char *sense_buffer, int sense_len)
+{
+       char *logbuf;
+       size_t logbuf_len;
+       int i;
+
+       logbuf = scsi_log_reserve_buffer(&logbuf_len);
+       if (!logbuf)
+               return;
+
+       for (i = 0; i < sense_len; i += 16) {
+               int len = min(sense_len - i, 16);
+               size_t off;
+
+               off = sdev_format_header(logbuf, logbuf_len,
+                                        name, tag);
+               hex_dump_to_buffer(&sense_buffer[i], len, 16, 1,
+                                  logbuf + off, logbuf_len - off,
+                                  false);
+               dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
+       }
+       scsi_log_release_buffer(logbuf);
+}
+
+static void
+scsi_log_print_sense_hdr(const struct scsi_device *sdev, const char *name,
+                        int tag, const struct scsi_sense_hdr *sshdr)
+{
+       char *logbuf;
+       size_t off, logbuf_len;
+
+       logbuf = scsi_log_reserve_buffer(&logbuf_len);
+       if (!logbuf)
+               return;
+       off = sdev_format_header(logbuf, logbuf_len, name, tag);
+       off += scsi_format_sense_hdr(logbuf + off, logbuf_len - off, sshdr);
+       dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
+       scsi_log_release_buffer(logbuf);
+
+       logbuf = scsi_log_reserve_buffer(&logbuf_len);
+       if (!logbuf)
+               return;
+       off = sdev_format_header(logbuf, logbuf_len, name, tag);
+       off += scsi_format_extd_sense(logbuf + off, logbuf_len - off,
+                                     sshdr->asc, sshdr->ascq);
+       dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
+       scsi_log_release_buffer(logbuf);
+}
+
+static void
+scsi_log_print_sense(const struct scsi_device *sdev, const char *name, int tag,
+                    const unsigned char *sense_buffer, int sense_len)
+{
+       struct scsi_sense_hdr sshdr;
+
+       if (scsi_normalize_sense(sense_buffer, sense_len, &sshdr))
+               scsi_log_print_sense_hdr(sdev, name, tag, &sshdr);
+       else
+               scsi_log_dump_sense(sdev, name, tag, sense_buffer, sense_len);
+}
+
+/*
+ * Print normalized SCSI sense header with a prefix.
+ */
+void
+scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name,
+                    const struct scsi_sense_hdr *sshdr)
+{
+       scsi_log_print_sense_hdr(sdev, name, -1, sshdr);
+}
+EXPORT_SYMBOL(scsi_print_sense_hdr);
+
+/* Normalize and print sense buffer with name prefix */
+void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
+                       const unsigned char *sense_buffer, int sense_len)
+{
+       scsi_log_print_sense(sdev, name, -1, sense_buffer, sense_len);
+}
+EXPORT_SYMBOL(__scsi_print_sense);
+
+/* Normalize and print sense buffer in SCSI command */
+void scsi_print_sense(const struct scsi_cmnd *cmd)
+{
+       scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag,
+                            cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+}
+EXPORT_SYMBOL(scsi_print_sense);
+
+void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
+                      int disposition)
+{
+       char *logbuf;
+       size_t off, logbuf_len;
+       const char *mlret_string = scsi_mlreturn_string(disposition);
+       const char *hb_string = scsi_hostbyte_string(cmd->result);
+       const char *db_string = scsi_driverbyte_string(cmd->result);
+
+       logbuf = scsi_log_reserve_buffer(&logbuf_len);
+       if (!logbuf)
+               return;
+
+       off = sdev_format_header(logbuf, logbuf_len,
+                                scmd_name(cmd), cmd->request->tag);
+
+       if (off >= logbuf_len)
+               goto out_printk;
+
+       if (msg) {
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "%s: ", msg);
+               if (WARN_ON(off >= logbuf_len))
+                       goto out_printk;
+       }
+       if (mlret_string)
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "%s ", mlret_string);
+       else
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "UNKNOWN(0x%02x) ", disposition);
+       if (WARN_ON(off >= logbuf_len))
+               goto out_printk;
+
+       off += scnprintf(logbuf + off, logbuf_len - off, "Result: ");
+       if (WARN_ON(off >= logbuf_len))
+               goto out_printk;
+
+       if (hb_string)
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "hostbyte=%s ", hb_string);
+       else
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "hostbyte=0x%02x ", host_byte(cmd->result));
+       if (WARN_ON(off >= logbuf_len))
+               goto out_printk;
+
+       if (db_string)
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "driverbyte=%s", db_string);
+       else
+               off += scnprintf(logbuf + off, logbuf_len - off,
+                                "driverbyte=0x%02x", driver_byte(cmd->result));
+out_printk:
+       dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+       scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(scsi_print_result);
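
scsi_log_reserve_buffer() and scsi_log_release_buffer() above carve a per-CPU spool into SCSI_LOG_BUFSIZE slots tracked by a one-word bitmap, so a message can be assembled without allocating memory on the I/O or error-handling path. A stand-alone, single-threaded sketch of the same slot-bitmap idea (sizes and names here are illustrative; the kernel version additionally disables preemption and uses atomic bit operations):

#include <stdio.h>

#define SPOOLSIZE 4096
#define SLOTSIZE   256			/* stands in for SCSI_LOG_BUFSIZE */
#define NSLOTS    (SPOOLSIZE / SLOTSIZE)

static char spool[SPOOLSIZE];
static unsigned long map;		/* one bit per slot, like scsi_log_buf.map */

static char *reserve_slot(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (!(map & (1UL << i))) {
			map |= 1UL << i;
			return spool + i * SLOTSIZE;
		}
	}
	return NULL;			/* pool exhausted */
}

static void release_slot(char *p)
{
	map &= ~(1UL << ((p - spool) / SLOTSIZE));
}

int main(void)
{
	char *buf = reserve_slot();

	if (buf) {
		snprintf(buf, SLOTSIZE, "[sda] tag#3 example message");
		puts(buf);
		release_slot(buf);
	}
	return 0;
}
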
index 6fcefa2..251598e 100644 (file)
@@ -189,36 +189,36 @@ static int proc_print_scsidevice(struct device *dev, void *data)
                sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
        for (i = 0; i < 8; i++) {
                if (sdev->vendor[i] >= 0x20)
-                       seq_printf(s, "%c", sdev->vendor[i]);
+                       seq_putc(s, sdev->vendor[i]);
                else
-                       seq_printf(s, " ");
+                       seq_putc(s, ' ');
        }
 
-       seq_printf(s, " Model: ");
+       seq_puts(s, " Model: ");
        for (i = 0; i < 16; i++) {
                if (sdev->model[i] >= 0x20)
-                       seq_printf(s, "%c", sdev->model[i]);
+                       seq_putc(s, sdev->model[i]);
                else
-                       seq_printf(s, " ");
+                       seq_putc(s, ' ');
        }
 
-       seq_printf(s, " Rev: ");
+       seq_puts(s, " Rev: ");
        for (i = 0; i < 4; i++) {
                if (sdev->rev[i] >= 0x20)
-                       seq_printf(s, "%c", sdev->rev[i]);
+                       seq_putc(s, sdev->rev[i]);
                else
-                       seq_printf(s, " ");
+                       seq_putc(s, ' ');
        }
 
-       seq_printf(s, "\n");
+       seq_putc(s, '\n');
 
        seq_printf(s, "  Type:   %s ", scsi_device_type(sdev->type));
        seq_printf(s, "               ANSI  SCSI revision: %02x",
                        sdev->scsi_level - (sdev->scsi_level > 1));
        if (sdev->scsi_level == 2)
-               seq_printf(s, " CCS\n");
+               seq_puts(s, " CCS\n");
        else
-               seq_printf(s, "\n");
+               seq_putc(s, '\n');
 
 out:
        return 0;
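
The scsi_proc.c hunk above is purely mechanical: single characters go through seq_putc(), constant strings through seq_puts(), and seq_printf() is kept only where formatting is actually needed. A hypothetical show() routine written to the same convention:

#include <linux/seq_file.h>

/* Hypothetical seq_file show callback following the convention above. */
static int example_show(struct seq_file *s, void *v)
{
	seq_puts(s, "  Type:   Direct-Access");
	seq_putc(s, ' ');
	seq_printf(s, "ANSI SCSI revision: %02x", 0x06);
	seq_putc(s, '\n');
	return 0;
}
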
index 983aed1..0deb385 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/spinlock.h>
 #include <linux/async.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -98,20 +99,6 @@ char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
 MODULE_PARM_DESC(scan, "sync, async or none");
 
-/*
- * max_scsi_report_luns: the maximum number of LUNS that will be
- * returned from the REPORT LUNS command. 8 times this value must
- * be allocated. In theory this could be up to an 8 byte value, but
- * in practice, the maximum number of LUNs suppored by any device
- * is about 16k.
- */
-static unsigned int max_scsi_report_luns = 511;
-
-module_param_named(max_report_luns, max_scsi_report_luns, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(max_report_luns,
-                "REPORT LUNS maximum number of LUNS received (should be"
-                " between 1 and 16384)");
-
 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
 
 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
@@ -1367,7 +1354,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
        unsigned int retries;
        int result;
        struct scsi_lun *lunp, *lun_data;
-       u8 *data;
        struct scsi_sense_hdr sshdr;
        struct scsi_device *sdev;
        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1407,16 +1393,12 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
 
        /*
         * Allocate enough to hold the header (the same size as one scsi_lun)
-        * plus the max number of luns we are requesting.
-        *
-        * Reallocating and trying again (with the exact amount we need)
-        * would be nice, but then we need to somehow limit the size
-        * allocated based on the available memory and the limits of
-        * kmalloc - we don't want a kmalloc() failure of a huge value to
-        * prevent us from finding any LUNs on this target.
+        * plus the number of luns we are requesting.  511 was the default
+        * value of the now removed max_report_luns parameter.
         */
-       length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun);
-       lun_data = kmalloc(length, GFP_ATOMIC |
+       length = (511 + 1) * sizeof(struct scsi_lun);
+retry:
+       lun_data = kmalloc(length, GFP_KERNEL |
                           (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
        if (!lun_data) {
                printk(ALLOC_FAILURE_MSG, __func__);
@@ -1433,10 +1415,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
        /*
         * bytes 6 - 9: length of the command.
         */
-       scsi_cmd[6] = (unsigned char) (length >> 24) & 0xff;
-       scsi_cmd[7] = (unsigned char) (length >> 16) & 0xff;
-       scsi_cmd[8] = (unsigned char) (length >> 8) & 0xff;
-       scsi_cmd[9] = (unsigned char) length & 0xff;
+       put_unaligned_be32(length, &scsi_cmd[6]);
 
        scsi_cmd[10] = 0;       /* reserved */
        scsi_cmd[11] = 0;       /* control */
@@ -1484,19 +1463,16 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
        /*
         * Get the length from the first four bytes of lun_data.
         */
-       data = (u8 *) lun_data->scsi_lun;
-       length = ((data[0] << 24) | (data[1] << 16) |
-                 (data[2] << 8) | (data[3] << 0));
+       if (get_unaligned_be32(lun_data->scsi_lun) +
+           sizeof(struct scsi_lun) > length) {
+               length = get_unaligned_be32(lun_data->scsi_lun) +
+                        sizeof(struct scsi_lun);
+               kfree(lun_data);
+               goto retry;
+       }
+       length = get_unaligned_be32(lun_data->scsi_lun);
 
        num_luns = (length / sizeof(struct scsi_lun));
-       if (num_luns > max_scsi_report_luns) {
-               sdev_printk(KERN_WARNING, sdev,
-                           "Only %d (max_scsi_report_luns)"
-                           " of %d luns reported, try increasing"
-                           " max_scsi_report_luns.\n",
-                           max_scsi_report_luns, num_luns);
-               num_luns = max_scsi_report_luns;
-       }
 
        SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
                "scsi scan: REPORT LUN scan\n"));
index 82af28b..08bb47b 100644 (file)
@@ -143,7 +143,7 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
                cmd = "WRITE_SAME";
                break;
        default:
-               trace_seq_printf(p, "UNKNOWN");
+               trace_seq_puts(p, "UNKNOWN");
                goto out;
        }
 
@@ -204,7 +204,7 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
                cmd = "GET_LBA_STATUS";
                break;
        default:
-               trace_seq_printf(p, "UNKNOWN");
+               trace_seq_puts(p, "UNKNOWN");
                goto out;
        }
 
@@ -249,7 +249,7 @@ scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
 {
        const char *ret = trace_seq_buffer_ptr(p);
 
-       trace_seq_printf(p, "-");
+       trace_seq_putc(p, '-');
        trace_seq_putc(p, 0);
 
        return ret;
index 05ea0d4..6b78476 100644 (file)
@@ -3320,11 +3320,8 @@ module_exit(exit_sd);
 static void sd_print_sense_hdr(struct scsi_disk *sdkp,
                               struct scsi_sense_hdr *sshdr)
 {
-       scsi_show_sense_hdr(sdkp->device,
-                           sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
-       scsi_show_extd_sense(sdkp->device,
-                            sdkp->disk ? sdkp->disk->disk_name : NULL,
-                            sshdr->asc, sshdr->ascq);
+       scsi_print_sense_hdr(sdkp->device,
+                            sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
 }
 
 static void sd_print_result(const struct scsi_disk *sdkp, const char *msg,
index b7e79e7..dcb0d76 100644 (file)
@@ -47,7 +47,6 @@ struct ses_device {
 
 struct ses_component {
        u64 addr;
-       unsigned char *desc;
 };
 
 static int ses_probe(struct device *dev)
@@ -68,6 +67,20 @@ static int ses_probe(struct device *dev)
 #define SES_TIMEOUT (30 * HZ)
 #define SES_RETRIES 3
 
+static void init_device_slot_control(unsigned char *dest_desc,
+                                    struct enclosure_component *ecomp,
+                                    unsigned char *status)
+{
+       memcpy(dest_desc, status, 4);
+       dest_desc[0] = 0;
+       /* only clear byte 1 for ENCLOSURE_COMPONENT_DEVICE */
+       if (ecomp->type == ENCLOSURE_COMPONENT_DEVICE)
+               dest_desc[1] = 0;
+       dest_desc[2] &= 0xde;
+       dest_desc[3] &= 0x3c;
+}
+
+
 static int ses_recv_diag(struct scsi_device *sdev, int page_code,
                         void *buf, int bufflen)
 {
@@ -179,14 +192,22 @@ static int ses_set_fault(struct enclosure_device *edev,
                          struct enclosure_component *ecomp,
                         enum enclosure_component_setting val)
 {
-       unsigned char desc[4] = {0 };
+       unsigned char desc[4];
+       unsigned char *desc_ptr;
+
+       desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+       if (!desc_ptr)
+               return -EIO;
+
+       init_device_slot_control(desc, ecomp, desc_ptr);
 
        switch (val) {
        case ENCLOSURE_SETTING_DISABLED:
-               /* zero is disabled */
+               desc[3] &= 0xdf;
                break;
        case ENCLOSURE_SETTING_ENABLED:
-               desc[3] = 0x20;
+               desc[3] |= 0x20;
                break;
        default:
                /* SES doesn't do the SGPIO blink settings */
@@ -220,14 +241,22 @@ static int ses_set_locate(struct enclosure_device *edev,
                          struct enclosure_component *ecomp,
                          enum enclosure_component_setting val)
 {
-       unsigned char desc[4] = {0 };
+       unsigned char desc[4];
+       unsigned char *desc_ptr;
+
+       desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+       if (!desc_ptr)
+               return -EIO;
+
+       init_device_slot_control(desc, ecomp, desc_ptr);
 
        switch (val) {
        case ENCLOSURE_SETTING_DISABLED:
-               /* zero is disabled */
+               desc[2] &= 0xfd;
                break;
        case ENCLOSURE_SETTING_ENABLED:
-               desc[2] = 0x02;
+               desc[2] |= 0x02;
                break;
        default:
                /* SES doesn't do the SGPIO blink settings */
@@ -240,15 +269,23 @@ static int ses_set_active(struct enclosure_device *edev,
                          struct enclosure_component *ecomp,
                          enum enclosure_component_setting val)
 {
-       unsigned char desc[4] = {0 };
+       unsigned char desc[4];
+       unsigned char *desc_ptr;
+
+       desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+       if (!desc_ptr)
+               return -EIO;
+
+       init_device_slot_control(desc, ecomp, desc_ptr);
 
        switch (val) {
        case ENCLOSURE_SETTING_DISABLED:
-               /* zero is disabled */
+               desc[2] &= 0x7f;
                ecomp->active = 0;
                break;
        case ENCLOSURE_SETTING_ENABLED:
-               desc[2] = 0x80;
+               desc[2] |= 0x80;
                ecomp->active = 1;
                break;
        default:
@@ -258,13 +295,63 @@ static int ses_set_active(struct enclosure_device *edev,
        return ses_set_page2_descriptor(edev, ecomp, desc);
 }
 
+static int ses_show_id(struct enclosure_device *edev, char *buf)
+{
+       struct ses_device *ses_dev = edev->scratch;
+       unsigned long long id = get_unaligned_be64(ses_dev->page1+8+4);
+
+       return sprintf(buf, "%#llx\n", id);
+}
+
+static void ses_get_power_status(struct enclosure_device *edev,
+                                struct enclosure_component *ecomp)
+{
+       unsigned char *desc;
+
+       desc = ses_get_page2_descriptor(edev, ecomp);
+       if (desc)
+               ecomp->power_status = (desc[3] & 0x10) ? 0 : 1;
+}
+
+static int ses_set_power_status(struct enclosure_device *edev,
+                               struct enclosure_component *ecomp,
+                               int val)
+{
+       unsigned char desc[4];
+       unsigned char *desc_ptr;
+
+       desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+       if (!desc_ptr)
+               return -EIO;
+
+       init_device_slot_control(desc, ecomp, desc_ptr);
+
+       switch (val) {
+       /* power = 1 is device_off = 0 and vice versa */
+       case 0:
+               desc[3] |= 0x10;
+               break;
+       case 1:
+               desc[3] &= 0xef;
+               break;
+       default:
+               return -EINVAL;
+       }
+       ecomp->power_status = val;
+       return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+
 static struct enclosure_component_callbacks ses_enclosure_callbacks = {
        .get_fault              = ses_get_fault,
        .set_fault              = ses_set_fault,
        .get_status             = ses_get_status,
        .get_locate             = ses_get_locate,
        .set_locate             = ses_set_locate,
+       .get_power_status       = ses_get_power_status,
+       .set_power_status       = ses_set_power_status,
        .set_active             = ses_set_active,
+       .show_id                = ses_show_id,
 };
 
 struct ses_host_edev {
@@ -298,19 +385,26 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
        int invalid = desc[0] & 0x80;
        enum scsi_protocol proto = desc[0] & 0x0f;
        u64 addr = 0;
+       int slot = -1;
        struct ses_component *scomp = ecomp->scratch;
        unsigned char *d;
 
-       scomp->desc = desc;
-
        if (invalid)
                return;
 
        switch (proto) {
+       case SCSI_PROTOCOL_FCP:
+               if (eip) {
+                       d = desc + 4;
+                       slot = d[3];
+               }
+               break;
        case SCSI_PROTOCOL_SAS:
-               if (eip)
+               if (eip) {
+                       d = desc + 4;
+                       slot = d[3];
                        d = desc + 8;
-               else
+               } else
                        d = desc + 4;
                /* only take the phy0 addr */
                addr = (u64)d[12] << 56 |
@@ -326,6 +420,7 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
                /* FIXME: Need to add more protocols than just SAS */
                break;
        }
+       ecomp->slot = slot;
        scomp->addr = addr;
 }
 
@@ -349,7 +444,8 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
                if (scomp->addr != efd->addr)
                        continue;
 
-               enclosure_add_device(edev, i, efd->dev);
+               if (enclosure_add_device(edev, i, efd->dev) == 0)
+                       kobject_uevent(&efd->dev->kobj, KOBJ_CHANGE);
                return 1;
        }
        return 0;
@@ -423,16 +519,24 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
                            type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
 
                                if (create)
-                                       ecomp = enclosure_component_register(edev,
-                                                                            components++,
-                                                                            type_ptr[0],
-                                                                            name);
+                                       ecomp = enclosure_component_alloc(
+                                               edev,
+                                               components++,
+                                               type_ptr[0],
+                                               name);
                                else
                                        ecomp = &edev->component[components++];
 
-                               if (!IS_ERR(ecomp) && addl_desc_ptr)
-                                       ses_process_descriptor(ecomp,
-                                                              addl_desc_ptr);
+                               if (!IS_ERR(ecomp)) {
+                                       ses_get_power_status(edev, ecomp);
+                                       if (addl_desc_ptr)
+                                               ses_process_descriptor(
+                                                       ecomp,
+                                                       addl_desc_ptr);
+                                       if (create)
+                                               enclosure_component_register(
+                                                       ecomp);
+                               }
                        }
                        if (desc_ptr)
                                desc_ptr += len;
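
With init_device_slot_control(), the ses_set_*() callbacks above perform a read-modify-write of the page 2 control descriptor: seed it from the current status bytes, mask the bits that must not be carried over, then set or clear only the requested bit instead of writing a descriptor that is zero everywhere else. A stand-alone sketch of that step; the bit positions are taken from the patch, the helper names are made up:

#include <stdint.h>
#include <string.h>

/* Mirror of init_device_slot_control(): seed the control descriptor
 * from the current status and mask bits that must not be written back. */
static void seed_control(uint8_t ctrl[4], const uint8_t status[4])
{
	memcpy(ctrl, status, 4);
	ctrl[0] = 0;
	ctrl[1] = 0;		/* the patch clears this only for device slots */
	ctrl[2] &= 0xde;
	ctrl[3] &= 0x3c;
}

/* Hypothetical helper: request or clear the fault indicator (bit 0x20 of
 * byte 3 in the patch) while preserving the remaining control bits. */
static void set_fault_bit(uint8_t ctrl[4], const uint8_t status[4], int on)
{
	seed_control(ctrl, status);
	if (on)
		ctrl[3] |= 0x20;
	else
		ctrl[3] &= ~0x20;
}
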
index b14f64c..a668c88 100644 (file)
@@ -763,7 +763,7 @@ static int
 sg_common_write(Sg_fd * sfp, Sg_request * srp,
                unsigned char *cmnd, int timeout, int blocking)
 {
-       int k, data_dir, at_head;
+       int k, at_head;
        Sg_device *sdp = sfp->parentdp;
        sg_io_hdr_t *hp = &srp->header;
 
@@ -793,21 +793,6 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
                return -ENODEV;
        }
 
-       switch (hp->dxfer_direction) {
-       case SG_DXFER_TO_FROM_DEV:
-       case SG_DXFER_FROM_DEV:
-               data_dir = DMA_FROM_DEVICE;
-               break;
-       case SG_DXFER_TO_DEV:
-               data_dir = DMA_TO_DEVICE;
-               break;
-       case SG_DXFER_UNKNOWN:
-               data_dir = DMA_BIDIRECTIONAL;
-               break;
-       default:
-               data_dir = DMA_NONE;
-               break;
-       }
        hp->duration = jiffies_to_msecs(jiffies);
        if (hp->interface_id != '\0' && /* v3 (or later) interface */
            (SG_FLAG_Q_AT_TAIL & hp->flags))
index fb929fa..03054c0 100644 (file)
@@ -245,9 +245,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
                                sr_printk(KERN_INFO, cd,
                                          "CDROM not ready.  Make sure there "
                                          "is a disc in the drive.\n");
-#ifdef DEBUG
-                       scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr);
-#endif
                        err = -ENOMEDIUM;
                        break;
                case ILLEGAL_REQUEST:
@@ -256,16 +253,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
                            sshdr.ascq == 0x00)
                                /* sense: Invalid command operation code */
                                err = -EDRIVE_CANT_DO_THIS;
-#ifdef DEBUG
-                       __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE);
-                       scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr);
-#endif
                        break;
                default:
-                       sr_printk(KERN_ERR, cd,
-                                 "CDROM (ioctl) error, command: ");
-                       __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE);
-                       scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr);
                        err = -EIO;
                }
        }
index 4cff0dd..efc6e44 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/hyperv.h>
-#include <linux/mempool.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -309,14 +308,6 @@ enum storvsc_request_type {
  * This is the end of Protocol specific defines.
  */
 
-
-/*
- * We setup a mempool to allocate request structures for this driver
- * on a per-lun basis. The following define specifies the number of
- * elements in the pool.
- */
-
-#define STORVSC_MIN_BUF_NR                             64
 static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
 
 module_param(storvsc_ringbuffer_size, int, S_IRUGO);
@@ -346,7 +337,6 @@ static void storvsc_on_channel_callback(void *context);
 #define STORVSC_IDE_MAX_CHANNELS                       1
 
 struct storvsc_cmd_request {
-       struct list_head entry;
        struct scsi_cmnd *cmd;
 
        unsigned int bounce_sgl_count;
@@ -357,7 +347,6 @@ struct storvsc_cmd_request {
        /* Synchronize the request/response if needed */
        struct completion wait_event;
 
-       unsigned char *sense_buffer;
        struct hv_multipage_buffer data_buffer;
        struct vstor_packet vstor_packet;
 };
@@ -389,11 +378,6 @@ struct storvsc_device {
        struct storvsc_cmd_request reset_request;
 };
 
-struct stor_mem_pools {
-       struct kmem_cache *request_pool;
-       mempool_t *request_mempool;
-};
-
 struct hv_host_device {
        struct hv_device *dev;
        unsigned int port;
@@ -426,21 +410,42 @@ done:
        kfree(wrk);
 }
 
-static void storvsc_bus_scan(struct work_struct *work)
+static void storvsc_host_scan(struct work_struct *work)
 {
        struct storvsc_scan_work *wrk;
-       int id, order_id;
+       struct Scsi_Host *host;
+       struct scsi_device *sdev;
+       unsigned long flags;
 
        wrk = container_of(work, struct storvsc_scan_work, work);
-       for (id = 0; id < wrk->host->max_id; ++id) {
-               if (wrk->host->reverse_ordering)
-                       order_id = wrk->host->max_id - id - 1;
-               else
-                       order_id = id;
-
-               scsi_scan_target(&wrk->host->shost_gendev, 0,
-                               order_id, SCAN_WILD_CARD, 1);
+       host = wrk->host;
+
+       /*
+        * Before scanning the host, first check to see if any of the
+        * currently known devices have been hot removed. We issue a
+        * "unit ready" command against all currently known devices.
+        * This I/O will result in an error for devices that have been
+        * removed. As part of handling the I/O error, we remove the device.
+        *
+        * When a LUN is added or removed, the host sends us a signal to
+        * scan the host. Thus we are forced to discover the LUNs that
+        * may have been removed this way.
+        */
+       mutex_lock(&host->scan_mutex);
+       spin_lock_irqsave(host->host_lock, flags);
+       list_for_each_entry(sdev, &host->__devices, siblings) {
+               spin_unlock_irqrestore(host->host_lock, flags);
+               scsi_test_unit_ready(sdev, 1, 1, NULL);
+               spin_lock_irqsave(host->host_lock, flags);
+               continue;
        }
+       spin_unlock_irqrestore(host->host_lock, flags);
+       mutex_unlock(&host->scan_mutex);
+       /*
+        * Now scan the host to discover LUNs that may have been added.
+        */
+       scsi_scan_host(host);
+
        kfree(wrk);
 }
 
@@ -1070,10 +1075,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
 {
        struct scsi_cmnd *scmnd = cmd_request->cmd;
        struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
-       void (*scsi_done_fn)(struct scsi_cmnd *);
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;
-       struct stor_mem_pools *memp = scmnd->device->hostdata;
        struct Scsi_Host *host;
        struct storvsc_device *stor_dev;
        struct hv_device *dev = host_dev->dev;
@@ -1109,14 +1112,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
                cmd_request->data_buffer.len -
                vm_srb->data_transfer_length);
 
-       scsi_done_fn = scmnd->scsi_done;
-
-       scmnd->host_scribble = NULL;
-       scmnd->scsi_done = NULL;
-
-       scsi_done_fn(scmnd);
-
-       mempool_free(cmd_request, memp->request_mempool);
+       scmnd->scsi_done(scmnd);
 }
 
 static void storvsc_on_io_completion(struct hv_device *device,
@@ -1160,7 +1156,7 @@ static void storvsc_on_io_completion(struct hv_device *device,
                        SRB_STATUS_AUTOSENSE_VALID) {
                        /* autosense data available */
 
-                       memcpy(request->sense_buffer,
+                       memcpy(request->cmd->sense_buffer,
                               vstor_packet->vm_srb.sense_data,
                               vstor_packet->vm_srb.sense_info_length);
 
@@ -1198,7 +1194,7 @@ static void storvsc_on_receive(struct hv_device *device,
                if (!work)
                        return;
 
-               INIT_WORK(&work->work, storvsc_bus_scan);
+               INIT_WORK(&work->work, storvsc_host_scan);
                work->host = stor_device->host;
                schedule_work(&work->work);
                break;
@@ -1378,55 +1374,6 @@ static int storvsc_do_io(struct hv_device *device,
        return ret;
 }
 
-static int storvsc_device_alloc(struct scsi_device *sdevice)
-{
-       struct stor_mem_pools *memp;
-       int number = STORVSC_MIN_BUF_NR;
-
-       memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
-       if (!memp)
-               return -ENOMEM;
-
-       memp->request_pool =
-               kmem_cache_create(dev_name(&sdevice->sdev_dev),
-                               sizeof(struct storvsc_cmd_request), 0,
-                               SLAB_HWCACHE_ALIGN, NULL);
-
-       if (!memp->request_pool)
-               goto err0;
-
-       memp->request_mempool = mempool_create(number, mempool_alloc_slab,
-                                               mempool_free_slab,
-                                               memp->request_pool);
-
-       if (!memp->request_mempool)
-               goto err1;
-
-       sdevice->hostdata = memp;
-
-       return 0;
-
-err1:
-       kmem_cache_destroy(memp->request_pool);
-
-err0:
-       kfree(memp);
-       return -ENOMEM;
-}
-
-static void storvsc_device_destroy(struct scsi_device *sdevice)
-{
-       struct stor_mem_pools *memp = sdevice->hostdata;
-
-       if (!memp)
-               return;
-
-       mempool_destroy(memp->request_mempool);
-       kmem_cache_destroy(memp->request_pool);
-       kfree(memp);
-       sdevice->hostdata = NULL;
-}
-
 static int storvsc_device_configure(struct scsi_device *sdevice)
 {
        scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS);
@@ -1447,6 +1394,19 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
         */
        sdevice->sdev_bflags |= msft_blist_flags;
 
+       /*
+        * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
+        * if the device is a MSFT virtual device.
+        */
+       if (!strncmp(sdevice->vendor, "Msft", 4)) {
+               switch (vmbus_proto_version) {
+               case VERSION_WIN8:
+               case VERSION_WIN8_1:
+                       sdevice->scsi_level = SCSI_SPC_3;
+                       break;
+               }
+       }
+
        return 0;
 }
 
@@ -1561,13 +1521,11 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
        int ret;
        struct hv_host_device *host_dev = shost_priv(host);
        struct hv_device *dev = host_dev->dev;
-       struct storvsc_cmd_request *cmd_request;
-       unsigned int request_size = 0;
+       struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
        int i;
        struct scatterlist *sgl;
        unsigned int sg_count = 0;
        struct vmscsi_request *vm_srb;
-       struct stor_mem_pools *memp = scmnd->device->hostdata;
 
        if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
                /*
@@ -1584,25 +1542,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
                }
        }
 
-       request_size = sizeof(struct storvsc_cmd_request);
-
-       cmd_request = mempool_alloc(memp->request_mempool,
-                                      GFP_ATOMIC);
-
-       /*
-        * We might be invoked in an interrupt context; hence
-        * mempool_alloc() can fail.
-        */
-       if (!cmd_request)
-               return SCSI_MLQUEUE_DEVICE_BUSY;
-
-       memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
-
        /* Setup the cmd request */
        cmd_request->cmd = scmnd;
 
-       scmnd->host_scribble = (unsigned char *)cmd_request;
-
        vm_srb = &cmd_request->vstor_packet.vm_srb;
        vm_srb->win8_extension.time_out_value = 60;
 
@@ -1637,9 +1579,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 
        memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
 
-       cmd_request->sense_buffer = scmnd->sense_buffer;
-
-
        cmd_request->data_buffer.len = scsi_bufflen(scmnd);
        if (scsi_sg_count(scmnd)) {
                sgl = (struct scatterlist *)scsi_sglist(scmnd);
@@ -1651,10 +1590,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
                                create_bounce_buffer(sgl, scsi_sg_count(scmnd),
                                                     scsi_bufflen(scmnd),
                                                     vm_srb->data_in);
-                       if (!cmd_request->bounce_sgl) {
-                               ret = SCSI_MLQUEUE_HOST_BUSY;
-                               goto queue_error;
-                       }
+                       if (!cmd_request->bounce_sgl)
+                               return SCSI_MLQUEUE_HOST_BUSY;
 
                        cmd_request->bounce_sgl_count =
                                ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
@@ -1692,27 +1629,21 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
                        destroy_bounce_buffer(cmd_request->bounce_sgl,
                                        cmd_request->bounce_sgl_count);
 
-               ret = SCSI_MLQUEUE_DEVICE_BUSY;
-               goto queue_error;
+               return SCSI_MLQUEUE_DEVICE_BUSY;
        }
 
        return 0;
-
-queue_error:
-       mempool_free(cmd_request, memp->request_mempool);
-       scmnd->host_scribble = NULL;
-       return ret;
 }
 
 static struct scsi_host_template scsi_driver = {
        .module =               THIS_MODULE,
        .name =                 "storvsc_host_t",
+       .cmd_size =             sizeof(struct storvsc_cmd_request),
        .bios_param =           storvsc_get_chs,
        .queuecommand =         storvsc_queuecommand,
        .eh_host_reset_handler =        storvsc_host_reset_handler,
+       .proc_name =            "storvsc_host",
        .eh_timed_out =         storvsc_eh_timed_out,
-       .slave_alloc =          storvsc_device_alloc,
-       .slave_destroy =        storvsc_device_destroy,
        .slave_configure =      storvsc_device_configure,
        .cmd_per_lun =          255,
        .can_queue =            STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
@@ -1760,6 +1691,9 @@ static int storvsc_probe(struct hv_device *device,
        bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
        int target = 0;
        struct storvsc_device *stor_device;
+       int max_luns_per_target;
+       int max_targets;
+       int max_channels;
 
        /*
         * Based on the windows host we are running on,
@@ -1773,12 +1707,18 @@ static int storvsc_probe(struct hv_device *device,
                vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
                vmstor_current_major = VMSTOR_WIN7_MAJOR;
                vmstor_current_minor = VMSTOR_WIN7_MINOR;
+               max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
+               max_targets = STORVSC_IDE_MAX_TARGETS;
+               max_channels = STORVSC_IDE_MAX_CHANNELS;
                break;
        default:
                sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
                vmscsi_size_delta = 0;
                vmstor_current_major = VMSTOR_WIN8_MAJOR;
                vmstor_current_minor = VMSTOR_WIN8_MINOR;
+               max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
+               max_targets = STORVSC_MAX_TARGETS;
+               max_channels = STORVSC_MAX_CHANNELS;
                break;
        }
 
@@ -1826,9 +1766,9 @@ static int storvsc_probe(struct hv_device *device,
                break;
 
        case SCSI_GUID:
-               host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
-               host->max_id = STORVSC_MAX_TARGETS;
-               host->max_channel = STORVSC_MAX_CHANNELS - 1;
+               host->max_lun = max_luns_per_target;
+               host->max_id = max_targets;
+               host->max_channel = max_channels - 1;
                break;
 
        default:
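
The storvsc conversion above replaces the per-LUN mempool with per-command memory allocated by the midlayer: the host template declares .cmd_size and the driver reaches its private data through scsi_cmd_priv(). A minimal sketch of that pattern in a hypothetical driver (.cmd_size, scsi_cmd_priv() and scsi_done() are the interfaces used by the patch; everything else is illustrative):

#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct example_cmd_priv {
	int issued;			/* hypothetical per-command state */
};

static int example_queuecommand(struct Scsi_Host *host,
				struct scsi_cmnd *scmnd)
{
	/* Allocated by the midlayer along with scmnd because of
	 * .cmd_size below; no mempool_alloc()/host_scribble needed. */
	struct example_cmd_priv *priv = scsi_cmd_priv(scmnd);

	priv->issued = 1;
	scmnd->scsi_done(scmnd);	/* complete immediately in this sketch */
	return 0;
}

static struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	.queuecommand	= example_queuecommand,
	.cmd_size	= sizeof(struct example_cmd_priv),
};
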
index 6e07b2a..8a1f4b3 100644 (file)
@@ -70,3 +70,16 @@ config SCSI_UFSHCD_PLATFORM
        If you have a controller with this interface, say Y or M here.
 
          If unsure, say N.
+
+config SCSI_UFS_QCOM
+       bool "QCOM specific hooks to UFS controller platform driver"
+       depends on SCSI_UFSHCD_PLATFORM && ARCH_MSM
+       select PHY_QCOM_UFS
+       help
+         This selects the QCOM specific additions to UFSHCD platform driver.
+         UFS host on QCOM needs some vendor specific configuration before
+         accessing the hardware which includes PHY configuration and vendor
+         specific registers.
+
+         Select this if you have UFS controller on QCOM chipset.
+         If unsure, say N.
index 1e5bd48..8303bcc 100644 (file)
@@ -1,4 +1,5 @@
 # UFSHCD makefile
+obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
 obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
new file mode 100644 (file)
index 0000000..9217af9
--- /dev/null
@@ -0,0 +1,1004 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+#include <linux/phy/phy-qcom-ufs.h>
+#include "ufshcd.h"
+#include "unipro.h"
+#include "ufs-qcom.h"
+#include "ufshci.h"
+
+static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
+
+static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result);
+static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
+               const char *speed_mode);
+static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
+
+static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
+{
+       int err = 0;
+
+       err = ufshcd_dme_get(hba,
+                       UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
+       if (err)
+               dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
+                               __func__, err);
+
+       return err;
+}
+
+static int ufs_qcom_host_clk_get(struct device *dev,
+               const char *name, struct clk **clk_out)
+{
+       struct clk *clk;
+       int err = 0;
+
+       clk = devm_clk_get(dev, name);
+       if (IS_ERR(clk)) {
+               err = PTR_ERR(clk);
+               dev_err(dev, "%s: failed to get %s err %d",
+                               __func__, name, err);
+       } else {
+               *clk_out = clk;
+       }
+
+       return err;
+}
+
+static int ufs_qcom_host_clk_enable(struct device *dev,
+               const char *name, struct clk *clk)
+{
+       int err = 0;
+
+       err = clk_prepare_enable(clk);
+       if (err)
+               dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
+
+       return err;
+}
+
+static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
+{
+       if (!host->is_lane_clks_enabled)
+               return;
+
+       clk_disable_unprepare(host->tx_l1_sync_clk);
+       clk_disable_unprepare(host->tx_l0_sync_clk);
+       clk_disable_unprepare(host->rx_l1_sync_clk);
+       clk_disable_unprepare(host->rx_l0_sync_clk);
+
+       host->is_lane_clks_enabled = false;
+}
+
+static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
+{
+       int err = 0;
+       struct device *dev = host->hba->dev;
+
+       if (host->is_lane_clks_enabled)
+               return 0;
+
+       err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
+               host->rx_l0_sync_clk);
+       if (err)
+               goto out;
+
+       err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
+               host->tx_l0_sync_clk);
+       if (err)
+               goto disable_rx_l0;
+
+       err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+               host->rx_l1_sync_clk);
+       if (err)
+               goto disable_tx_l0;
+
+       err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+               host->tx_l1_sync_clk);
+       if (err)
+               goto disable_rx_l1;
+
+       host->is_lane_clks_enabled = true;
+       goto out;
+
+disable_rx_l1:
+       clk_disable_unprepare(host->rx_l1_sync_clk);
+disable_tx_l0:
+       clk_disable_unprepare(host->tx_l0_sync_clk);
+disable_rx_l0:
+       clk_disable_unprepare(host->rx_l0_sync_clk);
+out:
+       return err;
+}
+
+static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
+{
+       int err = 0;
+       struct device *dev = host->hba->dev;
+
+       err = ufs_qcom_host_clk_get(dev,
+                       "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
+       if (err)
+               goto out;
+
+       err = ufs_qcom_host_clk_get(dev,
+                       "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
+       if (err)
+               goto out;
+
+       err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
+               &host->rx_l1_sync_clk);
+       if (err)
+               goto out;
+
+       err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+               &host->tx_l1_sync_clk);
+out:
+       return err;
+}
+
+static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
+{
+       struct ufs_qcom_host *host = hba->priv;
+       struct phy *phy = host->generic_phy;
+       u32 tx_lanes;
+       int err = 0;
+
+       err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
+       if (err)
+               goto out;
+
+       err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
+       if (err)
+               dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
+                       __func__);
+
+out:
+       return err;
+}
+
+static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
+{
+       int err;
+       u32 tx_fsm_val = 0;
+       unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+       do {
+               err = ufshcd_dme_get(hba,
+                       UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
+               if (err || tx_fsm_val == TX_FSM_HIBERN8)
+                       break;
+
+               /* sleep for max. 200us */
+               usleep_range(100, 200);
+       } while (time_before(jiffies, timeout));
+
+       /*
+        * We might have been scheduled out for a long time during polling,
+        * so check the state again.
+        */
+       if (time_after(jiffies, timeout))
+               err = ufshcd_dme_get(hba,
+                               UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
+
+       if (err) {
+               dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+                               __func__, err);
+       } else if (tx_fsm_val != TX_FSM_HIBERN8) {
+               err = tx_fsm_val;
+               dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
+                               __func__, err);
+       }
+
+       return err;
+}
+
+static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
+{
+       struct ufs_qcom_host *host = hba->priv;
+       struct phy *phy = host->generic_phy;
+       int ret = 0;
+       u8 major;
+       u16 minor, step;
+       bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
+                                                       ? true : false;
+
+       /* Assert PHY reset and apply PHY calibration values */
+       ufs_qcom_assert_reset(hba);
+       /* provide 1ms delay to let the reset pulse propagate */
+       usleep_range(1000, 1100);
+
+       ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
+       ufs_qcom_phy_save_controller_version(phy, major, minor, step);
+       ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
+       if (ret) {
+               dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+                       __func__, ret);
+               goto out;
+       }
+
+       /* De-assert PHY reset and start serdes */
+       ufs_qcom_deassert_reset(hba);
+
+       /*
+        * After reset de-assertion, the PHY needs all ref clocks, voltage and
+        * current to settle down before the serdes is started.
+        */
+       usleep_range(1000, 1100);
+       ret = ufs_qcom_phy_start_serdes(phy);
+       if (ret) {
+               dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
+                       __func__, ret);
+               goto out;
+       }
+
+       ret = ufs_qcom_phy_is_pcs_ready(phy);
+       if (ret)
+               dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
+                       __func__, ret);
+
+out:
+       return ret;
+}
+
+/*
+ * The UTP controller has a number of internal clock gating cells (CGCs).
+ * Internal hardware sub-modules within the UTP controller control the CGCs.
+ * Hardware CGCs disable the clock to inactivate UTP sub-modules that are not
+ * involved in a specific operation. The UTP controller CGCs are disabled by
+ * default, and this function enables them (after every UFS link startup) to
+ * reduce power leakage.
+ */
+static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+{
+       ufshcd_writel(hba,
+               ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
+               REG_UFS_CFG2);
+
+       /* Ensure that HW clock gating is enabled before next operations */
+       mb();
+}
+
+static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
+{
+       struct ufs_qcom_host *host = hba->priv;
+       int err = 0;
+
+       switch (status) {
+       case PRE_CHANGE:
+               ufs_qcom_power_up_sequence(hba);
+               /*
+                * The PHY PLL output is the source of tx/rx lane symbol
+                * clocks; hence, enable the lane clocks only after the PHY
+                * is initialized.
+                */
+               err = ufs_qcom_enable_lane_clks(host);
+               break;
+       case POST_CHANGE:
+               /* check if UFS PHY moved from DISABLED to HIBERN8 */
+               err = ufs_qcom_check_hibern8(hba);
+               ufs_qcom_enable_hw_clk_gating(hba);
+
+               break;
+       default:
+               dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+/**
+ * Returns the core_clk rate (non-zero) on success and 0
+ * in case of a failure.
+ */
+static unsigned long
+ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
+{
+       struct ufs_clk_info *clki;
+       u32 core_clk_period_in_ns;
+       u32 tx_clk_cycles_per_us = 0;
+       unsigned long core_clk_rate = 0;
+       u32 core_clk_cycles_per_us = 0;
+
+       static u32 pwm_fr_table[][2] = {
+               {UFS_PWM_G1, 0x1},
+               {UFS_PWM_G2, 0x1},
+               {UFS_PWM_G3, 0x1},
+               {UFS_PWM_G4, 0x1},
+       };
+
+       static u32 hs_fr_table_rA[][2] = {
+               {UFS_HS_G1, 0x1F},
+               {UFS_HS_G2, 0x3e},
+       };
+
+       static u32 hs_fr_table_rB[][2] = {
+               {UFS_HS_G1, 0x24},
+               {UFS_HS_G2, 0x49},
+       };
+
+       if (gear == 0) {
+               dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
+               goto out_error;
+       }
+
+       list_for_each_entry(clki, &hba->clk_list_head, list) {
+               if (!strcmp(clki->name, "core_clk"))
+                       core_clk_rate = clk_get_rate(clki->clk);
+       }
+
+       /* If frequency is smaller than 1MHz, set to 1MHz */
+       if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
+               core_clk_rate = DEFAULT_CLK_RATE_HZ;
+
+       core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+       ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
+
+       core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
+       core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
+       core_clk_period_in_ns &= MASK_CLK_NS_REG;
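+       /*
+        * For example, with a 100 MHz core_clk: core_clk_cycles_per_us = 100
+        * and the 10 ns clock period lands in bits 23:10 as 10 << 10 = 0x2800.
+        */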
+
+       switch (hs) {
+       case FASTAUTO_MODE:
+       case FAST_MODE:
+               if (rate == PA_HS_MODE_A) {
+                       if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
+                               dev_err(hba->dev,
+                                       "%s: index %d exceeds table size %zu\n",
+                                       __func__, gear,
+                                       ARRAY_SIZE(hs_fr_table_rA));
+                               goto out_error;
+                       }
+                       tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
+               } else if (rate == PA_HS_MODE_B) {
+                       if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
+                               dev_err(hba->dev,
+                                       "%s: index %d exceeds table size %zu\n",
+                                       __func__, gear,
+                                       ARRAY_SIZE(hs_fr_table_rB));
+                               goto out_error;
+                       }
+                       tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
+               } else {
+                       dev_err(hba->dev, "%s: invalid rate = %d\n",
+                               __func__, rate);
+                       goto out_error;
+               }
+               break;
+       case SLOWAUTO_MODE:
+       case SLOW_MODE:
+               if (gear > ARRAY_SIZE(pwm_fr_table)) {
+                       dev_err(hba->dev,
+                                       "%s: index %d exceeds table size %zu\n",
+                                       __func__, gear,
+                                       ARRAY_SIZE(pwm_fr_table));
+                       goto out_error;
+               }
+               tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
+               break;
+       case UNCHANGED:
+       default:
+               dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
+               goto out_error;
+       }
+
+       /* the two fields of this register must be written together */
+       ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
+                                               REG_UFS_TX_SYMBOL_CLK_NS_US);
+       goto out;
+
+out_error:
+       core_clk_rate = 0;
+out:
+       return core_clk_rate;
+}
+
+static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status)
+{
+       unsigned long core_clk_rate = 0;
+       u32 core_clk_cycles_per_100ms;
+
+       switch (status) {
+       case PRE_CHANGE:
+               core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1,
+                                                   SLOWAUTO_MODE, 0);
+               if (!core_clk_rate) {
+                       dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+                               __func__);
+                       return -EINVAL;
+               }
+               core_clk_cycles_per_100ms =
+                       (core_clk_rate / MSEC_PER_SEC) * 100;
+               ufshcd_writel(hba, core_clk_cycles_per_100ms,
+                                       REG_UFS_PA_LINK_STARTUP_TIMER);
+               break;
+       case POST_CHANGE:
+               ufs_qcom_link_startup_post_change(hba);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+       struct ufs_qcom_host *host = hba->priv;
+       struct phy *phy = host->generic_phy;
+       int ret = 0;
+
+       if (ufs_qcom_is_link_off(hba)) {
+               /*
+                * Disable the tx/rx lane symbol clocks before PHY is
+                * powered down as the PLL source should be disabled
+                * after downstream clocks are disabled.
+                */
+               ufs_qcom_disable_lane_clks(host);
+               phy_power_off(phy);
+
+               /* Assert PHY soft reset */
+               ufs_qcom_assert_reset(hba);
+               goto out;
+       }
+
+       /*
+        * If UniPro link is not active, PHY ref_clk, main PHY analog power
+        * rail and low noise analog power rail for PLL can be switched off.
+        */
+       if (!ufs_qcom_is_link_active(hba))
+               phy_power_off(phy);
+
+out:
+       return ret;
+}
+
+static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+       struct ufs_qcom_host *host = hba->priv;
+       struct phy *phy = host->generic_phy;
+       int err;
+
+       err = phy_power_on(phy);
+       if (err) {
+               dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
+                       __func__, err);
+               goto out;
+       }
+
+       hba->is_sys_suspended = false;
+
+out:
+       return err;
+}
+
+struct ufs_qcom_dev_params {
+       u32 pwm_rx_gear;        /* pwm rx gear to work in */
+       u32 pwm_tx_gear;        /* pwm tx gear to work in */
+       u32 hs_rx_gear;         /* hs rx gear to work in */
+       u32 hs_tx_gear;         /* hs tx gear to work in */
+       u32 rx_lanes;           /* number of rx lanes */
+       u32 tx_lanes;           /* number of tx lanes */
+       u32 rx_pwr_pwm;         /* rx pwm working pwr */
+       u32 tx_pwr_pwm;         /* tx pwm working pwr */
+       u32 rx_pwr_hs;          /* rx hs working pwr */
+       u32 tx_pwr_hs;          /* tx hs working pwr */
+       u32 hs_rate;            /* rate A/B to work in HS */
+       u32 desired_working_mode;
+};
+
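+/*
+ * Example (illustrative): a device advertising FAST_MODE up to HS-G3 on two
+ * lanes, negotiated against vendor limits of HS-G2 on two lanes, ends up with
+ * an agreed power mode of HS-G2 on two lanes.
+ */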
+static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
+                                     struct ufs_pa_layer_attr *dev_max,
+                                     struct ufs_pa_layer_attr *agreed_pwr)
+{
+       int min_qcom_gear;
+       int min_dev_gear;
+       bool is_dev_sup_hs = false;
+       bool is_qcom_max_hs = false;
+
+       if (dev_max->pwr_rx == FAST_MODE)
+               is_dev_sup_hs = true;
+
+       if (qcom_param->desired_working_mode == FAST) {
+               is_qcom_max_hs = true;
+               min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
+                                     qcom_param->hs_tx_gear);
+       } else {
+               min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
+                                     qcom_param->pwm_tx_gear);
+       }
+
+       /*
+        * The device doesn't support HS but qcom_param->desired_working_mode
+        * is HS, so the device and qcom_param don't agree.
+        */
+       if (!is_dev_sup_hs && is_qcom_max_hs) {
+               pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
+                       __func__);
+               return -ENOTSUPP;
+       } else if (is_dev_sup_hs && is_qcom_max_hs) {
+               /*
+                * Since the device supports HS, it also supports FAST_MODE.
+                * As qcom_param->desired_working_mode is HS as well, the final
+                * decision (FAST/FASTAUTO) is made according to qcom_params,
+                * which is the restricting factor.
+                */
+               agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+                                               qcom_param->rx_pwr_hs;
+       } else {
+               /*
+                * Here qcom_param->desired_working_mode is PWM. Whether the
+                * device supports HS or PWM doesn't matter; in both cases
+                * qcom_param->desired_working_mode determines the mode.
+                */
+                agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+                                               qcom_param->rx_pwr_pwm;
+       }
+
+       /*
+        * We would like TX to work with the minimum number of lanes supported
+        * by both the device capability and the vendor preferences; the same
+        * decision is made for RX.
+        */
+       agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
+                                               qcom_param->tx_lanes);
+       agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
+                                               qcom_param->rx_lanes);
+
+       /* device maximum gear is the minimum between device rx and tx gears */
+       min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+       /*
+        * If the device capabilities and the vendor pre-defined preferences
+        * are both HS or both PWM, the minimum gear becomes the chosen
+        * working gear.
+        * If one is PWM and one is HS, the PWM side gets to decide the gear,
+        * as it is the one that also decided above which power mode the
+        * device will be configured to.
+        */
+       if ((is_dev_sup_hs && is_qcom_max_hs) ||
+           (!is_dev_sup_hs && !is_qcom_max_hs))
+               agreed_pwr->gear_rx = agreed_pwr->gear_tx =
+                       min_t(u32, min_dev_gear, min_qcom_gear);
+       else if (!is_dev_sup_hs)
+               agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
+       else
+               agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
+
+       agreed_pwr->hs_rate = qcom_param->hs_rate;
+       return 0;
+}
+
+static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
+{
+       int vote;
+       int err = 0;
+       char mode[BUS_VECTOR_NAME_LEN];
+
+       ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
+
+       vote = ufs_qcom_get_bus_vote(host, mode);
+       if (vote >= 0)
+               err = ufs_qcom_set_bus_vote(host, vote);
+       else
+               err = vote;
+
+       if (err)
+               dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
+       else
+               host->bus_vote.saved_vote = vote;
+       return err;
+}
+
+static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+                               bool status,
+                               struct ufs_pa_layer_attr *dev_max_params,
+                               struct ufs_pa_layer_attr *dev_req_params)
+{
+       u32 val;
+       struct ufs_qcom_host *host = hba->priv;
+       struct phy *phy = host->generic_phy;
+       struct ufs_qcom_dev_params ufs_qcom_cap;
+       int ret = 0;
+       int res = 0;
+
+       if (!dev_req_params) {
+               pr_err("%s: incoming dev_req_params is NULL\n", __func__);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       switch (status) {
+       case PRE_CHANGE:
+               ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
+               ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
+               ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
+               ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
+               ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
+               ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
+               ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
+               ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
+               ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
+               ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
+               ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
+               ufs_qcom_cap.desired_working_mode =
+                                       UFS_QCOM_LIMIT_DESIRED_MODE;
+
+               ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
+                                                dev_max_params,
+                                                dev_req_params);
+               if (ret) {
+                       pr_err("%s: failed to determine capabilities\n",
+                                       __func__);
+                       goto out;
+               }
+
+               break;
+       case POST_CHANGE:
+               if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
+                                       dev_req_params->pwr_rx,
+                                       dev_req_params->hs_rate)) {
+                       dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+                               __func__);
+                       /*
+                        * We return an error code at the end of the routine,
+                        * but continue to configure UFS_PHY_TX_LANE_ENABLE
+                        * and bus voting as usual.
+                        */
+                       ret = -EINVAL;
+               }
+
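+               /* e.g. lane_tx == 2 gives val == 0x3, a two-lane TX enable mask */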
+               val = ~(MAX_U32 << dev_req_params->lane_tx);
+               res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
+               if (res) {
+                       dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
+                               __func__, res);
+                       ret = res;
+               }
+
+               /* cache the power mode parameters to use internally */
+               memcpy(&host->dev_req_params,
+                               dev_req_params, sizeof(*dev_req_params));
+               ufs_qcom_update_bus_bw_vote(host);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+out:
+       return ret;
+}
+
+/**
+ * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
+ * @hba: host controller instance
+ *
+ * The QCOM UFS host controller may have some non-standard behaviours (quirks)
+ * compared to what the UFSHCI specification requires. Advertise all such
+ * quirks to the standard UFS host controller driver so that it takes them
+ * into account.
+ */
+static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
+{
+       u8 major;
+       u16 minor, step;
+
+       ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
+
+       /*
+        * TBD
+        * Here we should advertise controller quirks according to the
+        * controller version.
+        */
+}
+
+static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
+               const char *speed_mode)
+{
+       struct device *dev = host->hba->dev;
+       struct device_node *np = dev->of_node;
+       int err;
+       const char *key = "qcom,bus-vector-names";
+
+       if (!speed_mode) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
+               err = of_property_match_string(np, key, "MAX");
+       else
+               err = of_property_match_string(np, key, speed_mode);
+
+out:
+       if (err < 0)
+               dev_err(dev, "%s: Invalid %s mode %d\n",
+                               __func__, speed_mode, err);
+       return err;
+}
+
+static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+{
+       int err = 0;
+
+       if (vote != host->bus_vote.curr_vote)
+               host->bus_vote.curr_vote = vote;
+
+       return err;
+}
+
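+/*
+ * Builds a bus vector name from the agreed power mode, for example "MIN",
+ * "HS_RB_G2_L2" or "PWM_G1_L1" (examples derived from the formats below).
+ */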
+static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
+{
+       int gear = max_t(u32, p->gear_rx, p->gear_tx);
+       int lanes = max_t(u32, p->lane_rx, p->lane_tx);
+       int pwr;
+
+       /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
+       if (!gear)
+               gear = 1;
+
+       if (!lanes)
+               lanes = 1;
+
+       if (!p->pwr_rx && !p->pwr_tx) {
+               pwr = SLOWAUTO_MODE;
+               snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
+       } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
+                p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
+               pwr = FAST_MODE;
+               snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
+                        p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
+       } else {
+               pwr = SLOW_MODE;
+               snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
+                        "PWM", gear, lanes);
+       }
+}
+
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+{
+       struct ufs_qcom_host *host = hba->priv;
+       int err = 0;
+       int vote = 0;
+
+       /*
+        * If ufs_qcom_init() has not completed yet, simply ignore this call;
+        * ufs_qcom_setup_clocks() will be called again from ufs_qcom_init()
+        * once initialization is done.
+        */
+       if (!host)
+               return 0;
+
+       if (on) {
+               err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
+               if (err)
+                       goto out;
+
+               err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
+               if (err) {
+                       dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
+                               __func__, err);
+                       ufs_qcom_phy_disable_iface_clk(host->generic_phy);
+                       goto out;
+               }
+               /* enable the device ref clock */
+               ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy);
+               vote = host->bus_vote.saved_vote;
+               if (vote == host->bus_vote.min_bw_vote)
+                       ufs_qcom_update_bus_bw_vote(host);
+       } else {
+               /* M-PHY RMMI interface clocks can be turned off */
+               ufs_qcom_phy_disable_iface_clk(host->generic_phy);
+               if (!ufs_qcom_is_link_active(hba)) {
+                       /* turn off UFS local PHY ref_clk */
+                       ufs_qcom_phy_disable_ref_clk(host->generic_phy);
+                       /* disable device ref_clk */
+                       ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy);
+               }
+               vote = host->bus_vote.min_bw_vote;
+       }
+
+       err = ufs_qcom_set_bus_vote(host, vote);
+       if (err)
+               dev_err(hba->dev, "%s: set bus vote failed %d\n",
+                               __func__, err);
+
+out:
+       return err;
+}
+
+static ssize_t
+show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       struct ufs_qcom_host *host = hba->priv;
+
+       return snprintf(buf, PAGE_SIZE, "%u\n",
+                       host->bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       struct ufs_qcom_host *host = hba->priv;
+       uint32_t value;
+
+       if (!kstrtou32(buf, 0, &value)) {
+               host->bus_vote.is_max_bw_needed = !!value;
+               ufs_qcom_update_bus_bw_vote(host);
+       }
+
+       return count;
+}
+
+static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
+{
+       int err;
+       struct device *dev = host->hba->dev;
+       struct device_node *np = dev->of_node;
+
+       err = of_property_count_strings(np, "qcom,bus-vector-names");
+       if (err < 0) {
+               dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
+                               __func__, err);
+               goto out;
+       }
+
+       /* cache the vote index for minimum and maximum bandwidth */
+       host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
+       host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
+
+       host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
+       host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
+       sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
+       host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
+       host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
+       err = device_create_file(dev, &host->bus_vote.max_bus_bw);
+out:
+       return err;
+}
+
+#define        ANDROID_BOOT_DEV_MAX    30
+static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
+static int get_android_boot_dev(char *str)
+{
+       strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
+       return 1;
+}
+__setup("androidboot.bootdevice=", get_android_boot_dev);
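+/*
+ * With e.g. "androidboot.bootdevice=<name>" on the kernel command line,
+ * ufs_qcom_init() below only binds to the controller whose device name
+ * matches <name>; the exact name is platform specific.
+ */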
+
+/**
+ * ufs_qcom_init - bind phy with controller
+ * @hba: host controller instance
+ *
+ * Binds the PHY with the controller and powers up the PHY, enabling its
+ * clocks and regulators.
+ *
+ * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
+ * power-up failure, and zero on success.
+ */
+static int ufs_qcom_init(struct ufs_hba *hba)
+{
+       int err;
+       struct device *dev = hba->dev;
+       struct ufs_qcom_host *host;
+
+       if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
+               return -ENODEV;
+
+       host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+       if (!host) {
+               err = -ENOMEM;
+               dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
+               goto out;
+       }
+
+       host->hba = hba;
+       hba->priv = (void *)host;
+
+       host->generic_phy = devm_phy_get(dev, "ufsphy");
+
+       if (IS_ERR(host->generic_phy)) {
+               err = PTR_ERR(host->generic_phy);
+               dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
+               goto out;
+       }
+
+       err = ufs_qcom_bus_register(host);
+       if (err)
+               goto out_host_free;
+
+       phy_init(host->generic_phy);
+       err = phy_power_on(host->generic_phy);
+       if (err)
+               goto out_unregister_bus;
+
+       err = ufs_qcom_init_lane_clks(host);
+       if (err)
+               goto out_disable_phy;
+
+       ufs_qcom_advertise_quirks(hba);
+
+       hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
+       hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+
+       ufs_qcom_setup_clocks(hba, true);
+
+       if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
+               ufs_qcom_hosts[hba->dev->id] = host;
+
+       goto out;
+
+out_disable_phy:
+       phy_power_off(host->generic_phy);
+out_unregister_bus:
+       phy_exit(host->generic_phy);
+out_host_free:
+       devm_kfree(dev, host);
+       hba->priv = NULL;
+out:
+       return err;
+}
+
+static void ufs_qcom_exit(struct ufs_hba *hba)
+{
+       struct ufs_qcom_host *host = hba->priv;
+
+       ufs_qcom_disable_lane_clks(host);
+       phy_power_off(host->generic_phy);
+}
+
+static
+void ufs_qcom_clk_scale_notify(struct ufs_hba *hba)
+{
+       struct ufs_qcom_host *host = hba->priv;
+       struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
+
+       if (!dev_req_params)
+               return;
+
+       ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
+                               dev_req_params->pwr_rx,
+                               dev_req_params->hs_rate);
+}
+
+/**
+ * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
+ *
+ * The variant operations configure the necessary controller and PHY
+ * handshake during initialization.
+ */
+static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
+       .name                   = "qcom",
+       .init                   = ufs_qcom_init,
+       .exit                   = ufs_qcom_exit,
+       .clk_scale_notify       = ufs_qcom_clk_scale_notify,
+       .setup_clocks           = ufs_qcom_setup_clocks,
+       .hce_enable_notify      = ufs_qcom_hce_enable_notify,
+       .link_startup_notify    = ufs_qcom_link_startup_notify,
+       .pwr_change_notify      = ufs_qcom_pwr_change_notify,
+       .suspend                = ufs_qcom_suspend,
+       .resume                 = ufs_qcom_resume,
+};
+EXPORT_SYMBOL(ufs_hba_qcom_vops);
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
new file mode 100644 (file)
index 0000000..9a6febd
--- /dev/null
@@ -0,0 +1,170 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_H_
+#define UFS_QCOM_H_
+
+#define MAX_UFS_QCOM_HOSTS     1
+#define MAX_U32                 (~(u32)0)
+#define MPHY_TX_FSM_STATE       0x41
+#define TX_FSM_HIBERN8          0x1
+#define HBRN8_POLL_TOUT_MS      100
+#define DEFAULT_CLK_RATE_HZ     1000000
+#define BUS_VECTOR_NAME_LEN     32
+
+#define UFS_HW_VER_MAJOR_SHFT  (28)
+#define UFS_HW_VER_MAJOR_MASK  (0x000F << UFS_HW_VER_MAJOR_SHFT)
+#define UFS_HW_VER_MINOR_SHFT  (16)
+#define UFS_HW_VER_MINOR_MASK  (0x0FFF << UFS_HW_VER_MINOR_SHFT)
+#define UFS_HW_VER_STEP_SHFT   (0)
+#define UFS_HW_VER_STEP_MASK   (0xFFFF << UFS_HW_VER_STEP_SHFT)
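+/* e.g. REG_UFS_HW_VERSION == 0x10020003 decodes as major 1, minor 2, step 3 */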
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define UFS_QCOM_LIMIT_NUM_LANES_RX    2
+#define UFS_QCOM_LIMIT_NUM_LANES_TX    2
+#define UFS_QCOM_LIMIT_HSGEAR_RX       UFS_HS_G2
+#define UFS_QCOM_LIMIT_HSGEAR_TX       UFS_HS_G2
+#define UFS_QCOM_LIMIT_PWMGEAR_RX      UFS_PWM_G4
+#define UFS_QCOM_LIMIT_PWMGEAR_TX      UFS_PWM_G4
+#define UFS_QCOM_LIMIT_RX_PWR_PWM      SLOW_MODE
+#define UFS_QCOM_LIMIT_TX_PWR_PWM      SLOW_MODE
+#define UFS_QCOM_LIMIT_RX_PWR_HS       FAST_MODE
+#define UFS_QCOM_LIMIT_TX_PWR_HS       FAST_MODE
+#define UFS_QCOM_LIMIT_HS_RATE         PA_HS_MODE_B
+#define UFS_QCOM_LIMIT_DESIRED_MODE    FAST
+
+/* QCOM UFS host controller vendor specific registers */
+enum {
+       REG_UFS_SYS1CLK_1US                 = 0xC0,
+       REG_UFS_TX_SYMBOL_CLK_NS_US         = 0xC4,
+       REG_UFS_LOCAL_PORT_ID_REG           = 0xC8,
+       REG_UFS_PA_ERR_CODE                 = 0xCC,
+       REG_UFS_RETRY_TIMER_REG             = 0xD0,
+       REG_UFS_PA_LINK_STARTUP_TIMER       = 0xD8,
+       REG_UFS_CFG1                        = 0xDC,
+       REG_UFS_CFG2                        = 0xE0,
+       REG_UFS_HW_VERSION                  = 0xE4,
+
+       UFS_DBG_RD_REG_UAWM                     = 0x100,
+       UFS_DBG_RD_REG_UARM                     = 0x200,
+       UFS_DBG_RD_REG_TXUC                     = 0x300,
+       UFS_DBG_RD_REG_RXUC                     = 0x400,
+       UFS_DBG_RD_REG_DFC                      = 0x500,
+       UFS_DBG_RD_REG_TRLUT                    = 0x600,
+       UFS_DBG_RD_REG_TMRLUT                   = 0x700,
+       UFS_UFS_DBG_RD_REG_OCSC                 = 0x800,
+
+       UFS_UFS_DBG_RD_DESC_RAM                 = 0x1500,
+       UFS_UFS_DBG_RD_PRDT_RAM                 = 0x1700,
+       UFS_UFS_DBG_RD_RESP_RAM                 = 0x1800,
+       UFS_UFS_DBG_RD_EDTL_RAM                 = 0x1900,
+};
+
+/* bit definitions for REG_UFS_CFG2 register */
+#define UAWM_HW_CGC_EN         (1 << 0)
+#define UARM_HW_CGC_EN         (1 << 1)
+#define TXUC_HW_CGC_EN         (1 << 2)
+#define RXUC_HW_CGC_EN         (1 << 3)
+#define DFC_HW_CGC_EN          (1 << 4)
+#define TRLUT_HW_CGC_EN                (1 << 5)
+#define TMRLUT_HW_CGC_EN       (1 << 6)
+#define OCSC_HW_CGC_EN         (1 << 7)
+
+#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
+                                TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
+                                DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
+                                TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
+
+/* bit offset */
+enum {
+       OFFSET_UFS_PHY_SOFT_RESET           = 1,
+       OFFSET_CLK_NS_REG                   = 10,
+};
+
+/* bit masks */
+enum {
+       MASK_UFS_PHY_SOFT_RESET             = 0x2,
+       MASK_TX_SYMBOL_CLK_1US_REG          = 0x3FF,
+       MASK_CLK_NS_REG                     = 0xFFFC00,
+};
+
+enum ufs_qcom_phy_init_type {
+       UFS_PHY_INIT_FULL,
+       UFS_PHY_INIT_CFG_RESTORE,
+};
+
+static inline void
+ufs_qcom_get_controller_revision(struct ufs_hba *hba,
+                                u8 *major, u16 *minor, u16 *step)
+{
+       u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
+
+       *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
+       *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
+       *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
+};
+
+static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
+{
+       ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
+                       1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+
+       /*
+        * Make sure assertion of ufs phy reset is written to
+        * register before returning
+        */
+       mb();
+}
+
+static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+{
+       ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
+                       0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+
+       /*
+        * Make sure de-assertion of ufs phy reset is written to
+        * register before returning
+        */
+       mb();
+}
+
+struct ufs_qcom_bus_vote {
+       uint32_t client_handle;
+       uint32_t curr_vote;
+       int min_bw_vote;
+       int max_bw_vote;
+       int saved_vote;
+       bool is_max_bw_needed;
+       struct device_attribute max_bus_bw;
+};
+
+struct ufs_qcom_host {
+       struct phy *generic_phy;
+       struct ufs_hba *hba;
+       struct ufs_qcom_bus_vote bus_vote;
+       struct ufs_pa_layer_attr dev_req_params;
+       struct clk *rx_l0_sync_clk;
+       struct clk *tx_l0_sync_clk;
+       struct clk *rx_l1_sync_clk;
+       struct clk *tx_l1_sync_clk;
+       bool is_lane_clks_enabled;
+};
+
+#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
+#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
+#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+
+#endif /* UFS_QCOM_H_ */
index 2e4614b..5d60a86 100644 (file)
@@ -4714,10 +4714,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
                sdev_printk(KERN_WARNING, sdp,
                            "START_STOP failed for power mode: %d, result %x\n",
                            pwr_mode, ret);
-               if (driver_byte(ret) & DRIVER_SENSE) {
-                       scsi_show_sense_hdr(sdp, NULL, &sshdr);
-                       scsi_show_extd_sense(sdp, NULL, sshdr.asc, sshdr.ascq);
-               }
+               if (driver_byte(ret) & DRIVER_SENSE)
+                       scsi_print_sense_hdr(sdp, NULL, &sshdr);
        }
 
        if (!ret)
index c0506de..9e09da4 100644 (file)
@@ -2143,22 +2143,22 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
                seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d"
                        " dma_mode=%02x fast=%d",
                        hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast);
-               seq_printf(m, "\nsync_xfer[] =       ");
+               seq_puts(m, "\nsync_xfer[] =       ");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%02x", hd->sync_xfer[x]);
-               seq_printf(m, "\nsync_stat[] =       ");
+               seq_puts(m, "\nsync_stat[] =       ");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%02x", hd->sync_stat[x]);
        }
 #ifdef PROC_STATISTICS
        if (hd->proc & PR_STATISTICS) {
-               seq_printf(m, "\ncommands issued:    ");
+               seq_puts(m, "\ncommands issued:    ");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
-               seq_printf(m, "\ndisconnects allowed:");
+               seq_puts(m, "\ndisconnects allowed:");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
-               seq_printf(m, "\ndisconnects done:   ");
+               seq_puts(m, "\ndisconnects done:   ");
                for (x = 0; x < 7; x++)
                        seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
                seq_printf(m,
@@ -2167,7 +2167,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
        }
 #endif
        if (hd->proc & PR_CONNECTED) {
-               seq_printf(m, "\nconnected:     ");
+               seq_puts(m, "\nconnected:     ");
                if (hd->connected) {
                        cmd = (struct scsi_cmnd *) hd->connected;
                        seq_printf(m, " %d:%llu(%02x)",
@@ -2175,7 +2175,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
                }
        }
        if (hd->proc & PR_INPUTQ) {
-               seq_printf(m, "\ninput_Q:       ");
+               seq_puts(m, "\ninput_Q:       ");
                cmd = (struct scsi_cmnd *) hd->input_Q;
                while (cmd) {
                        seq_printf(m, " %d:%llu(%02x)",
@@ -2184,7 +2184,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
                }
        }
        if (hd->proc & PR_DISCQ) {
-               seq_printf(m, "\ndisconnected_Q:");
+               seq_puts(m, "\ndisconnected_Q:");
                cmd = (struct scsi_cmnd *) hd->disconnected_Q;
                while (cmd) {
                        seq_printf(m, " %d:%llu(%02x)",
@@ -2192,7 +2192,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
                        cmd = (struct scsi_cmnd *) cmd->host_scribble;
                }
        }
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
        spin_unlock_irq(&hd->lock);
 #endif                         /* PROC_INTERFACE */
        return 0;
index f94d736..0c0f17b 100644 (file)
@@ -1295,9 +1295,6 @@ static void wd7000_revision(Adapter * host)
 }
 
 
-#undef SPRINTF
-#define SPRINTF(args...) { seq_printf(m, ## args); }
-
 static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length)
 {
        dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length);
@@ -1320,43 +1317,43 @@ static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host)
 #endif
 
        spin_lock_irqsave(host->host_lock, flags);
-       SPRINTF("Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2);
-       SPRINTF("  IO base:      0x%x\n", adapter->iobase);
-       SPRINTF("  IRQ:          %d\n", adapter->irq);
-       SPRINTF("  DMA channel:  %d\n", adapter->dma);
-       SPRINTF("  Interrupts:   %d\n", adapter->int_counter);
-       SPRINTF("  BUS_ON time:  %d nanoseconds\n", adapter->bus_on * 125);
-       SPRINTF("  BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125);
+       seq_printf(m, "Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2);
+       seq_printf(m, "  IO base:      0x%x\n", adapter->iobase);
+       seq_printf(m, "  IRQ:          %d\n", adapter->irq);
+       seq_printf(m, "  DMA channel:  %d\n", adapter->dma);
+       seq_printf(m, "  Interrupts:   %d\n", adapter->int_counter);
+       seq_printf(m, "  BUS_ON time:  %d nanoseconds\n", adapter->bus_on * 125);
+       seq_printf(m, "  BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125);
 
 #ifdef WD7000_DEBUG
        ogmbs = adapter->mb.ogmb;
        icmbs = adapter->mb.icmb;
 
-       SPRINTF("\nControl port value: 0x%x\n", adapter->control);
-       SPRINTF("Incoming mailbox:\n");
-       SPRINTF("  size: %d\n", ICMB_CNT);
-       SPRINTF("  queued messages: ");
+       seq_printf(m, "\nControl port value: 0x%x\n", adapter->control);
+       seq_puts(m, "Incoming mailbox:\n");
+       seq_printf(m, "  size: %d\n", ICMB_CNT);
+       seq_puts(m, "  queued messages: ");
 
        for (i = count = 0; i < ICMB_CNT; i++)
                if (icmbs[i].status) {
                        count++;
-                       SPRINTF("0x%x ", i);
+                       seq_printf(m, "0x%x ", i);
                }
 
-       SPRINTF(count ? "\n" : "none\n");
+       seq_puts(m, count ? "\n" : "none\n");
 
-       SPRINTF("Outgoing mailbox:\n");
-       SPRINTF("  size: %d\n", OGMB_CNT);
-       SPRINTF("  next message: 0x%x\n", adapter->next_ogmb);
-       SPRINTF("  queued messages: ");
+       seq_puts(m, "Outgoing mailbox:\n");
+       seq_printf(m, "  size: %d\n", OGMB_CNT);
+       seq_printf(m, "  next message: 0x%x\n", adapter->next_ogmb);
+       seq_puts(m, "  queued messages: ");
 
        for (i = count = 0; i < OGMB_CNT; i++)
                if (ogmbs[i].status) {
                        count++;
-                       SPRINTF("0x%x ", i);
+                       seq_printf(m, "0x%x ", i);
                }
 
-       SPRINTF(count ? "\n" : "none\n");
+       seq_puts(m, count ? "\n" : "none\n");
 #endif
 
        spin_unlock_irqrestore(host->host_lock, flags);
index ecd540a..61653a0 100644 (file)
@@ -47,6 +47,7 @@
 
 #include <generated/utsrelease.h>
 
+#include <scsi/scsi.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_tcq.h>
index 9a33c5f..7be22da 100644 (file)
@@ -79,6 +79,12 @@ struct enclosure_component_callbacks {
        int (*set_locate)(struct enclosure_device *,
                          struct enclosure_component *,
                          enum enclosure_component_setting);
+       void (*get_power_status)(struct enclosure_device *,
+                                struct enclosure_component *);
+       int (*set_power_status)(struct enclosure_device *,
+                               struct enclosure_component *,
+                               int);
+       int (*show_id)(struct enclosure_device *, char *buf);
 };
 
 
@@ -91,7 +97,9 @@ struct enclosure_component {
        int fault;
        int active;
        int locate;
+       int slot;
        enum enclosure_status status;
+       int power_status;
 };
 
 struct enclosure_device {
@@ -120,8 +128,9 @@ enclosure_register(struct device *, const char *, int,
                   struct enclosure_component_callbacks *);
 void enclosure_unregister(struct enclosure_device *);
 struct enclosure_component *
-enclosure_component_register(struct enclosure_device *, unsigned int,
-                                enum enclosure_component_type, const char *);
+enclosure_component_alloc(struct enclosure_device *, unsigned int,
+                         enum enclosure_component_type, const char *);
+int enclosure_component_register(struct enclosure_component *);
 int enclosure_add_device(struct enclosure_device *enclosure, int component,
                         struct device *dev);
 int enclosure_remove_device(struct enclosure_device *, struct device *);
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
new file mode 100644 (file)
index 0000000..9d18e9f
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef PHY_QCOM_UFS_H_
+#define PHY_QCOM_UFS_H_
+
+#include "phy.h"
+
+/**
+ * ufs_qcom_phy_enable_ref_clk() - Enable the phy
+ * ref clock.
+ * @phy: reference to a generic phy
+ *
+ * returns 0 for success, and non-zero for error.
+ */
+int ufs_qcom_phy_enable_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_disable_ref_clk() - Disable the phy
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_disable_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
+
+int ufs_qcom_phy_enable_iface_clk(struct phy *phy);
+void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
+int ufs_qcom_phy_start_serdes(struct phy *phy);
+int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
+int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
+int ufs_qcom_phy_is_pcs_ready(struct phy *phy);
+void ufs_qcom_phy_save_controller_version(struct phy *phy,
+                       u8 major, u16 minor, u16 step);
+
+#endif /* PHY_QCOM_UFS_H_ */
index 8a7f8ad..d0a66aa 100644 (file)
@@ -195,6 +195,9 @@ enum scsi_timeouts {
 #define        ATA_16                0x85      /* 16-byte pass-thru */
 #define        ATA_12                0xa1      /* 12-byte pass-thru */
 
+/* Vendor specific CDBs start here */
+#define VENDOR_SPECIFIC_CDB 0xc0
+
 /*
  *     SCSI command lengths
  */
index 7982795..f8170e9 100644 (file)
@@ -5,8 +5,11 @@ struct scsi_cmnd;
 struct scsi_device;
 struct scsi_sense_hdr;
 
+#define SCSI_LOG_BUFSIZE 128
+
 extern void scsi_print_command(struct scsi_cmnd *);
-extern void __scsi_print_command(const unsigned char *, size_t);
+extern size_t __scsi_format_command(char *, size_t,
+                                  const unsigned char *, size_t);
 extern void scsi_show_extd_sense(const struct scsi_device *, const char *,
                                 unsigned char, unsigned char);
 extern void scsi_show_sense_hdr(const struct scsi_device *, const char *,
@@ -17,12 +20,73 @@ extern void scsi_print_sense(const struct scsi_cmnd *);
 extern void __scsi_print_sense(const struct scsi_device *, const char *name,
                               const unsigned char *sense_buffer,
                               int sense_len);
-extern void scsi_print_result(struct scsi_cmnd *, const char *, int);
-extern const char *scsi_hostbyte_string(int);
-extern const char *scsi_driverbyte_string(int);
-extern const char *scsi_mlreturn_string(int);
+extern void scsi_print_result(const struct scsi_cmnd *, const char *, int);
+
+#ifdef CONFIG_SCSI_CONSTANTS
+extern bool scsi_opcode_sa_name(int, int, const char **, const char **);
 extern const char *scsi_sense_key_string(unsigned char);
 extern const char *scsi_extd_sense_format(unsigned char, unsigned char,
                                          const char **);
+extern const char *scsi_mlreturn_string(int);
+extern const char *scsi_hostbyte_string(int);
+extern const char *scsi_driverbyte_string(int);
+#else
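+/*
+ * Minimal inline stubs used when CONFIG_SCSI_CONSTANTS is not set; the
+ * string helpers simply return NULL.
+ */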
+static inline bool
+scsi_opcode_sa_name(int cmd, int sa,
+                   const char **cdb_name, const char **sa_name)
+{
+       *cdb_name = NULL;
+       switch (cmd) {
+       case VARIABLE_LENGTH_CMD:
+       case MAINTENANCE_IN:
+       case MAINTENANCE_OUT:
+       case PERSISTENT_RESERVE_IN:
+       case PERSISTENT_RESERVE_OUT:
+       case SERVICE_ACTION_IN_12:
+       case SERVICE_ACTION_OUT_12:
+       case SERVICE_ACTION_BIDIRECTIONAL:
+       case SERVICE_ACTION_IN_16:
+       case SERVICE_ACTION_OUT_16:
+       case EXTENDED_COPY:
+       case RECEIVE_COPY_RESULTS:
+               *sa_name = NULL;
+               return true;
+       default:
+               return false;
+       }
+}
+
+static inline const char *
+scsi_sense_key_string(unsigned char key)
+{
+       return NULL;
+}
+
+static inline const char *
+scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
+{
+       *fmt = NULL;
+       return NULL;
+}
+
+static inline const char *
+scsi_mlreturn_string(int result)
+{
+       return NULL;
+}
+
+static inline const char *
+scsi_hostbyte_string(int result)
+{
+       return NULL;
+}
+
+static inline const char *
+scsi_driverbyte_string(int result)
+{
+       return NULL;
+}
+
+#endif
 
 #endif /* _SCSI_SCSI_DBG_H */
index 3a4edd1..a4c9336 100644 (file)
@@ -230,9 +230,6 @@ struct scsi_dh_data {
 #define transport_class_to_sdev(class_dev) \
        to_scsi_device(class_dev->parent)
 
-#define sdev_printk(prefix, sdev, fmt, a...)   \
-       dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)
-
 #define sdev_dbg(sdev, fmt, a...) \
        dev_dbg(&(sdev)->sdev_gendev, fmt, ##a)
 
@@ -240,16 +237,15 @@ struct scsi_dh_data {
  * like scmd_printk, but the device name is passed in
  * as a string pointer
  */
-#define sdev_prefix_printk(l, sdev, p, fmt, a...)                      \
-       (p) ?                                                           \
-       sdev_printk(l, sdev, "[%s] " fmt, p, ##a) :                     \
-       sdev_printk(l, sdev, fmt, ##a)
-
-#define scmd_printk(prefix, scmd, fmt, a...)                           \
-        (scmd)->request->rq_disk ?                                     \
-       sdev_printk(prefix, (scmd)->device, "[%s] " fmt,                \
-                   (scmd)->request->rq_disk->disk_name, ##a) :         \
-       sdev_printk(prefix, (scmd)->device, fmt, ##a)
+__printf(4, 5) void
+sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
+               const char *, ...);
+
+#define sdev_printk(l, sdev, fmt, a...)                                \
+       sdev_prefix_printk(l, sdev, NULL, fmt, ##a)
+
+__printf(3, 4) void
+scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
 
 #define scmd_dbg(scmd, fmt, a...)                                         \
        do {                                                               \